From 48a803aa469b897ee5862add103d03dcde313b56 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Tue, 9 Jan 2024 09:13:17 +1100 Subject: [PATCH 001/305] Initial commit of onedrive-v2.5.0-alpha-5 * Initial commit of onedrive-v2.5.0-alpha-5 --- CHANGELOG.md | 7 + Makefile.in | 24 +- README.md | 47 +- config | 15 +- configure | 20 +- configure.ac | 2 +- contrib/completions/complete.bash | 4 +- contrib/completions/complete.fish | 1 - contrib/completions/complete.zsh | 1 - contrib/docker/entrypoint.sh | 7 + docs/BusinessSharedFolders.md | 192 - docs/Docker.md | 5 +- docs/INSTALL.md | 9 +- docs/Podman.md | 5 +- docs/USAGE.md | 1642 +- docs/application-config-options.md | 1075 ++ docs/business-shared-folders.md | 40 + docs/known-issues.md | 64 +- ...d-Libraries.md => sharepoint-libraries.md} | 0 docs/ubuntu-package-install.md | 6 + onedrive.1.in | 5 - src/clientSideFiltering.d | 400 + src/config.d | 2726 +++- src/curlEngine.d | 110 + src/itemdb.d | 394 +- src/log.d | 361 +- src/main.d | 3001 ++-- src/monitor.d | 426 +- src/onedrive.d | 2263 +-- src/progress.d | 156 - src/qxor.d | 22 +- src/selective.d | 422 - src/sqlite.d | 147 +- src/sync.d | 13102 ++++++++-------- src/upload.d | 302 - src/util.d | 1083 +- 36 files changed, 14952 insertions(+), 13134 deletions(-) delete mode 100644 docs/BusinessSharedFolders.md create mode 100644 docs/application-config-options.md create mode 100644 docs/business-shared-folders.md rename docs/{SharePoint-Shared-Libraries.md => sharepoint-libraries.md} (100%) create mode 100644 src/clientSideFiltering.d create mode 100644 src/curlEngine.d delete mode 100644 src/progress.d delete mode 100644 src/selective.d delete mode 100644 src/upload.d diff --git a/CHANGELOG.md b/CHANGELOG.md index a6d2d3f1b..8f7f357ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,13 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
+## 2.5.0 - TBA + + +### Changed +* Renamed various documentation files to align with document content + + ## 2.4.25 - 2023-06-21 ### Fixed * Fixed that the application was reporting as v2.2.24 when in fact it was v2.4.24 (release tagging issue) diff --git a/Makefile.in b/Makefile.in index 5f0ad31cb..3cab7aeb8 100644 --- a/Makefile.in +++ b/Makefile.in @@ -55,7 +55,7 @@ endif system_unit_files = contrib/systemd/onedrive@.service user_unit_files = contrib/systemd/onedrive.service -DOCFILES = README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md docs/application-security.md +DOCFILES = readme.md config LICENSE changelog.md docs/advanced-usage.md docs/application-config-options.md docs/application-security.md docs/business-shared-folders.md docs/docker.md docs/install.md docs/national-cloud-deployments.md docs/podman.md docs/privacy-policy.md docs/sharepoint-libraries.md docs/terms-of-service.md docs/ubuntu-package-install.md docs/usage.md ifneq ("$(wildcard /etc/redhat-release)","") RHEL = $(shell cat /etc/redhat-release | grep -E "(Red Hat Enterprise Linux|CentOS)" | wc -l) @@ -66,19 +66,18 @@ RHEL_VERSION = 0 endif SOURCES = \ + src/main.d \ src/config.d \ - src/itemdb.d \ src/log.d \ - src/main.d \ - src/monitor.d \ - src/onedrive.d \ + src/util.d \ src/qxor.d \ - src/selective.d \ - src/sqlite.d \ + src/curlEngine.d \ + src/onedrive.d \ src/sync.d \ - src/upload.d \ - src/util.d \ - src/progress.d \ + src/itemdb.d \ + src/sqlite.d \ + src/clientSideFiltering.d \ + src/monitor.d \ src/arsd/cgi.d ifeq ($(NOTIFICATIONS),yes) @@ -92,10 +91,9 @@ clean: rm -rf autom4te.cache rm -f config.log config.status -# also remove files generated via ./configure +# Remove files generated via ./configure distclean: clean - rm -f Makefile contrib/pacman/PKGBUILD contrib/spec/onedrive.spec onedrive.1 \ - $(system_unit_files) $(user_unit_files) + rm -f Makefile contrib/pacman/PKGBUILD contrib/spec/onedrive.spec onedrive.1 $(system_unit_files) $(user_unit_files) onedrive: $(SOURCES) if [ -f .git/HEAD ] ; then \ diff --git a/README.md b/README.md index 28b663595..602e72321 100644 --- a/README.md +++ b/README.md @@ -5,14 +5,17 @@ [![Build Docker Images](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml/badge.svg)](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml) [![Docker Pulls](https://img.shields.io/docker/pulls/driveone/onedrive)](https://hub.docker.com/r/driveone/onedrive) -A free Microsoft OneDrive Client which supports OneDrive Personal, OneDrive for Business, OneDrive for Office365 and SharePoint. +Introducing a free Microsoft OneDrive Client that seamlessly supports OneDrive Personal, OneDrive for Business, OneDrive for Office365, and SharePoint Libraries. -This powerful and highly configurable client can run on all major Linux distributions, FreeBSD, or as a Docker container. It supports one-way and two-way sync capabilities and securely connects to Microsoft OneDrive services. +This robust and highly customisable client is compatible with all major Linux distributions and FreeBSD, and can also be deployed as a container using Docker or Podman. It offers both one-way and two-way synchronisation capabilities while ensuring a secure connection to Microsoft OneDrive services. 
-This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) client, which the developer has confirmed he has no desire to maintain or support the client ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)). This fork has been in active development since mid 2018. +Originally derived as a 'fork' from the [skilion](https://github.com/skilion/onedrive) client, it's worth noting that the developer of the original client has explicitly stated they have no intention of maintaining or supporting their work ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)). + +This client represents a 100% re-imagining of the original work, addressing numerous notable bugs and issues while incorporating a significant array of new features. This client has been under active development since mid-2018. ## Features -* State caching +* Supports 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive +* Sync State Caching * Real-Time local file monitoring with inotify * Real-Time syncing of remote updates via webhooks * File upload / download validation to ensure data integrity @@ -26,6 +29,7 @@ This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) cl * Support for National cloud deployments (Microsoft Cloud for US Government, Microsoft Cloud Germany, Azure and Office 365 operated by 21Vianet in China) * Supports single & multi-tenanted applications * Supports rate limiting of traffic +* Supports multi-threaded uploads and downloads ## What's missing * Ability to encrypt/decrypt files on-the-fly when uploading/downloading files from OneDrive @@ -36,28 +40,17 @@ This client is a 'fork' of the [skilion](https://github.com/skilion/onedrive) cl * Colorful log output terminal modification: [OneDrive Client for Linux Colorful log Output](https://github.com/zzzdeb/dotfiles/blob/master/scripts/tools/onedrive_log) * System Tray Icon: [OneDrive Client for Linux System Tray Icon](https://github.com/DanielBorgesOliveira/onedrive_tray) -## Supported Application Version -Only the current application release version or greater is supported. - -The current application release version is: [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) - -Check the version of the application you are using `onedrive --version` and ensure that you are running either the current release or compile the application yourself from master to get the latest version. - -If you are not using the above application version or greater, you must upgrade your application to obtain support. - -## Have a Question -If you have a question or need something clarified, please raise a new disscussion post [here](https://github.com/abraunegg/onedrive/discussions) - -Be sure to review the Frequently Asked Questions as well before raising a new discussion post. - ## Frequently Asked Questions Refer to [Frequently Asked Questions](https://github.com/abraunegg/onedrive/wiki/Frequently-Asked-Questions) +## Have a question +If you have a question or need something clarified, please raise a new disscussion post [here](https://github.com/abraunegg/onedrive/discussions) + ## Reporting an Issue or Bug -If you encounter any bugs you can report them here on GitHub. Before filing an issue be sure to: +If you encounter any bugs you can report them here on Github. Before filing an issue be sure to: -1. 
Check the version of the application you are using `onedrive --version` and ensure that you are running a supported application version. If you are not using a supported application version, you must first upgrade your application to a supported version and then re-test for your issue. -2. If you are using a supported applcation version, fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md) +1. Check the version of the application you are using `onedrive --version` and ensure that you are running either the latest [release](https://github.com/abraunegg/onedrive/releases) or built from master. +2. Fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md) 3. Generate a debug log for support using the following [process](https://github.com/abraunegg/onedrive/wiki/Generate-debug-log-for-support) * If you are in *any* way concerned regarding the sensitivity of the data contained with in the verbose debug log file, create a new OneDrive account, configure the client to use that, use *dummy* data to simulate your environment and then replicate your original issue * If you are still concerned, provide an NDA or confidentiality document to sign @@ -70,23 +63,23 @@ Refer to [docs/known-issues.md](https://github.com/abraunegg/onedrive/blob/maste ## Documentation and Configuration Assistance ### Installing from Distribution Packages or Building the OneDrive Client for Linux from source -Refer to [docs/INSTALL.md](https://github.com/abraunegg/onedrive/blob/master/docs/INSTALL.md) +Refer to [docs/install.md](https://github.com/abraunegg/onedrive/blob/master/docs/install.md) ### Configuration and Usage -Refer to [docs/USAGE.md](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md) +Refer to [docs/usage.md](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md) ### Configure OneDrive Business Shared Folders -Refer to [docs/BusinessSharedFolders.md](https://github.com/abraunegg/onedrive/blob/master/docs/BusinessSharedFolders.md) +Refer to [docs/business-shared-folders.md](https://github.com/abraunegg/onedrive/blob/master/docs/business-shared-folders.md) ### Configure SharePoint / Office 365 Shared Libraries (Business or Education) -Refer to [docs/SharePoint-Shared-Libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/SharePoint-Shared-Libraries.md) +Refer to [docs/sharepoint-libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/sharepoint-libraries.md) ### Configure National Cloud support Refer to [docs/national-cloud-deployments.md](https://github.com/abraunegg/onedrive/blob/master/docs/national-cloud-deployments.md) ### Docker support -Refer to [docs/Docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/Docker.md) +Refer to [docs/docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/docker.md) ### Podman support -Refer to [docs/Podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/Podman.md) +Refer to [docs/podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/podman.md) diff --git a/config b/config index 807180ea5..027194977 100644 --- a/config +++ b/config @@ -3,7 +3,7 @@ # with their default values. # All values need to be enclosed in quotes # When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. 
+# For explanations of all config options below see docs/usage.md or the man page. # # sync_dir = "~/OneDrive" # skip_file = "~*|.~*|*.tmp" @@ -40,22 +40,19 @@ # bypass_data_preservation = "false" # azure_ad_endpoint = "" # azure_tenant_id = "common" -# sync_business_shared_folders = "false" +# sync_business_shared_items = "false" # sync_dir_permissions = "700" # sync_file_permissions = "600" # rate_limit = "131072" +# operation_timeout = "3600" # webhook_enabled = "false" # webhook_public_url = "" # webhook_listening_host = "" # webhook_listening_port = "8888" -# webhook_expiration_interval = "86400" -# webhook_renewal_interval = "43200" +# webhook_expiration_interval = "600" +# webhook_renewal_interval = "300" +# webhook_retry_interval = "60" # space_reservation = "50" # display_running_config = "false" # read_only_auth_scope = "false" # cleanup_local_files = "false" -# operation_timeout = "3600" -# dns_timeout = "60" -# connect_timeout = "10" -# data_timeout = "600" -# ip_protocol_version = "0" diff --git a/configure b/configure index f68a775cc..8c896eaed 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for onedrive v2.4.25. +# Generated by GNU Autoconf 2.69 for onedrive v2.5.0-alpha-5. # # Report bugs to . # @@ -579,8 +579,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='onedrive' PACKAGE_TARNAME='onedrive' -PACKAGE_VERSION='v2.4.25' -PACKAGE_STRING='onedrive v2.4.25' +PACKAGE_VERSION='v2.5.0-alpha-5' +PACKAGE_STRING='onedrive v2.5.0-alpha-5' PACKAGE_BUGREPORT='https://github.com/abraunegg/onedrive' PACKAGE_URL='' @@ -1219,7 +1219,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures onedrive v2.4.25 to adapt to many kinds of systems. +\`configure' configures onedrive v2.5.0-alpha-5 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1280,7 +1280,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of onedrive v2.4.25:";; + short | recursive ) echo "Configuration of onedrive v2.5.0-alpha-5:";; esac cat <<\_ACEOF @@ -1393,7 +1393,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -onedrive configure v2.4.25 +onedrive configure v2.5.0-alpha-5 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -1410,7 +1410,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by onedrive $as_me v2.4.25, which was +It was created by onedrive $as_me v2.5.0-alpha-5, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -2162,7 +2162,7 @@ fi -PACKAGE_DATE="June 2023" +PACKAGE_DATE="January 2024" @@ -3159,7 +3159,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by onedrive $as_me v2.4.25, which was +This file was extended by onedrive $as_me v2.5.0-alpha-5, which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -3212,7 +3212,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -onedrive config.status v2.4.25 +onedrive config.status v2.5.0-alpha-5 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 9c2c0db26..69b9227b4 100644 --- a/configure.ac +++ b/configure.ac @@ -9,7 +9,7 @@ dnl - commit the changed files (configure.ac, configure) dnl - tag the release AC_PREREQ([2.69]) -AC_INIT([onedrive],[v2.4.25], [https://github.com/abraunegg/onedrive], [onedrive]) +AC_INIT([onedrive],[v2.5.0-alpha-5], [https://github.com/abraunegg/onedrive], [onedrive]) AC_CONFIG_SRCDIR([src/main.d]) diff --git a/contrib/completions/complete.bash b/contrib/completions/complete.bash index 358084640..68895c9d9 100644 --- a/contrib/completions/complete.bash +++ b/contrib/completions/complete.bash @@ -11,7 +11,7 @@ _onedrive() prev=${COMP_WORDS[COMP_CWORD-1]} options='--check-for-nomount --check-for-nosync --debug-https --disable-notifications --display-config --display-sync-status --download-only --disable-upload-validation --dry-run --enable-logging --force-http-1.1 --force-http-2 --get-file-link --local-first --logout -m --monitor --no-remote-delete --print-token --reauth --resync --skip-dot-files --skip-symlinks --synchronize --upload-only -v --verbose --version -h --help' - argopts='--create-directory --get-O365-drive-id --operation-timeout --remove-directory --single-directory --source-directory' + argopts='--create-directory --get-O365-drive-id --remove-directory --single-directory --source-directory' # Loop on the arguments to manage conflicting options for (( i=0; i < ${#COMP_WORDS[@]}-1; i++ )); do @@ -34,7 +34,7 @@ _onedrive() fi return 0 ;; - --create-directory|--get-O365-drive-id|--operation-timeout|--remove-directory|--single-directory|--source-directory) + --create-directory|--get-O365-drive-id|--remove-directory|--single-directory|--source-directory) return 0 ;; *) diff --git a/contrib/completions/complete.fish b/contrib/completions/complete.fish index 7547574c4..185a85823 100644 --- a/contrib/completions/complete.fish +++ b/contrib/completions/complete.fish @@ -23,7 +23,6 @@ complete -c onedrive -l local-first -d 'Synchronize from the local directory sou complete -c onedrive -l logout -d 'Logout the current user.' complete -c onedrive -n "not __fish_seen_subcommand_from --synchronize" -a "-m --monitor" -d 'Keep monitoring for local and remote changes.' complete -c onedrive -l no-remote-delete -d 'Do not delete local file deletes from OneDrive when using --upload-only.' -complete -c onedrive -l operation-timeout -d 'Specify the maximum amount of time (in seconds) an operation is allowed to take.' complete -c onedrive -l print-token -d 'Print the access token, useful for debugging.' complete -c onedrive -l remote-directory -d 'Remove a directory on OneDrive - no sync will be performed.' complete -c onedrive -l reauth -d 'Reauthenticate the client with OneDrive.' 
diff --git a/contrib/completions/complete.zsh b/contrib/completions/complete.zsh index b03ea6866..ff92e6f8d 100644 --- a/contrib/completions/complete.zsh +++ b/contrib/completions/complete.zsh @@ -27,7 +27,6 @@ all_opts=( '--logout[Logout the current user]' '(-m --monitor)'{-m,--monitor}'[Keep monitoring for local and remote changes]' '--no-remote-delete[Do not delete local file deletes from OneDrive when using --upload-only]' - '--operation-timeout[Specify the maximum amount of time (in seconds) an operation is allowed to take.]:seconds:' '--print-token[Print the access token, useful for debugging]' '--reauth[Reauthenticate the client with OneDrive]' '--resync[Forget the last saved state, perform a full sync]' diff --git a/contrib/docker/entrypoint.sh b/contrib/docker/entrypoint.sh index 8eb529480..f75f937d3 100755 --- a/contrib/docker/entrypoint.sh +++ b/contrib/docker/entrypoint.sh @@ -118,6 +118,13 @@ if [ -n "${ONEDRIVE_SINGLE_DIRECTORY:=""}" ]; then ARGS=(--single-directory \"${ONEDRIVE_SINGLE_DIRECTORY}\" ${ARGS[@]}) fi +# Tell client run in dry-run mode +if [ "${ONEDRIVE_DRYRUN:=0}" == "1" ]; then + echo "# We are running in dry-run mode" + echo "# Adding --dry-run" + ARGS=(--dry-run ${ARGS[@]}) +fi + if [ ${#} -gt 0 ]; then ARGS=("${@}") fi diff --git a/docs/BusinessSharedFolders.md b/docs/BusinessSharedFolders.md deleted file mode 100644 index 3f0429434..000000000 --- a/docs/BusinessSharedFolders.md +++ /dev/null @@ -1,192 +0,0 @@ -# How to configure OneDrive Business Shared Folder Sync -## Application Version -Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. - -## Process Overview -Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client: -1. List available shared folders to determine which folder you wish to sync & to validate that you have access to that folder -2. Create a new file called 'business_shared_folders' in your config directory which contains a list of the shared folders you wish to sync -3. Test the configuration using '--dry-run' -4. Sync the OneDrive Business Shared folders as required - -## Listing available OneDrive Business Shared Folders -List the available OneDrive Business Shared folders with the following command: -```text -onedrive --list-shared-folders -``` - This will return a listing of all OneDrive Business Shared folders which have been shared with you and by whom. This is important for conflict resolution: -```text -Initializing the Synchronization Engine ... - -Listing available OneDrive Business Shared Folders: ---------------------------------------- -Shared Folder: SharedFolder0 -Shared By: Firstname Lastname ---------------------------------------- -Shared Folder: SharedFolder1 -Shared By: Firstname Lastname ---------------------------------------- -Shared Folder: SharedFolder2 -Shared By: Firstname Lastname ---------------------------------------- -Shared Folder: SharedFolder0 -Shared By: Firstname Lastname (user@domain) ---------------------------------------- -Shared Folder: SharedFolder1 -Shared By: Firstname Lastname (user@domain) ---------------------------------------- -Shared Folder: SharedFolder2 -Shared By: Firstname Lastname (user@domain) -... -``` - -## Configuring OneDrive Business Shared Folders -1. 
Create a new file called 'business_shared_folders' in your config directory -2. On each new line, list the OneDrive Business Shared Folder you wish to sync -```text -[alex@centos7full onedrive]$ cat ~/.config/onedrive/business_shared_folders -# comment -Child Shared Folder -# Another comment -Top Level to Share -[alex@centos7full onedrive]$ -``` -3. Validate your configuration with `onedrive --display-config`: -```text -Configuration file successfully loaded -onedrive version = v2.4.3 -Config path = /home/alex/.config/onedrive-business/ -Config file found in config path = true -Config option 'check_nosync' = false -Config option 'sync_dir' = /home/alex/OneDriveBusiness -Config option 'skip_dir' = -Config option 'skip_file' = ~*|.~*|*.tmp -Config option 'skip_dotfiles' = false -Config option 'skip_symlinks' = false -Config option 'monitor_interval' = 300 -Config option 'min_notify_changes' = 5 -Config option 'log_dir' = /var/log/onedrive/ -Config option 'classify_as_big_delete' = 1000 -Config option 'sync_root_files' = false -Selective sync 'sync_list' configured = false -Business Shared Folders configured = true -business_shared_folders contents: -# comment -Child Shared Folder -# Another comment -Top Level to Share -``` - -## Performing a sync of OneDrive Business Shared Folders -Perform a standalone sync using the following command: `onedrive --synchronize --sync-shared-folders --verbose`: -```text -onedrive --synchronize --sync-shared-folders --verbose -Using 'user' Config Dir: /home/alex/.config/onedrive-business/ -Using 'system' Config Dir: -Configuration file successfully loaded -Initializing the OneDrive API ... -Configuring Global Azure AD Endpoints -Opening the item database ... -All operations will be performed in: /home/alex/OneDriveBusiness -Application version: v2.4.3 -Account Type: business -Default Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA -Default Root ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ -Remaining Free Space: 1098316220277 -Fetching details for OneDrive Root -OneDrive Root exists in the database -Initializing the Synchronization Engine ... -Syncing changes from OneDrive ... 
-Applying changes of Path ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ -Number of items from OneDrive to process: 0 -Attempting to sync OneDrive Business Shared Folders -Syncing this OneDrive Business Shared Folder: Child Shared Folder -OneDrive Business Shared Folder - Shared By: test user -Applying changes of Path ID: 01JRXHEZMREEB3EJVHNVHKNN454Q7DFXPR -Adding OneDrive root details for processing -Adding OneDrive folder details for processing -Adding 4 OneDrive items for processing from OneDrive folder -Adding 2 OneDrive items for processing from /Child Shared Folder/Cisco VDI Whitepaper -Adding 2 OneDrive items for processing from /Child Shared Folder/SMPP_Shared -Processing 11 OneDrive items to ensure consistent local state -Syncing this OneDrive Business Shared Folder: Top Level to Share -OneDrive Business Shared Folder - Shared By: test user (testuser@mynasau3.onmicrosoft.com) -Applying changes of Path ID: 01JRXHEZLRMXHKBYZNOBF3TQOPBXS3VZMA -Adding OneDrive root details for processing -Adding OneDrive folder details for processing -Adding 4 OneDrive items for processing from OneDrive folder -Adding 3 OneDrive items for processing from /Top Level to Share/10-Files -Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Cisco VDI Whitepaper -Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Images -Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/JPG -Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/PNG -Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/SMPP -Processing 31 OneDrive items to ensure consistent local state -Uploading differences of ~/OneDriveBusiness -Processing root -The directory has not changed -Processing SMPP_Local -The directory has not changed -Processing SMPP-IF-SPEC_v3_3-24858.pdf -The file has not changed -Processing SMPP_v3_4_Issue1_2-24857.pdf -The file has not changed -Processing new_local_file.txt -The file has not changed -Processing root -The directory has not changed -... 
-The directory has not changed -Processing week02-03-Combinational_Logic-v1.pptx -The file has not changed -Uploading new items of ~/OneDriveBusiness -Applying changes of Path ID: 01WIXGO5V6Y2GOVW7725BZO354PWSELRRZ -Number of items from OneDrive to process: 0 -Attempting to sync OneDrive Business Shared Folders -Syncing this OneDrive Business Shared Folder: Child Shared Folder -OneDrive Business Shared Folder - Shared By: test user -Applying changes of Path ID: 01JRXHEZMREEB3EJVHNVHKNN454Q7DFXPR -Adding OneDrive root details for processing -Adding OneDrive folder details for processing -Adding 4 OneDrive items for processing from OneDrive folder -Adding 2 OneDrive items for processing from /Child Shared Folder/Cisco VDI Whitepaper -Adding 2 OneDrive items for processing from /Child Shared Folder/SMPP_Shared -Processing 11 OneDrive items to ensure consistent local state -Syncing this OneDrive Business Shared Folder: Top Level to Share -OneDrive Business Shared Folder - Shared By: test user (testuser@mynasau3.onmicrosoft.com) -Applying changes of Path ID: 01JRXHEZLRMXHKBYZNOBF3TQOPBXS3VZMA -Adding OneDrive root details for processing -Adding OneDrive folder details for processing -Adding 4 OneDrive items for processing from OneDrive folder -Adding 3 OneDrive items for processing from /Top Level to Share/10-Files -Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Cisco VDI Whitepaper -Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/Images -Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/JPG -Adding 8 OneDrive items for processing from /Top Level to Share/10-Files/Images/PNG -Adding 2 OneDrive items for processing from /Top Level to Share/10-Files/SMPP -Processing 31 OneDrive items to ensure consistent local state -``` - -**Note:** Whenever you modify the `business_shared_folders` file you must perform a `--resync` of your database to clean up stale entries due to changes in your configuration. - -## Enable / Disable syncing of OneDrive Business Shared Folders -Performing a sync of the configured OneDrive Business Shared Folders can be enabled / disabled via adding the following to your configuration file. - -### Enable syncing of OneDrive Business Shared Folders via config file -```text -sync_business_shared_folders = "true" -``` - -### Disable syncing of OneDrive Business Shared Folders via config file -```text -sync_business_shared_folders = "false" -``` - -## Known Issues -Shared folders, shared with you from people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders. - -Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below: - -![shared_with_me](./images/shared_with_me.JPG) - -This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966) diff --git a/docs/Docker.md b/docs/Docker.md index 1f0050fd6..1bf6251ff 100644 --- a/docs/Docker.md +++ b/docs/Docker.md @@ -228,7 +228,7 @@ docker volume inspect onedrive_conf Or you can map your own config folder to the config volume. Make sure to copy all files from the docker volume into your mapped folder first. 
-The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#configuration) +The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration) ### Syncing multiple accounts There are many ways to do this, the easiest is probably to do the following: @@ -271,9 +271,10 @@ docker run $firstRun --restart unless-stopped --name onedrive -v onedrive_conf:/ | ONEDRIVE_LOGOUT | Controls "--logout" switch. Default is 0 | 1 | | ONEDRIVE_REAUTH | Controls "--reauth" switch. Default is 0 | 1 | | ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" | -| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#authorize-the-application-with-your-onedrive-account) | +| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) | | ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | | ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. Default = "" | "mydir" | +| ONEDRIVE_DRYRUN | Controls "--dry-run" option. Default is 0 | 1 | ### Environment Variables Usage Examples **Verbose Output:** diff --git a/docs/INSTALL.md b/docs/INSTALL.md index 3f00ae212..f5338122d 100644 --- a/docs/INSTALL.md +++ b/docs/INSTALL.md @@ -11,9 +11,10 @@ Distribution packages may be of an older release when compared to the latest rel | Distribution | Package Name & Package Link |   PKG_Version   |  i686  | x86_64 | ARMHF | AARCH64 | Extra Details | |---------------------------------|------------------------------------------------------------------------------|:---------------:|:----:|:------:|:-----:|:-------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Alpine Linux | [onedrive](https://pkgs.alpinelinux.org/packages?name=onedrive&branch=edge) |Alpine Linux Edge package|❌|✔|❌|✔ | | -| Arch Linux

<br>Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |AUR package|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)<br><br>**Note:** If asked regarding a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'<br><br>**Note:** System must have at least 1GB of memory & 1GB swap space
+| Arch Linux<br>Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |AUR package|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)<br><br>**Note:** You must first install 'base-devel' as this is a pre-requisite for using the AUR<br><br>**Note:** If asked regarding a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'<br><br>**Note:** System must have at least 1GB of memory & 1GB swap space
 | Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |Debian 11 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories<br><br>It is recommended that for Debian 11 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
 | Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |Debian 12 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories

It is recommended that for Debian 12 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | +| Debian Sid | [onedrive](https://packages.debian.org/sid/onedrive) |Debian Sid package|✔|✔|✔|✔| | | Fedora | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |Fedora Rawhide package|✔|✔|✔|✔| | | Gentoo | [onedrive](https://gpo.zugaina.org/net-misc/onedrive) | No API Available |✔|✔|❌|❌| | | Homebrew | [onedrive](https://formulae.brew.sh/formula/onedrive) | Homebrew package |❌|✔|❌|❌| | @@ -211,8 +212,10 @@ sudo make install ``` ### Build options -Notifications can be enabled using the `configure` switch `--enable-notifications`. +#### GUI Notification Support +GUI notification support can be enabled using the `configure` switch `--enable-notifications`. +#### systemd service directory customisation support Systemd service files are installed in the appropriate directories on the system, as provided by `pkg-config systemd` settings. If the need for overriding the deduced path are necessary, the two options `--with-systemdsystemunitdir` (for @@ -220,9 +223,11 @@ the Systemd system unit location), and `--with-systemduserunitdir` (for the Systemd user unit location) can be specified. Passing in `no` to one of these options disabled service file installation. +#### Additional Compiler Debug By passing `--enable-debug` to the `configure` call, `onedrive` gets built with additional debug information, useful (for example) to get `perf`-issued figures. +#### Shell Completion Support By passing `--enable-completions` to the `configure` call, shell completion functions are installed for `bash`, `zsh` and `fish`. The installation directories are determined as far as possible automatically, but can be overridden by passing diff --git a/docs/Podman.md b/docs/Podman.md index 35e69d26e..4f3474f34 100644 --- a/docs/Podman.md +++ b/docs/Podman.md @@ -255,7 +255,7 @@ podman volume inspect onedrive_conf ``` Or you can map your own config folder to the config volume. Make sure to copy all files from the volume into your mapped folder first. -The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#configuration) +The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration) ### Syncing multiple accounts There are many ways to do this, the easiest is probably to do the following: @@ -291,9 +291,10 @@ podman run -it --name onedrive_work --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ | ONEDRIVE_LOGOUT | Controls "--logout" switch. Default is 0 | 1 | | ONEDRIVE_REAUTH | Controls "--reauth" switch. Default is 0 | 1 | | ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" | -| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#authorize-the-application-with-your-onedrive-account) | +| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) | | ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | | ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. Default = "" | "mydir" | +| ONEDRIVE_DRYRUN | Controls "--dry-run" option. 
Default is 0 | 1 | ### Environment Variables Usage Examples **Verbose Output:** diff --git a/docs/USAGE.md b/docs/USAGE.md index 235b15d3e..880de9522 100644 --- a/docs/USAGE.md +++ b/docs/USAGE.md @@ -1,84 +1,71 @@ -# Configuration and Usage of the OneDrive Free Client +# Using the OneDrive Client for Linux ## Application Version Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. ## Table of Contents -- [Using the client](#using-the-client) - * [Upgrading from 'skilion' client](#upgrading-from-skilion-client) - * [Local File and Folder Naming Conventions](#local-file-and-folder-naming-conventions) - * [curl compatibility](#curl-compatibility) - * [Authorize the application with your OneDrive Account](#authorize-the-application-with-your-onedrive-account) - * [Show your configuration](#show-your-configuration) - * [Testing your configuration](#testing-your-configuration) - * [Performing a sync](#performing-a-sync) - * [Performing a single directory sync](#performing-a-single-directory-sync) - * [Performing a 'one-way' download sync](#performing-a-one-way-download-sync) - * [Performing a 'one-way' upload sync](#performing-a-one-way-upload-sync) - * [Performing a selective sync via 'sync_list' file](#performing-a-selective-sync-via-sync_list-file) - * [Performing a --resync](#performing-a---resync) - * [Performing a --force-sync without a --resync or changing your configuration](#performing-a---force-sync-without-a---resync-or-changing-your-configuration) - * [Increasing logging level](#increasing-logging-level) - * [Client Activity Log](#client-activity-log) - * [Notifications](#notifications) - * [Handling a OneDrive account password change](#handling-a-onedrive-account-password-change) -- [Configuration](#configuration) - * [The default configuration](#the-default-configuration-file-is-listed-below) - * ['config' file configuration examples](#config-file-configuration-examples) - + [sync_dir](#sync_dir) - + [sync_dir directory and file permissions](#sync_dir-directory-and-file-permissions) - + [skip_dir](#skip_dir) - + [skip_file](#skip_file) - + [skip_dotfiles](#skip_dotfiles) - + [monitor_interval](#monitor_interval) - + [monitor_fullscan_frequency](#monitor_fullscan_frequency) - + [monitor_log_frequency](#monitor_log_frequency) - + [min_notify_changes](#min_notify_changes) - + [operation_timeout](#operation_timeout) - + [ip_protocol_version](#ip_protocol_version) - + [classify_as_big_delete](#classify_as_big_delete) - * [Configuring the client for 'single tenant application' use](#configuring-the-client-for-single-tenant-application-use) - * [Configuring the client to use older 'skilion' application identifier](#configuring-the-client-to-use-older-skilion-application-identifier) + +- [Important Notes](#important-notes) + - [Upgrading from the 'skilion' Client](#upgrading-from-the-sklion-client) + - [Guidelines for Naming Local Files and Folders in the Synchronisation Directory](#guidelines-for-naming-local-files-and-folders-in-the-synchronisation-directory) + - [Compatibility with curl](#compatibility-with-curl) +- [First Steps](#first-steps) + - [Authorise the Application with Your Microsoft OneDrive Account](#authorise-the-application-with-your-microsoft-onedrive-account) + - [Display Your Applicable Runtime 
Configuration](#display-your-applicable-runtime-configuration) + - [Understanding OneDrive Client for Linux Operational Modes](#understanding-onedrive-client-for-linux-operational-modes) + - [Standalone Synchronisation Operational Mode (Standalone Mode)](#standalone-synchronisation-operational-mode-standalone-mode) + - [Ongoing Synchronisation Operational Mode (Monitor Mode)](#ongoing-synchronisation-operational-mode-monitor-mode) + - [Increasing application logging level](#increasing-application-logging-level) + - [Using 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive](#using-client-side-filtering-rules-to-determine-what-should-be-synced-with-microsoft-onedrive) + - [Testing your configuration](#testing-your-configuration) + - [Performing a sync with Microsoft OneDrive](#performing-a-sync-with-microsoft-onedrive) + - [Performing a single directory synchronisation with Microsoft OneDrive](#performing-a-single-directory-synchronisation-with-microsoft-onedrive) + - [Performing a 'one-way' download synchronisation with Microsoft OneDrive](#performing-a-one-way-download-synchronisation-with-microsoft-onedrive) + - [Performing a 'one-way' upload synchronisation with Microsoft OneDrive](#performing-a-one-way-upload-synchronisation-with-microsoft-onedrive) + - [Performing a selective synchronisation via 'sync_list' file](#performing-a-selective-synchronisation-via-sync_list-file) + - [Performing a --resync](#performing-a---resync) + - [Performing a --force-sync without a --resync or changing your configuration](#performing-a---force-sync-without-a---resync-or-changing-your-configuration) + - [Enabling the Client Activity Log](#enabling-the-client-activity-log) + - [Client Activity Log Example:](#client-activity-log-example) + - [Client Activity Log Differences](#client-activity-log-differences) + - [GUI Notifications](#gui-notifications) + - [Handling a Microsoft OneDrive Account Password Change](#handling-a-microsoft-onedrive-account-password-change) + - [Determining the synchronisation result](#determining-the-synchronisation-result) - [Frequently Asked Configuration Questions](#frequently-asked-configuration-questions) - * [How to sync only specific or single directory?](#how-to-sync-only-specific-or-single-directory) - * [How to 'skip' directories from syncing?](#how-to-skip-directories-from-syncing) - * [How to 'skip' files from syncing?](#how-to-skip-files-from-syncing) - * [How to 'skip' dot files and folders from syncing?](#how-to-skip-dot-files-and-folders-from-syncing) - * [How to 'skip' files larger than a certain size from syncing?](#how-to-skip-files-larger-than-a-certain-size-from-syncing) - * [How to 'rate limit' the application to control bandwidth consumed for upload & download operations?](#how-to-rate-limit-the-application-to-control-bandwidth-consumed-for-upload--download-operations) - * [How to prevent your local disk from filling up?](#how-to-prevent-your-local-disk-from-filling-up) - * [How are symbolic links handled by the client?](#how-are-symbolic-links-handled-by-the-client) - * [How to sync shared folders (OneDrive Personal)?](#how-to-sync-shared-folders-onedrive-personal) - * [How to sync shared folders (OneDrive Business or Office 365)?](#how-to-sync-shared-folders-onedrive-business-or-office-365) - * [How to sync sharePoint / Office 365 Shared Libraries?](#how-to-sync-sharepoint--office-365-shared-libraries) - * [How to run a user systemd service at boot without user 
login?](#how-to-run-a-user-systemd-service-at-boot-without-user-login) - * [How to create a shareable link?](#how-to-create-a-shareable-link) - * [How to sync both Personal and Business accounts at the same time?](#how-to-sync-both-personal-and-business-accounts-at-the-same-time) - * [How to sync multiple SharePoint Libraries at the same time?](#how-to-sync-multiple-sharepoint-libraries-at-the-same-time) -- [Running 'onedrive' in 'monitor' mode](#running-onedrive-in-monitor-mode) - * [Use webhook to subscribe to remote updates in 'monitor' mode](#use-webhook-to-subscribe-to-remote-updates-in-monitor-mode) - * [More webhook configuration options](#more-webhook-configuration-options) - + [webhook_listening_host and webhook_listening_port](#webhook_listening_host-and-webhook_listening_port) - + [webhook_expiration_interval and webhook_renewal_interval](#webhook_expiration_interval-and-webhook_renewal_interval) -- [Running 'onedrive' as a system service](#running-onedrive-as-a-system-service) - * [OneDrive service running as root user via init.d](#onedrive-service-running-as-root-user-via-initd) - * [OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-root-user-via-systemd-arch-ubuntu-debian-opensuse-fedora) - * [OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)](#onedrive-service-running-as-root-user-via-systemd-red-hat-enterprise-linux-centos-linux) - * [OneDrive service running as a non-root user via systemd (All Linux Distributions)](#onedrive-service-running-as-a-non-root-user-via-systemd-all-linux-distributions) - * [OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-a-non-root-user-via-systemd-with-notifications-enabled-arch-ubuntu-debian-opensuse-fedora) - * [OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)](#onedrive-service-running-as-a-non-root-user-via-runit-antix-devuan-artix-void) -- [Additional Configuration](#additional-configuration) - * [Advanced Configuration of the OneDrive Free Client](#advanced-configuration-of-the-onedrive-free-client) - * [Access OneDrive service through a proxy](#access-onedrive-service-through-a-proxy) - * [Setup selinux for a sync folder outside of the home folder](#setup-selinux-for-a-sync-folder-outside-of-the-home-folder) -- [All available commands](#all-available-commands) - -## Using the client -### Upgrading from 'skilion' client -The 'skilion' version contains a significant number of defects in how the local sync state is managed. When upgrading from the 'skilion' version to this version, it is advisable to stop any service / onedrive process from running and then remove any `items.sqlite3` file from your configuration directory (`~/.config/onedrive/`) as this will force the creation of a new local cache file. 
- -Additionally, if you are using a 'config' file within your configuration directory (`~/.config/onedrive/`), please ensure that you update the `skip_file = ` option as per below: - -**Invalid configuration:** + - [How to change the default configuration of the client?](#how-to-change-the-default-configuration-of-the-client) + - [How to change where my data from Microsoft OneDrive is stored?](#how-to-change-where-my-data-from-microsoft-onedrive-is-stored) + - [How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive?](#how-to-change-what-file-and-directory-permissions-are-assigned-to-data-that-is-downloaded-from-microsoft-onedrive) + - [How are uploads and downloads managed?](#how-are-uploads-and-downloads-managed) + - [How to only sync a specific directory?](#how-to-only-sync-a-specific-directory) + - [How to 'skip' files from syncing?](#how-to-skip-files-from-syncing) + - [How to 'skip' directories from syncing?](#how-to-skip-directories-from-syncing) + - [How to 'skip' .files and .folders from syncing?](#how-to-skip-files-and-folders-from-syncing) + - [How to 'skip' files larger than a certain size from syncing?](#how-to-skip-files-larger-than-a-certain-size-from-syncing) + - [How to 'rate limit' the application to control bandwidth consumed for upload & download operations?](#how-to-rate-limit-the-application-to-control-bandwidth-consumed-for-upload--download-operations) + - [How can I prevent my local disk from filling up?](#how-can-i-prevent-my-local-disk-from-filling-up) + - [How does the client handle symbolic links?](#how-does-the-client-handle-symbolic-links) + - [How to synchronise shared folders (OneDrive Personal)?](#how-to-synchronise-shared-folders-onedrive-personal) + - [How to synchronise shared folders (OneDrive Business or Office 365)?](#how-to-synchronise-shared-folders-onedrive-business-or-office-365) + - [How to synchronise SharePoint / Office 365 Shared Libraries?](#how-to-synchronise-sharepoint--office-365-shared-libraries) + - [How to Create a Shareable Link?](#how-to-create-a-shareable-link) + - [How to Synchronise Both Personal and Business Accounts at once?](#how-to-synchronise-both-personal-and-business-accounts-at-once) + - [How to Synchronise Multiple SharePoint Libraries simultaneously?](#how-to-synchronise-multiple-sharepoint-libraries-simultaneously) + - [How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period?](#how-to-receive-real-time-changes-from-microsoft-onedrive-service-instead-of-waiting-for-the-next-sync-period) + - [How to initiate the client as a background service?](#how-to-initiate-the-client-as-a-background-service) + - [OneDrive service running as root user via init.d](#onedrive-service-running-as-root-user-via-initd) + - [OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-root-user-via-systemd-arch-ubuntu-debian-opensuse-fedora) + - [OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)](#onedrive-service-running-as-root-user-via-systemd-red-hat-enterprise-linux-centos-linux) + - [OneDrive service running as a non-root user via systemd (All Linux Distributions)](#onedrive-service-running-as-a-non-root-user-via-systemd-all-linux-distributions) + - [OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, 
Fedora)](#onedrive-service-running-as-a-non-root-user-via-systemd-with-notifications-enabled-arch-ubuntu-debian-opensuse-fedora) + - [OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)](#onedrive-service-running-as-a-non-root-user-via-runit-antix-devuan-artix-void) + - [How to start a user systemd service at boot without user login?](#how-to-start-a-user-systemd-service-at-boot-without-user-login) + +## Important Notes +### Upgrading from the 'skilion' Client +The 'skilion' version has a significant number of issues in how it manages the local sync state. When upgrading from the 'skilion' client to this client, it's recommended to stop any service or OneDrive process that may be running. Once all OneDrive services are stopped, make sure to remove any old client binaries from your system. + +Furthermore, if you're using a 'config' file within your configuration directory (`~/.config/onedrive/`), please ensure that you update the `skip_file = ` option as shown below: + +**Invalid 'skilion' configuration:** ```text skip_file = ".*|~*" ``` @@ -88,154 +75,270 @@ skip_file = "~*" ``` **Default valid configuration:** ```text -skip_file = "~*|.~*|*.tmp" +skip_file = "~*|.~*|*.tmp|*.swp|*.partial" ``` -Do not use a skip_file entry of `.*` as this will prevent correct searching of local changes to process. - -### Local File and Folder Naming Conventions -The files and directories in the synchronization directory must follow the [Windows naming conventions](https://docs.microsoft.com/windows/win32/fileio/naming-a-file). -The application will attempt to handle instances where you have two files with the same names but with different capitalization. Where there is a namespace clash, the file name which clashes will not be synced. This is expected behavior and won't be fixed. - -### curl compatibility -If your system utilises curl < 7.47.0, curl defaults to HTTP/1.1 for HTTPS operations. The client will use HTTP/1.1. - -If your system utilises curl >= 7.47.0 and < 7.62.0, curl will prefer HTTP/2 for HTTPS but will stick to HTTP/1.1 by default. The client will use HTTP/1.1 for HTTPS operations. +Avoid using a 'skip_file' entry of `.*` as it may prevent the correct detection of local changes to process. The configuration values for 'skip_file' will be checked for validity, and if there is an issue, the following error message will be displayed: +```text +ERROR: Invalid skip_file entry '.*' detected +``` -If your system utilises curl >= 7.62.0, curl defaults to prefer HTTP/2 over HTTP/1.1 by default. The client will utilse HTTP/2 for most HTTPS operations and HTTP/1.1 for others. This difference is governed by the OneDrive platform and not this client. +### Guidelines for Naming Local Files and Folders in the Synchronisation Directory +When naming your files and folders in the synchronisation directory, it is important to follow the [Windows naming conventions](https://docs.microsoft.com/windows/win32/fileio/naming-a-file) for your files and folders. -If you wish to explicitly use HTTP/1.1 you will need to use the `--force-http-11` flag or set the config option `force_http_11 = "true"` to force the application to use HTTP/1.1 otherwise all client operations will use whatever is the curl default for your distribution. +Moreover, Microsoft OneDrive does not adhere to POSIX standards. As a result, if you have two files with identical names differing only in capitalisation, the OneDrive Client for Linux will try to manage this. 
However, in cases of naming conflicts, the conflicting file or folder will not synchronise. This is a deliberate design choice and will not be modified. To avoid such issues, you should rename any conflicting local files or folders. -### Authorize the application with your OneDrive Account -After installing the application you must authorize the application with your OneDrive Account. This is done by running the application without any additional command switches. +### Compatibility with curl +If your system uses curl < 7.47.0, curl will default to HTTP/1.1 for HTTPS operations, and the client will follow suit, using HTTP/1.1. -Note that some companies require to explicitly add this app in [Microsoft MyApps portal](https://myapps.microsoft.com/). To add an (approved) app to your apps, click on the ellipsis in the top-right corner and choose "Request new apps". On the next page you can add this app. If its not listed, you should request through your IT department. +For systems running curl >= 7.47.0 and < 7.62.0, curl will prefer HTTP/2 for HTTPS, but it will still use HTTP/1.1 as the default for these operations. The client will employ HTTP/1.1 for HTTPS operations as well. -You will be asked to open a specific URL by using your web browser where you will have to login into your Microsoft Account and give the application the permission to access your files. After giving permission to the application, you will be redirected to a blank page. Copy the URI of the blank page into the application. -```text -[user@hostname ~]$ onedrive +However, if your system employs curl >= 7.62.0, curl will, by default, prioritise HTTP/2 over HTTP/1.1. In this case, the client will utilise HTTP/2 for most HTTPS operations and stick with HTTP/1.1 for others. Please note that this distinction is governed by the OneDrive platform, not our client. -Authorize this app visiting: +If you explicitly want to use HTTP/1.1, you can do so by using the `--force-http-11` flag or setting the configuration option `force_http_11 = "true"`. This will compel the application to exclusively use HTTP/1.1. Otherwise, all client operations will align with the curl default settings for your distribution. -https://..... +## First Steps +### Authorise the Application with Your Microsoft OneDrive Account +Once you've installed the application, you'll need to authorise it using your Microsoft OneDrive Account. This can be done by simply running the application without any additional command switches. -Enter the response uri: +Please be aware that some companies may require you to explicitly add this app to the [Microsoft MyApps portal](https://myapps.microsoft.com/). To add an approved app to your apps, click on the ellipsis in the top-right corner and select "Request new apps." On the next page, you can add this app. If it's not listed, you should make a request through your IT department. -``` +When you run the application for the first time, you'll be prompted to open a specific URL using your web browser, where you'll need to log in to your Microsoft Account and grant the application permission to access your files. After granting permission to the application, you'll be redirected to a blank page. Simply copy the URI from the blank page and paste it into the application. 
**Example:** -``` +```text [user@hostname ~]$ onedrive -Authorize this app visiting: +Authorise this app by visiting: -https://login.microsoftonline.com/common/oauth2/v2.0/authorize?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient +https://login.microsoftonline.com/common/oauth2/v2.0/authorise?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient -Enter the response uri: https://login.microsoftonline.com/common/oauth2/nativeclient?code= +Enter the response URI from your browser: https://login.microsoftonline.com/common/oauth2/nativeclient?code= -Application has been successfully authorised, however no additional command switches were provided. +The application has been successfully authorised, but no additional command switches were provided. -Please use 'onedrive --help' for further assistance in regards to running this application. +Please use 'onedrive --help' for further assistance on how to run this application. ``` -### Show your configuration -To validate your configuration the application will use, utilize the following: +**Please Note:** Without additional input or configuration, the OneDrive Client for Linux will automatically adhere to default application settings during synchronisation processes with Microsoft OneDrive. + + +### Display Your Applicable Runtime Configuration +To verify the configuration that the application will use, use the following command: ```text onedrive --display-config ``` -This will display all the pertinent runtime interpretation of the options and configuration you are using. Example output is as follows: +This command will display all the relevant runtime interpretations of the options and configurations you are using. An example output is as follows: ```text +Reading configuration file: /home/user/.config/onedrive/config Configuration file successfully loaded onedrive version = vX.Y.Z-A-bcdefghi -Config path = /home/alex/.config/onedrive +Config path = /home/user/.config/onedrive Config file found in config path = true -Config option 'sync_dir' = /home/alex/OneDrive -Config option 'enable_logging' = false +Config option 'drive_id' = +Config option 'sync_dir' = ~/OneDrive ... -Selective sync 'sync_list' configured = false -Config option 'sync_business_shared_folders' = false -Business Shared Folders configured = false Config option 'webhook_enabled' = false ``` +**Important Reminder:** When using multiple OneDrive accounts, it's essential to always use the `--confdir` command followed by the appropriate configuration directory. This ensures that the specific configuration you intend to view is correctly displayed. + +### Understanding OneDrive Client for Linux Operational Modes +There are two modes of operation when using the client: +1. Standalone sync mode that performs a single sync action against Microsoft OneDrive. +2. Ongoing sync mode that continuously syncs your data with Microsoft OneDrive. + +**Important Information:** The default setting for the OneDrive Client on Linux will sync all data from your Microsoft OneDrive account to your local device. To avoid this and select specific items for synchronisation, you should explore setting up 'Client Side Filtering' rules. 
This will help you manage and specify what exactly gets synced with your Microsoft OneDrive account. + +#### Standalone Synchronisation Operational Mode (Standalone Mode) +This method of use can be employed by issuing the following option to the client: +```text +onedrive --sync +``` +For simplicity, this can be shortened to the following: +```text +onedrive -s +``` + +#### Ongoing Synchronisation Operational Mode (Monitor Mode) +This method of use can be utilised by issuing the following option to the client: +```text +onedrive --monitor +``` +For simplicity, this can be shortened to the following: +```text +onedrive -m +``` +**Note:** This method of use is typically employed when enabling a systemd service to run the application in the background. + +Two common errors can occur when using monitor mode: +* Initialisation failure +* Unable to add a new inotify watch + +Both of these errors are local environment issues, where the following system variables need to be increased as the current system values are potentially too low: +* `fs.file-max` +* `fs.inotify.max_user_watches` + +To determine what the existing values are on your system, use the following commands: +```text +sysctl fs.file-max +sysctl fs.inotify.max_user_watches +``` +Alternatively, when running the client with increased verbosity (see below), the client will display what the current configured system maximum values are: +```text +... +All application operations will be performed in: /home/user/OneDrive +OneDrive synchronisation interval (seconds): 300 +Maximum allowed open files: 393370 <-- This is the current operating system fs.file-max value +Maximum allowed inotify watches: 29374 <-- This is the current operating system fs.inotify.max_user_watches value +Initialising filesystem inotify monitoring ... +... +``` +To determine what value to change to, you need to count all the files and folders in your configured 'sync_dir': +```text +cd /path/to/your/sync/dir +ls -laR | wc -l +``` + +To make a change to these variables using your file and folder count, use the following process: +```text +sudo sysctl fs.file-max= +sudo sysctl fs.inotify.max_user_watches= +``` +Once these values are changed, you will need to restart your client so that the new values are detected and used. + +To make these changes permanent on your system, refer to your OS reference documentation. + +### Increasing application logging level +When running a sync (`--sync`) or using monitor mode (`--monitor`), it may be desirable to see additional information regarding the progress and operation of the client. For example, for a `--sync` command, this would be: +```text +onedrive --sync --verbose +``` +Furthermore, for simplicity, this can be simplified to the following: +``` +onedrive -s -v +``` +Adding `--verbose` twice will enable debug logging output. This is generally required when raising a bug report or needing to understand a problem. + +### Using 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive +Client Side Filtering in the context of the OneDrive Client for Linux refers to user-configured rules that determine what files and directories the client should upload or download from Microsoft OneDrive. These rules are crucial for optimising synchronisation, especially when dealing with large numbers of files or specific file types. 
The OneDrive Client for Linux offers several configuration options to facilitate this: + +* **skip_dir:** This option allows the user to specify directories that should not be synchronised with OneDrive. It's particularly useful for omitting large or irrelevant directories from the sync process. + +* **skip_dotfiles:** Dotfiles, usually configuration files or scripts, can be excluded from the sync. This is useful for users who prefer to keep these files local. + +* **skip_file:** Specific files can be excluded from synchronisation using this option. It provides flexibility in selecting which files are essential for cloud storage. + +* **skip_symlinks:** Symlinks often point to files outside the OneDrive directory or to locations that are not relevant for cloud storage. This option prevents them from being included in the sync. + +Additionally, the OneDrive Client for Linux allows the implementation of Client Side Filtering rules through a 'sync_list' file. This file explicitly states which directories or files should be included in the synchronisation. By default, any item not listed in the 'sync_list' file is excluded. This method offers a more granular approach to synchronisation, ensuring that only the necessary data is transferred to and from Microsoft OneDrive. + +These configurable options and the 'sync_list' file provide users with the flexibility to tailor the synchronisation process to their specific needs, conserving bandwidth and storage space while ensuring that important files are always backed up and accessible. + +**Note:** After changing any Client Side Filtering rule, you must perform a full re-synchronisation. + ### Testing your configuration -You are able to test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded or removed, however the application will display what 'would' have occurred. For example: +You can test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded, or removed; however, the application will display what 'would' have occurred. For example: ```text -onedrive --synchronize --verbose --dry-run +onedrive --sync --verbose --dry-run +Reading configuration file: /home/user/.config/onedrive/config +Configuration file successfully loaded +Using 'user' Config Dir: /home/user/.config/onedrive DRY-RUN Configured. Output below shows what 'would' have occurred. -Loading config ... -Using Config Dir: /home/user/.config/onedrive -Initializing the OneDrive API ... +DRY-RUN: Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations +DRY RUN: Not creating backup config file as --dry-run has been used +DRY RUN: Not updating hash files as --dry-run has been used +Checking Application Version ... +Attempting to initialise the OneDrive API ... +Configuring Global Azure AD Endpoints +The OneDrive API was initialised successfully Opening the item database ... -All operations will be performed in: /home/user/OneDrive -Initializing the Synchronization Engine ... -Account Type: personal -Default Drive ID: -Default Root ID: -Remaining Free Space: 5368709120 -Fetching details for OneDrive Root -OneDrive Root exists in the database -Syncing changes from OneDrive ... -Applying changes of Path ID: -Uploading differences of . 
-Processing root +Sync Engine Initialised with new Onedrive API instance +Application version: vX.Y.Z-A-bcdefghi +Account Type: +Default Drive ID: +Default Root ID: +Remaining Free Space: 1058488129 KB +All application operations will be performed in: /home/user/OneDrive +Fetching items from the OneDrive API for Drive ID: .. +... +Performing a database consistency and integrity check on locally stored data ... +Processing DB entries for this Drive ID: +Processing ~/OneDrive The directory has not changed -Uploading new items of . -OneDrive Client requested to create remote path: ./newdir -The requested directory to create was not found on OneDrive - creating remote directory: ./newdir -Successfully created the remote directory ./newdir on OneDrive -Uploading new file ./newdir/newfile.txt ... done. -Remaining free space: 5368709076 -Applying changes of Path ID: +... +Scanning local filesystem '~/OneDrive' for new data to upload ... +... +Performing a final true-up scan of online data from Microsoft OneDrive +Fetching items from the OneDrive API for Drive ID: .. + +Sync with Microsoft OneDrive is complete ``` -**Note:** `--dry-run` can only be used with `--synchronize`. It cannot be used with `--monitor` and will be ignored. +### Performing a sync with Microsoft OneDrive +By default, all files are downloaded in `~/OneDrive`. This download location is controlled by the 'sync_dir' config option. + +After authorising the application, a sync of your data can be performed by running: +```text +onedrive --sync +``` +This will synchronise files from your Microsoft OneDrive account to your `~/OneDrive` local directory or to your specified 'sync_dir' location. -### Performing a sync -By default all files are downloaded in `~/OneDrive`. After authorizing the application, a sync of your data can be performed by running: +If you prefer to use your local files as stored in `~/OneDrive` as your 'source of truth,' use the following sync command: ```text -onedrive --synchronize +onedrive --sync --local-first ``` -This will synchronize files from your OneDrive account to your `~/OneDrive` local directory. -If you prefer to use your local files as stored in `~/OneDrive` as the 'source of truth' use the following sync command: +### Performing a single directory synchronisation with Microsoft OneDrive +In some cases, it may be desirable to synchronise a single directory under ~/OneDrive without having to change your client configuration. To do this, use the following command: ```text -onedrive --synchronize --local-first +onedrive --sync --single-directory '' ``` -### Performing a single directory sync -In some cases it may be desirable to sync a single directory under ~/OneDrive without having to change your client configuration. To do this use the following command: +**Example:** If the full path is `~/OneDrive/mydir`, the command would be `onedrive --sync --single-directory 'mydir'` + +### Performing a 'one-way' download synchronisation with Microsoft OneDrive +In some cases, it may be desirable to 'download only' from Microsoft OneDrive. To do this, use the following command: ```text -onedrive --synchronize --single-directory '' +onedrive --sync --download-only ``` +This will download all the content from Microsoft OneDrive to your `~/OneDrive` location. Any files that are deleted online remain locally and will not be removed. 
-Example: If the full path is `~/OneDrive/mydir`, the command would be `onedrive --synchronize --single-directory 'mydir'` +However, in some circumstances, it may be desirable to clean up local files that have been removed online. To do this, use the following command: -### Performing a 'one-way' download sync -In some cases it may be desirable to 'download only' from OneDrive. To do this use the following command: ```text -onedrive --synchronize --download-only +onedrive --sync --download-only --cleanup-local-files ``` -### Performing a 'one-way' upload sync -In some cases it may be desirable to 'upload only' to OneDrive. To do this use the following command: +### Performing a 'one-way' upload synchronisation with Microsoft OneDrive +In certain scenarios, you might need to perform an 'upload only' operation to Microsoft OneDrive. This means that you'll be uploading data to OneDrive, but not synchronising any changes or additions made elsewhere. Use this command to initiate an upload-only synchronisation: + ```text -onedrive --synchronize --upload-only +onedrive --sync --upload-only ``` -**Note:** If a file or folder is present on OneDrive, that was previously synced and now does not exist locally, that item it will be removed from OneDrive. If the data on OneDrive should be kept, the following should be used: + +**Important Points:** +- The 'upload only' mode operates independently of OneDrive's online content. It doesn't check or sync with what's already stored on OneDrive. It only uploads data from the local client. +- If a local file or folder that was previously synchronised with Microsoft OneDrive is now missing locally, it will be deleted from OneDrive during this operation. + +To ensure that all data on Microsoft OneDrive remains intact (e.g., preventing deletion of items on OneDrive if they're deleted locally), use this command instead: + ```text -onedrive --synchronize --upload-only --no-remote-delete +onedrive --sync --upload-only --no-remote-delete ``` -**Note:** The operation of 'upload only' does not request data from OneDrive about what 'other' data exists online. The client only knows about the data that 'this' client uploaded, thus any files or folders created or uploaded outside of this client will remain untouched online. -### Performing a selective sync via 'sync_list' file -Selective sync allows you to sync only specific files and directories. -To enable selective sync create a file named `sync_list` in your application configuration directory (default is `~/.config/onedrive`). +**Understanding both Commands:** +- `--upload-only`: This command will only upload local changes to OneDrive. These changes can include additions, modifications, moves, and deletions of files and folders. +- `--no-remote-delete`: Adding this command prevents the deletion of any items on OneDrive, even if they're deleted locally. This creates a one-way archive on OneDrive where files are only added and never removed. + +### Performing a selective synchronisation via 'sync_list' file +Selective synchronisation allows you to sync only specific files and directories. +To enable selective synchronisation, create a file named `sync_list` in your application configuration directory (default is `~/.config/onedrive`). Important points to understand before using 'sync_list'. -* 'sync_list' excludes _everything_ by default on onedrive. +* 'sync_list' excludes _everything_ by default on OneDrive. * 'sync_list' follows an _"exclude overrides include"_ rule, and requires **explicit inclusion**. 
* Order exclusions before inclusions, so that anything _specifically included_ is included. -* How and where you place your `/` matters for excludes and includes in sub directories. +* How and where you place your `/` matters for excludes and includes in subdirectories. Each line of the file represents a relative path from your `sync_dir`. All files and directories not matching any line of the file will be skipped during all operations. @@ -246,7 +349,7 @@ Here is an example of `sync_list`: # # The ordering of entries is highly recommended - exclusions before inclusions # -# Exclude temp folder(s) or file(s) under Documents folder(s), anywhere in Onedrive +# Exclude temp folder(s) or file(s) under Documents folder(s), anywhere in OneDrive !Documents/temp* # # Exclude secret data folder in root directory only @@ -255,28 +358,28 @@ Here is an example of `sync_list`: # Include everything else in root directory /* # -# Include my Backup folder(s) or file(s) anywhere on Onedrive +# Include my Backup folder(s) or file(s) anywhere on OneDrive Backup # # Include my Backup folder in root /Backup/ # -# Include Documents folder(s) anywhere in Onedrive +# Include Documents folder(s) anywhere in OneDrive Documents/ # -# Include all PDF files in Documents folder(s), anywhere in Onedrive +# Include all PDF files in Documents folder(s), anywhere in OneDrive Documents/*.pdf # -# Include this single document in Documents folder(s), anywhere in Onedrive +# Include this single document in Documents folder(s), anywhere in OneDrive Documents/latest_report.docx # -# Include all Work/Project directories or files, inside 'Work' folder(s), anywhere in Onedrive +# Include all Work/Project directories or files, inside 'Work' folder(s), anywhere in OneDrive Work/Project* # -# Include all "notes.txt" files, anywhere in Onedrive +# Include all "notes.txt" files, anywhere in OneDrive notes.txt # -# Include /Blender in the ~Onedrive root but not if elsewhere in Onedrive +# Include /Blender in the ~OneDrive root but not if elsewhere in OneDrive /Blender # # Include these directories(or files) in 'Pictures' folder(s), that have a space in their name @@ -293,90 +396,83 @@ The following are supported for pattern matching and exclusion rules: * Use the `*` to wildcard select any characters to match for the item to be included * Use either `!` or `-` characters at the start of the line to exclude an otherwise included item +**Note:** When enabling the use of 'sync_list,' utilise the `--display-config` option to validate that your configuration will be used by the application, and test your configuration by adding `--dry-run` to ensure the client will operate as per your requirement. -**Note:** When enabling the use of 'sync_list' utilise the `--display-config` option to validate that your configuration will be used by the application, and test your configuration by adding `--dry-run` to ensure the client will operate as per your requirement. 
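+
+As a suggested workflow only (not application output), a 'sync_list' change can be validated before a real sync by combining the two options mentioned above:
+```text
+onedrive --display-config
+onedrive --sync --verbose --dry-run
+```
+If the `--dry-run` output only shows the items you expect to be processed, your 'sync_list' rules are behaving as intended. As noted below, a full `--resync` is still required for the new rules to take effect.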
- -**Note:** After changing the sync_list, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync` +**Note:** After changing the sync_list, you must perform a full re-synchronisation by adding `--resync` to your existing command line - for example: `onedrive --sync --resync` **Note:** In some circumstances, it may be required to sync all the individual files within the 'sync_dir', but due to frequent name change / addition / deletion of these files, it is not desirable to constantly change the 'sync_list' file to include / exclude these files and force a resync. To assist with this, enable the following in your configuration file: ```text sync_root_files = "true" ``` -This will tell the application to sync any file that it finds in your 'sync_dir' root by default. +This will tell the application to sync any file that it finds in your 'sync_dir' root by default, negating the need to constantly update your 'sync_list' file. ### Performing a --resync -If you modify any of the following configuration items, you will be required to perform a `--resync` to ensure your client is syncing your data with the updated configuration: +If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration: +* drive_id * sync_dir -* skip_dir * skip_file -* drive_id -* Modifying sync_list -* Modifying business_shared_folders +* skip_dir +* skip_dotfiles +* skip_symlinks +* sync_business_shared_items +* Creating, Modifying or Deleting the 'sync_list' file -Additionally, you may choose to perform a `--resync` if you feel that this action needs to be taken to ensure your data is in sync. If you are using this switch simply because you dont know the sync status, you can query the actual sync status using `--display-sync-status`. +Additionally, you might opt for a `--resync` if you think it's necessary to ensure your data remains in sync. If you're using this switch simply because you're unsure of the sync status, you can check the actual sync status using `--display-sync-status`. -When using `--resync`, the following warning and advice will be presented: +When you use `--resync`, you'll encounter the following warning and advice: ```text -The use of --resync will remove your local 'onedrive' client state, thus no record will exist regarding your current 'sync status' -This has the potential to overwrite local versions of files with potentially older versions downloaded from OneDrive which can lead to data loss -If in-doubt, backup your local data first before proceeding with --resync +Using --resync will delete your local 'onedrive' client state, so there won't be a record of your current 'sync status.' +This may potentially overwrite local versions of files with older versions downloaded from OneDrive, leading to local data loss. +If in doubt, back up your local data before using --resync. -Are you sure you wish to proceed with --resync? [Y/N] +Are you sure you want to proceed with --resync? [Y/N] ``` -To proceed with using `--resync`, you must type 'y' or 'Y' to allow the application to continue. +To proceed with `--resync`, you must type 'y' or 'Y' to allow the application to continue. -**Note:** It is highly recommended to only use `--resync` if the application advises you to use it. Do not just blindly set the application to start with `--resync` as the default option. 
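+
+For example, before acknowledging the prompt, you can confirm whether a resync is actually required by querying your current sync status, as mentioned above:
+```text
+onedrive --display-sync-status
+```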
+**Note:** It's highly recommended to use `--resync` only if the application prompts you to do so. Don't blindly set the application to start with `--resync` as the default option. -**Note:** In some automated environments (and it is 100% assumed you *know* what you are doing because of automation), in order to avoid this 'proceed with acknowledgement' requirement, add `--resync-auth` to automatically acknowledge the prompt. +**Note:** In certain automated environments (assuming you know what you're doing due to automation), to avoid the 'proceed with acknowledgement' requirement, add `--resync-auth` to automatically acknowledge the prompt. ### Performing a --force-sync without a --resync or changing your configuration In some cases and situations, you may have configured the application to skip certain files and folders using 'skip_file' and 'skip_dir' configuration. You then may have a requirement to actually sync one of these items, but do not wish to modify your configuration, nor perform an entire `--resync` twice. -The `--force-sync` option allows you to sync a specific directory, ignoring your 'skip_file' and 'skip_dir' configuration and negating the requirement to perform a `--resync` +The `--force-sync` option allows you to sync a specific directory, ignoring your 'skip_file' and 'skip_dir' configuration and negating the requirement to perform a `--resync`. -In order to use this option, you must run the application manually in the following manner: +To use this option, you must run the application manually in the following manner: ```text -onedrive --synchronize --single-directory '' --force-sync +onedrive --sync --single-directory '' --force-sync ``` -When using `--force-sync`, the following warning and advice will be presented: +When using `--force-sync`, you'll encounter the following warning and advice: ```text -WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --synchronize --single-directory --force-sync being used +WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used -The use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts. -By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync. +Using --force-sync will reconfigure the application to use defaults. This may have unknown future impacts. +By proceeding with this option, you accept any impacts, including potential data loss resulting from using --force-sync. -Are you sure you wish to proceed with --force-sync [Y/N] +Are you sure you want to proceed with --force-sync [Y/N] ``` -To proceed with using `--force-sync`, you must type 'y' or 'Y' to allow the application to continue. - -### Increasing logging level -When running a sync it may be desirable to see additional information as to the progress and operation of the client. To do this, use the following command: -```text -onedrive --synchronize --verbose -``` +To proceed with `--force-sync`, you must type 'y' or 'Y' to allow the application to continue. -### Client Activity Log -When running onedrive all actions can be logged to a separate log file. This can be enabled by using the `--enable-logging` flag. By default, log files will be written to `/var/log/onedrive/` +### Enabling the Client Activity Log +When running onedrive, all actions can be logged to a separate log file. 
This can be enabled by using the `--enable-logging` flag. By default, log files will be written to `/var/log/onedrive/` and will be in the format of `%username%.onedrive.log`, where `%username%` represents the user who ran the client to allow easy sorting of user to client activity log. -**Note:** You will need to ensure the existence of this directory, and that your user has the applicable permissions to write to this directory or the following warning will be printed: +**Note:** You will need to ensure the existence of this directory and that your user has the applicable permissions to write to this directory; otherwise, the following error message will be printed: ```text -Unable to access /var/log/onedrive/ -Please manually create '/var/log/onedrive/' and set appropriate permissions to allow write access -The requested client activity log will instead be located in the users home directory +ERROR: Unable to access /var/log/onedrive +ERROR: Please manually create '/var/log/onedrive' and set appropriate permissions to allow write access +ERROR: The requested client activity log will instead be located in your user's home directory ``` -On many systems this can be achieved by +On many systems, this can be achieved by performing the following: ```text sudo mkdir /var/log/onedrive sudo chown root:users /var/log/onedrive sudo chmod 0775 /var/log/onedrive ``` -All log files will be in the format of `%username%.onedrive.log`, where `%username%` represents the user who ran the client. - Additionally, you need to ensure that your user account is part of the 'users' group: ``` cat /etc/group | grep users @@ -387,837 +483,409 @@ If your user is not part of this group, then you need to add your user to this g sudo usermod -a -G users ``` -You then need to 'logout' of all sessions / SSH sessions to login again to have the new group access applied. - - -**Note:** -To use a different log directory rather than the default above, add the following as a configuration option to `~/.config/onedrive/config`: -```text -log_dir = "/path/to/location/" +If you need to make a group modification, you will need to 'logout' of all sessions / SSH sessions to log in again to have the new group access applied. + +If the client is unable to write the client activity log, the following error message will be printed: +```text +ERROR: Unable to write the activity log to /var/log/onedrive/%username%.onedrive.log +ERROR: Please set appropriate permissions to allow write access to the logging directory for your user account +ERROR: The requested client activity log will instead be located in your user's home directory +``` + +If you receive this error message, you will need to diagnose why your system cannot write to the specified file location. + +#### Client Activity Log Example: +An example of a client activity log for the command `onedrive --sync --enable-logging` is below: +```text +2023-Sep-27 08:16:00.1128806 Configuring Global Azure AD Endpoints +2023-Sep-27 08:16:00.1160620 Sync Engine Initialised with new Onedrive API instance +2023-Sep-27 08:16:00.5227122 All application operations will be performed in: /home/user/OneDrive +2023-Sep-27 08:16:00.5227977 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:16:00.7780979 Processing changes and items received from Microsoft OneDrive ... +2023-Sep-27 08:16:00.7781548 Performing a database consistency and integrity check on locally stored data ... +2023-Sep-27 08:16:00.7785889 Scanning the local file system '~/OneDrive' for new data to upload ... 
+2023-Sep-27 08:16:00.7813710 Performing a final true-up scan of online data from Microsoft OneDrive +2023-Sep-27 08:16:00.7814668 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:16:01.0141776 Processing changes and items received from Microsoft OneDrive ... +2023-Sep-27 08:16:01.0142454 Sync with Microsoft OneDrive is complete +``` +An example of a client activity log for the command `onedrive --sync --verbose --enable-logging` is below: +```text +2023-Sep-27 08:20:05.4600464 Checking Application Version ... +2023-Sep-27 08:20:05.5235017 Attempting to initialise the OneDrive API ... +2023-Sep-27 08:20:05.5237207 Configuring Global Azure AD Endpoints +2023-Sep-27 08:20:05.5238087 The OneDrive API was initialised successfully +2023-Sep-27 08:20:05.5238536 Opening the item database ... +2023-Sep-27 08:20:05.5270612 Sync Engine Initialised with new Onedrive API instance +2023-Sep-27 08:20:05.9226535 Application version: vX.Y.Z-A-bcdefghi +2023-Sep-27 08:20:05.9227079 Account Type: +2023-Sep-27 08:20:05.9227360 Default Drive ID: +2023-Sep-27 08:20:05.9227550 Default Root ID: +2023-Sep-27 08:20:05.9227862 Remaining Free Space: +2023-Sep-27 08:20:05.9228296 All application operations will be performed in: /home/user/OneDrive +2023-Sep-27 08:20:05.9228989 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:20:06.2076569 Performing a database consistency and integrity check on locally stored data ... +2023-Sep-27 08:20:06.2077121 Processing DB entries for this Drive ID: +2023-Sep-27 08:20:06.2078408 Processing ~/OneDrive +2023-Sep-27 08:20:06.2078739 The directory has not changed +2023-Sep-27 08:20:06.2079783 Processing Attachments +2023-Sep-27 08:20:06.2080071 The directory has not changed +2023-Sep-27 08:20:06.2081585 Processing Attachments/file.docx +2023-Sep-27 08:20:06.2082079 The file has not changed +2023-Sep-27 08:20:06.2082760 Processing Documents +2023-Sep-27 08:20:06.2083225 The directory has not changed +2023-Sep-27 08:20:06.2084284 Processing Documents/file.log +2023-Sep-27 08:20:06.2084886 The file has not changed +2023-Sep-27 08:20:06.2085150 Scanning the local file system '~/OneDrive' for new data to upload ... +2023-Sep-27 08:20:06.2087133 Skipping item - excluded by sync_list config: ./random_25k_files +2023-Sep-27 08:20:06.2116235 Performing a final true-up scan of online data from Microsoft OneDrive +2023-Sep-27 08:20:06.2117190 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:20:06.5049743 Sync with Microsoft OneDrive is complete +``` + +#### Client Activity Log Differences +Despite application logging being enabled as early as possible, the following log entries will be missing from the client activity log when compared to console output: + +**No user configuration file:** +```text +No user or system config file found, using application defaults +Using 'user' configuration path for application state data: /home/user/.config/onedrive +Using the following path to store the runtime application log: /var/log/onedrive +``` +**User configuration file:** +```text +Reading configuration file: /home/user/.config/onedrive/config +Configuration file successfully loaded +Using 'user' configuration path for application state data: /home/user/.config/onedrive +Using the following path to store the runtime application log: /var/log/onedrive ``` -Trailing slash required -An example of the log file is below: -```text -2018-Apr-07 17:09:32.1162837 Loading config ... 
-2018-Apr-07 17:09:32.1167908 No config file found, using defaults -2018-Apr-07 17:09:32.1170626 Initializing the OneDrive API ... -2018-Apr-07 17:09:32.5359143 Opening the item database ... -2018-Apr-07 17:09:32.5515295 All operations will be performed in: /root/OneDrive -2018-Apr-07 17:09:32.5518387 Initializing the Synchronization Engine ... -2018-Apr-07 17:09:36.6701351 Applying changes of Path ID: -2018-Apr-07 17:09:37.4434282 Adding OneDrive Root to the local database -2018-Apr-07 17:09:37.4478342 The item is already present -2018-Apr-07 17:09:37.4513752 The item is already present -2018-Apr-07 17:09:37.4550062 The item is already present -2018-Apr-07 17:09:37.4586444 The item is already present -2018-Apr-07 17:09:37.7663571 Adding OneDrive Root to the local database -2018-Apr-07 17:09:37.7739451 Fetching details for OneDrive Root -2018-Apr-07 17:09:38.0211861 OneDrive Root exists in the database -2018-Apr-07 17:09:38.0215375 Uploading differences of . -2018-Apr-07 17:09:38.0220464 Processing -2018-Apr-07 17:09:38.0224884 The directory has not changed -2018-Apr-07 17:09:38.0229369 Processing -2018-Apr-07 17:09:38.02338 The directory has not changed -2018-Apr-07 17:09:38.0237678 Processing -2018-Apr-07 17:09:38.0242285 The directory has not changed -2018-Apr-07 17:09:38.0245977 Processing -2018-Apr-07 17:09:38.0250788 The directory has not changed -2018-Apr-07 17:09:38.0254657 Processing -2018-Apr-07 17:09:38.0259923 The directory has not changed -2018-Apr-07 17:09:38.0263547 Uploading new items of . -2018-Apr-07 17:09:38.5708652 Applying changes of Path ID: -``` - -### Notifications -If notification support is compiled in, the following events will trigger a notification within the display manager session: +### GUI Notifications +If notification support has been compiled in (refer to GUI Notification Support in install.md .. ADD LINK LATER), the following events will trigger a GUI notification within the display manager session: * Aborting a sync if .nosync file is found +* Skipping a particular item due to an invalid name +* Skipping a particular item due to an invalid symbolic link +* Skipping a particular item due to an invalid UTF sequence +* Skipping a particular item due to an invalid character encoding sequence * Cannot create remote directory -* Cannot upload file changes +* Cannot upload file changes (free space issue, breaches maximum allowed size, breaches maximum OneDrive Account path length) * Cannot delete remote file / folder * Cannot move remote file / folder +* When a re-authentication is required +* When a new client version is available +* Files that fail to upload +* Files that fail to download - -### Handling a OneDrive account password change -If you change your OneDrive account password, the client will no longer be authorised to sync, and will generate the following error: +### Handling a Microsoft OneDrive Account Password Change +If you change your Microsoft OneDrive Account Password, the client will no longer be authorised to sync, and will generate the following error upon next application run: ```text -ERROR: OneDrive returned a 'HTTP 401 Unauthorized' - Cannot Initialize Sync Engine +AADSTS50173: The provided grant has expired due to it being revoked, a fresh auth token is needed. The user might have changed or reset their password. The grant was issued on '' and the TokensValidFrom date (before which tokens are not valid) for this user is ''. + +ERROR: You will need to issue a --reauth and re-authorise this client to obtain a fresh auth token. 
``` + To re-authorise the client, follow the steps below: -1. If running the client as a service (init.d or systemd), stop the service -2. Run the command `onedrive --reauth`. This will clean up the previous authorisation, and will prompt you to re-authorise the client as per initial configuration. -3. Restart the client if running as a service or perform a manual sync +1. If running the client as a system service (init.d or systemd), stop the applicable system service +2. Run the command `onedrive --reauth`. This will clean up the previous authorisation, and will prompt you to re-authorise the client as per initial configuration. Please note, if you are using `--confdir` as part of your application runtime configuration, you must include this when telling the client to re-authenticate. +3. Restart the client if running as a system service or perform the standalone sync operation again The application will now sync with OneDrive with the new credentials. -## Configuration +### Determining the synchronisation result +When the client has finished syncing without errors, the following will be displayed: +``` +Sync with Microsoft OneDrive is complete +``` -Configuration is determined by three layers: the default values, values set in the configuration file, and values passed in via the command line. The default values provide a reasonable default, and configuration is optional. +If any items failed to sync, the following will be displayed: +``` +Sync with Microsoft OneDrive has completed, however there are items that failed to sync. +``` +A file list of failed upload or download items will also be listed to allow you to determine your next steps. -Most command line options have a respective configuration file setting. +In order to fix the upload or download failures, you may need to: +* Review the application output to determine what happened +* Re-try your command utilising a resync to ensure your system is correctly synced with your Microsoft OneDrive Account -If you want to change the defaults, you can copy and edit the included config file into your configuration directory. Valid default directories for the config file are: +## Frequently Asked Configuration Questions + +### How to change the default configuration of the client? +Configuration is determined by three layers, and applied in the following order: +* Application default values +* Values that are set in the configuration file +* Values that are passed in via the command line at application runtime. These values will override any configuration file set value. + +The default application values provide a reasonable operational default, and additional configuration is entirely optional. + +If you want to change the application defaults, you can download a copy of the config file into your application configuration directory. Valid default directories for the config file are: * `~/.config/onedrive` * `/etc/onedrive` -**Example:** +**Example:** To download a copy of the config file, use the following: ```text mkdir -p ~/.config/onedrive wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/onedrive/config -nano ~/.config/onedrive/config ``` -This file does not get created by default, and should only be created if you want to change the 'default' operational parameters. 
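+
+As an illustrative sketch only, a customised config file needs to contain just the options you wish to change, with each value enclosed in quotes; the values shown below are simply the application defaults:
+```text
+sync_dir = "~/OneDrive"
+skip_file = "~*|.~*|*.tmp|*.swp|*.partial"
+monitor_interval = "300"
+```
+Any option not present in the file will continue to use the application default value.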
-See the [config](https://raw.githubusercontent.com/abraunegg/onedrive/master/config) file for the full list of options, and [All available commands](https://github.com/abraunegg/onedrive/blob/master/docs/USAGE.md#all-available-commands) for all possible keys and their default values. +For full configuration options and CLI switches, please refer to application-config-options.md -**Note:** The location of the application configuration information can also be specified by using the `--confdir` configuration option which can be passed in at client run-time. +### How to change where my data from Microsoft OneDrive is stored? +By default, the location where your Microsoft OneDrive data is stored, is within your Home Directory under a directory called 'OneDrive'. This replicates as close as possible where the Microsoft Windows OneDrive client stores data. -### The default configuration file is listed below: -```text -# Configuration for OneDrive Linux Client -# This file contains the list of supported configuration fields -# with their default values. -# All values need to be enclosed in quotes -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. -# -# sync_dir = "~/OneDrive" -# skip_file = "~*|.~*|*.tmp" -# monitor_interval = "300" -# skip_dir = "" -# log_dir = "/var/log/onedrive/" -# drive_id = "" -# upload_only = "false" -# check_nomount = "false" -# check_nosync = "false" -# download_only = "false" -# disable_notifications = "false" -# disable_upload_validation = "false" -# enable_logging = "false" -# force_http_11 = "false" -# local_first = "false" -# no_remote_delete = "false" -# skip_symlinks = "false" -# debug_https = "false" -# skip_dotfiles = "false" -# skip_size = "1000" -# dry_run = "false" -# min_notify_changes = "5" -# monitor_log_frequency = "6" -# monitor_fullscan_frequency = "12" -# sync_root_files = "false" -# classify_as_big_delete = "1000" -# user_agent = "" -# remove_source_files = "false" -# skip_dir_strict_match = "false" -# application_id = "" -# resync = "false" -# resync_auth = "false" -# bypass_data_preservation = "false" -# azure_ad_endpoint = "" -# azure_tenant_id = "common" -# sync_business_shared_folders = "false" -# sync_dir_permissions = "700" -# sync_file_permissions = "600" -# rate_limit = "131072" -# webhook_enabled = "false" -# webhook_public_url = "" -# webhook_listening_host = "" -# webhook_listening_port = "8888" -# webhook_expiration_interval = "86400" -# webhook_renewal_interval = "43200" -# space_reservation = "50" -# display_running_config = "false" -# read_only_auth_scope = "false" -# cleanup_local_files = "false" -# operation_timeout = "3600" -# dns_timeout = "60" -# connect_timeout = "10" -# data_timeout = "600" -# ip_protocol_version = "0" -``` - -### 'config' file configuration examples: -The below are 'config' file examples to assist with configuration of the 'config' file: - -#### sync_dir -Configure your local sync directory location. - -Example: -```text -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. 
-# -sync_dir="~/MyDirToSync" -# skip_file = "~*|.~*|*.tmp" -# monitor_interval = "300" -# skip_dir = "" -# log_dir = "/var/log/onedrive/" -``` -**Please Note:** -Proceed with caution here when changing the default sync dir from `~/OneDrive` to `~/MyDirToSync` +To change this location, the application configuration option 'sync_dir' is used to specify a new local directory where your Microsoft OneDrive data should be stored. -The issue here is around how the client stores the sync_dir path in the database. If the config file is missing, or you don't use the `--syncdir` parameter - what will happen is the client will default back to `~/OneDrive` and 'think' that either all your data has been deleted - thus delete the content on OneDrive, or will start downloading all data from OneDrive into the default location. +**Important Note:** If your `sync_dir` is pointing to a network mount point (a network share via NFS, Windows Network Share, Samba Network Share) these types of network mount points do not support 'inotify', thus tracking real-time changes via inotify of local files is not possible when using 'Monitor Mode'. Local filesystem changes will be replicated between the local filesystem and Microsoft OneDrive based on the `monitor_interval` value. This is not something (inotify support for NFS, Samba) that this client can fix. -**Note:** After changing `sync_dir`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync` - -**Important Note:** If your `sync_dir` is pointing to a network mount point (a network share via NFS, Windows Network Share, Samba Network Share) these types of network mount points do not support 'inotify', thus tracking real-time changes via inotify of local files is not possible. Local filesystem changes will be replicated between the local filesystem and OneDrive based on the `monitor_interval` value. This is not something (inotify support for NFS, Samba) that this client can fix. - -#### sync_dir directory and file permissions -The following are directory and file default permissions for any new directory or file that is created: +### How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive? +The following are the application default permissions for any new directory or file that is created locally when downloaded from Microsoft OneDrive: * Directories: 700 - This provides the following permissions: `drwx------` * Files: 600 - This provides the following permissions: `-rw-------` -To change the default permissions, update the following 2 configuration options with the required permissions. Utilise the [Unix Permissions Calculator](https://chmod-calculator.com/) to assist in determining the required permissions. +These default permissions align to the security principal of 'least privilege' so that only you should have access to your data that you download from Microsoft OneDrive. +To alter these default permissions, you can adjust the values of two configuration options as follows. You can also use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. ```text -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. -# -... 
-# sync_business_shared_folders = "false" sync_dir_permissions = "700" sync_file_permissions = "600" - ``` -**Important:** Special permission bits (setuid, setgid, sticky bit) are not supported. Valid permission values are from `000` to `777` only. - -#### skip_dir -This option is used to 'skip' certain directories and supports pattern matching. - -Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. - -**Important:** Entries under `skip_dir` are relative to your `sync_dir` path. - -Example: -```text -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. -# -# sync_dir = "~/OneDrive" -# skip_file = "~*|.~*|*.tmp" -# monitor_interval = "300" -skip_dir = "Desktop|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/WindowsPowerShell" -# log_dir = "/var/log/onedrive/" -``` - -**Note:** The `skip_dir` can be specified multiple times, for example: -```text -skip_dir = "SomeDir|OtherDir|ThisDir|ThatDir" -skip_dir = "/Path/To/A/Directory" -skip_dir = "/Another/Path/To/Different/Directory" -``` -This will be interpreted the same as: -```text -skip_dir = "SomeDir|OtherDir|ThisDir|ThatDir|/Path/To/A/Directory|/Another/Path/To/Different/Directory" -``` - -**Note:** After changing `skip_dir`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync` - -#### skip_file -This option is used to 'skip' certain files and supports pattern matching. - -Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. - -Files can be skipped in the following fashion: -* Specify a wildcard, eg: '*.txt' (skip all txt files) -* Explicitly specify the filename and it's full path relative to your sync_dir, eg: '/path/to/file/filename.ext' -* Explicitly specify the filename only and skip every instance of this filename, eg: 'filename.ext' - -By default, the following files will be skipped: -* Files that start with ~ -* Files that start with .~ (like .~lock.* files generated by LibreOffice) -* Files that end in .tmp - -**Important:** Do not use a skip_file entry of `.*` as this will prevent correct searching of local changes to process. - -Example: -```text -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. -# -# sync_dir = "~/OneDrive" -skip_file = "~*|/Documents/OneNote*|/Documents/config.xlaunch|myfile.ext|/Documents/keepass.kdbx" -# monitor_interval = "300" -# skip_dir = "" -# log_dir = "/var/log/onedrive/" -``` - -**Note:** The `skip_file` can be specified multiple times, for example: -```text -skip_file = "~*|.~*|*.tmp|*.swp" -skip_file = "*.blah" -skip_file = "never_sync.file" -skip_file = "/Documents/keepass.kdbx" -``` -This will be interpreted the same as: -```text -skip_file = "~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx" -``` - -**Note:** after changing `skip_file`, you must perform a full re-synchronization by adding `--resync` to your existing command line - for example: `onedrive --synchronize --resync` - -#### skip_dotfiles -Setting this to `"true"` will skip all .files and .folders while syncing. 
- -Example: -```text -# skip_symlinks = "false" -# debug_https = "false" -skip_dotfiles = "true" -# dry_run = "false" -# monitor_interval = "300" -``` +**Important:** Please note that special permission bits such as setuid, setgid, and the sticky bit are not supported. Valid permission values range from `000` to `777` only. -#### monitor_interval -The monitor interval is defined as the wait time 'between' sync's when running in monitor mode. When this interval expires, the client will check OneDrive for changes online, performing data integrity checks and scanning the local 'sync_dir' for new content. +### How are uploads and downloads managed? +The system manages downloads and uploads using a multi-threaded approach. Specifically, the application utilises 16 threads for these processes. This thread count is preset and cannot be modified by users. This design ensures efficient handling of data transfers but does not allow for customisation of thread allocation. -By default without configuration, 'monitor_interval' is set to 300 seconds. Setting this value to 600 will run the sync process every 10 minutes. - -Example: -```text -# skip_dotfiles = "false" -# dry_run = "false" -monitor_interval = "600" -# min_notify_changes = "5" -# monitor_log_frequency = "6" -``` -**Note:** It is strongly advised you do not use a value of less than 300 seconds for 'monitor_interval'. Using a value less than 300 means your application will be constantly needlessly checking OneDrive online for changes. Future versions of the application may enforce the checking of this minimum value. - -#### monitor_fullscan_frequency -This configuration option controls the number of 'monitor_interval' iterations between when a full scan of your data is performed to ensure data integrity and consistency. - -By default without configuration, 'monitor_fullscan_frequency' is set to 12. In this default state, this means that a full scan is performed every 'monitor_interval' x 'monitor_fullscan_frequency' = 3600 seconds. This is only applicable when running in --monitor mode. - -Setting this value to 24 means that the full scan of OneDrive and checking the integrity of the data stored locally will occur every 2 hours (assuming 'monitor_interval' is set to 300 seconds): - -Example: -```text -# min_notify_changes = "5" -# monitor_log_frequency = "6" -monitor_fullscan_frequency = "24" -# sync_root_files = "false" -# classify_as_big_delete = "1000" -``` - -**Note:** When running in --monitor mode, at application start-up, a full scan will be performed to ensure data integrity. This option has zero effect when running the application in `--synchronize` mode and a full scan will always be performed. - -#### monitor_log_frequency -This configuration option controls the output of when logging is performed to detail that a sync is occuring with OneDrive when using `--monitor` mode. The frequency of syncing with OneDrive is controled via 'monitor_interval'. - -By default without configuration, 'monitor_log_frequency' is set to 6. - -By default, at application start-up when using `--monitor` mode, the following will be logged to indicate that the application has correctly started and performed all the initial processing steps: -``` -Configuring Global Azure AD Endpoints -Initializing the Synchronization Engine ... -Initializing monitor ... -OneDrive monitor interval (seconds): 300 -Starting a sync with OneDrive -Syncing changes from OneDrive ... -Performing a database consistency and integrity check on locally stored data ... 
-Sync with OneDrive is complete -``` -Then, based on 'monitor_log_frequency', the following will be logged when the value is reached: -``` -Starting a sync with OneDrive -Syncing changes from OneDrive ... -Sync with OneDrive is complete -``` -**Note:** The additional log output `Performing a database consistency and integrity check on locally stored data ...` will only be displayed when this activity is occuring which is triggered by 'monitor_fullscan_frequency'. - -#### min_notify_changes -This option defines the minimum number of pending incoming changes necessary to trigger a desktop notification. This allows controlling the frequency of notifications. - -Example: -```text -# dry_run = "false" -# monitor_interval = "300" -min_notify_changes = "50" -# monitor_log_frequency = "6" -# monitor_fullscan_frequency = "12" -``` - -#### operation_timeout -Operation Timeout is the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. - -Example: -```text -# sync_file_permissions = "600" -# rate_limit = "131072" -operation_timeout = "3600" -``` - -#### ip_protocol_version -By default, the application will use IPv4 and IPv6 to resolve and communicate with Microsoft OneDrive. In some Linux distributions (most notably Ubuntu and those distributions based on Ubuntu) this will cause problems due to how DNS resolution is being performed. - -To configure the application to use a specific IP version, configure the following in your config file: -```text -# operation_timeout = "3600" -# dns_timeout = "60" -# connect_timeout = "10" -# data_timeout = "600" -ip_protocol_version = "1" - -``` -**Note:** -* A value of 0 will mean the client will use IPv4 and IPv6. This is the default. -* A value of 1 will mean the client will use IPv4 only. -* A value of 2 will mean the client will use IPv6 only. - -#### classify_as_big_delete -This configuration option will help prevent the online deletion of files and folders online, when the directory that has been deleted contains more items than the specified value. - -By default, this value is 1000 which will count files and folders as children of the directory that has been deleted. - -To change this value, configure the following in your config file: -```text -# monitor_fullscan_frequency = "12" -# sync_root_files = "false" -classify_as_big_delete = "3000" -# user_agent = "" -# remove_source_files = "false" -``` - -**Note:** -* This option only looks at Directories. It has zero effect on deleting files located in your 'sync_dir' root -* This option (in v2.4.x and below) only gets activated when using `--monitor`. In `--synchronize` mode it is ignored as it is assumed you performed that desired operation before you started your next manual sync with OneDrive. -* Be sensible with setting this value - do not use a low value such as '1' as this will prevent you from syncing your data each and every time you delete a single file. - - -#### Configuring the client for 'single tenant application' use -In some instances when using OneDrive Business Accounts, depending on the Azure organisational configuration, it will be necessary to configure the client as a 'single tenant application'. -To configure this, after creating the application on your Azure tenant, update the 'config' file with the tenant name (not the GUID) and the newly created Application ID, then this will be used for the authentication process. 
-```text -# skip_dir_strict_match = "false" -application_id = "your.application.id.guid" -# resync = "false" -# bypass_data_preservation = "false" -# azure_ad_endpoint = "xxxxxx" -azure_tenant_id = "your.azure.tenant.name" -# sync_business_shared_folders = "false" -``` - -#### Configuring the client to use older 'skilion' application identifier -In some instances it may be desirable to utilise the older 'skilion' application identifier to avoid authorising a new application ID within Microsoft Azure environments. -To configure this, update the 'config' file with the old Application ID, then this will be used for the authentication process. -```text -# skip_dir_strict_match = "false" -application_id = "22c49a0d-d21c-4792-aed1-8f163c982546" -# resync = "false" -# bypass_data_preservation = "false" -``` -**Note:** The application will now use the older 'skilion' client identifier, however this may increase your chances of getting a OneDrive 429 error. - -**Note:** After changing the 'application_id' you will need to restart any 'onedrive' process you have running, and potentially issue a `--reauth` to re-authenticate the client with this updated application ID. - -## Frequently Asked Configuration Questions - -### How to sync only specific or single directory? +### How to only sync a specific directory? There are two methods to achieve this: -* Utilise '--single-directory' option to only sync this specific path -* Utilise 'sync_list' to configure what files and directories to sync, and what should be exluded - -### How to 'skip' directories from syncing? -There are several mechanisms available to 'skip' a directory from the sync process: -* Utilise 'skip_dir' to configure what directories to skip. Refer to above for configuration advice. -* Utilise 'sync_list' to configure what files and directories to sync, and what should be exluded - -One further method is to add a '.nosync' empty file to any folder. When this file is present, adding `--check-for-nosync` to your command line will now make the sync process skip any folder where the '.nosync' file is present. - -To make this a permanent change to always skip folders when a '.nosync' empty file is present, add the following to your config file: - -Example: -```text -# upload_only = "false" -# check_nomount = "false" -check_nosync = "true" -# download_only = "false" -# disable_notifications = "false" -``` -**Default:** False +* Employ the '--single-directory' option to only sync this specific path +* Employ 'sync_list' as part of your 'config' file to configure what files and directories to sync, and what should be excluded ### How to 'skip' files from syncing? There are two methods to achieve this: -* Utilise 'skip_file' to configure what files to skip. Refer to above for configuration advice. -* Utilise 'sync_list' to configure what files and directories to sync, and what should be exluded +* Employ 'skip_file' as part of your 'config' file to configure what files to skip +* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded -### How to 'skip' dot files and folders from syncing? -There are three methods to achieve this: -* Utilise 'skip_file' or 'skip_dir' to configure what files or folders to skip. Refer to above for configuration advice. -* Utilise 'sync_list' to configure what files and directories to sync, and what should be exluded -* Utilise 'skip_dotfiles' to skip any dot file (for example: `.Trash-1000` or `.xdg-volume-info`) from syncing to OneDrive. 
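+
+As a rough illustration only, a 'skip_file' entry in your 'config' file might look like the example below. The patterns shown are arbitrary examples rather than recommended defaults, and the pipe-delimited wildcard syntax is an assumption - refer to the 'skip_file' documentation in [./application-config-options.md](application-config-options.md) for the exact syntax supported by your client version:
+```text
+# Skip temporary editor files, plus anything ending in .tmp or .log
+skip_file = "~*|.~*|*.tmp|*.log"
+```
+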
+### How to 'skip' directories from syncing? +There are three methods available to 'skip' a directory from the sync process: +* Employ 'skip_dir' as part of your 'config' file to configure what directories to skip +* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded +* Employ 'check_nosync' as part of your 'config' file and a '.nosync' empty file within the directory to exclude to skip that directory -Example: -```text -# skip_symlinks = "false" -# debug_https = "false" -skip_dotfiles = "true" -# skip_size = "1000" -# dry_run = "false" -``` -**Default:** False +### How to 'skip' .files and .folders from syncing? +There are three methods to achieve this: +* Employ 'skip_file' or 'skip_dir' to configure what files or folders to skip +* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded +* Employ 'skip_dotfiles' as part of your 'config' file to skip any dot file (for example: `.Trash-1000` or `.xdg-volume-info`) from syncing to OneDrive ### How to 'skip' files larger than a certain size from syncing? -There are two methods to achieve this: -* Use `--skip-size ARG` as part of a CLI command to skip new files larger than this size (in MB) -* Use `skip_size = "value"` as part of your 'config' file where files larger than this size (in MB) will be skipped +Use `skip_size = "value"` as part of your 'config' file where files larger than this size (in MB) will be skipped. ### How to 'rate limit' the application to control bandwidth consumed for upload & download operations? -To minimise the Internet bandwidth for upload and download operations, you can configure the 'rate_limit' option within the config file. +To optimise Internet bandwidth usage during upload and download processes, include the 'rate_limit' setting in your configuration file. This setting controls the bandwidth allocated to each thread. -Example valid values for this are as follows: -* 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts -* 262144 = 256 KB/s -* 524288 = 512 KB/s -* 1048576 = 1 MB/s -* 10485760 = 10 MB/s -* 104857600 = 100 MB/s +By default, 'rate_limit' is set to '0', indicating that the application will utilise the maximum available bandwidth across all threads. -Example: -```text -# sync_business_shared_folders = "false" -# sync_dir_permissions = "700" -# sync_file_permissions = "600" -rate_limit = "131072" -``` +To check the current 'rate_limit' value, use the `--display-config` command. -**Note:** A number greater than '131072' is a valid value, with '104857600' being tested as an upper limit. +**Note:** Since downloads and uploads are processed through multiple threads, the 'rate_limit' value applies to each thread separately. For instance, setting 'rate_limit' to 1048576 (1MB) means that during data transfers, the total bandwidth consumption might reach around 16MB, not just the 1MB configured due to the number of threads being used. -### How to prevent your local disk from filling up? -By default, the application will reserve 50MB of disk space to prevent your filesystem to run out of disk space. This value can be modified by adding the following to your config file: - -Example: -```text -... -# webhook_expiration_interval = "86400" -# webhook_renewal_interval = "43200" -space_reservation = "10" -``` +### How can I prevent my local disk from filling up? +By default, the application will reserve 50MB of disk space to prevent your filesystem from running out of disk space. 
-The value entered is in MB (Mega Bytes). In this example, a value of 10MB is being used, and will be converted to bytes by the application. The value being used can be reviewed when using `--display-config`: -``` -Config option 'sync_dir_permissions' = 700 -Config option 'sync_file_permissions' = 600 -Config option 'space_reservation' = 10485760 -Config option 'application_id' = -Config option 'azure_ad_endpoint' = -Config option 'azure_tenant_id' = common -``` +This default value can be modified by adding the 'space_reservation' configuration option and the applicable value as part of your 'config' file. -Any value is valid here, however, if you use a value of '0' a value of '1' will actually be used, so that you actually do not run out of disk space. +You can review the value being used when using `--display-config`. -### How are symbolic links handled by the client? -Microsoft OneDrive has zero concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories. +### How does the client handle symbolic links? +Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories. As such, there are only two methods to support symbolic links with this client: -1. Follow the Linux symbolic link and upload what ever the link is pointing at to OneDrive. This is the default behaviour. -2. Skip symbolic links by configuring the application to do so. In skipping, no data, no link, no reference is uploaded to OneDrive. - -To skip symbolic links, edit your configuration as per below: +1. Follow the Linux symbolic link and upload whatever the local symbolic link is pointing to to Microsoft OneDrive. This is the default behaviour. +2. Skip symbolic links by configuring the application to do so. When skipping, no data, no link, no reference is uploaded to OneDrive. -```text -# local_first = "false" -# no_remote_delete = "false" -skip_symlinks = "true" -# debug_https = "false" -# skip_dotfiles = "false" -``` -Setting this to `"true"` will configure the client to skip all symbolic links while syncing. +Use 'skip_symlinks' as part of your 'config' file to configure the skipping of all symbolic links while syncing. -The default setting is `"false"` which will sync the whole folder structure referenced by the symbolic link, duplicating the contents on OneDrive in the place where the symbolic link is. +### How to synchronise shared folders (OneDrive Personal)? +Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive". -### How to sync shared folders (OneDrive Personal)? -Folders shared with you can be synced by adding them to your OneDrive. To do that open your Onedrive, go to the Shared files list, right click on the folder you want to sync and then click on "Add to my OneDrive". +### How to synchronise shared folders (OneDrive Business or Office 365)? +Folders shared with you can be synchronised by adding them to your OneDrive online. 
To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive". -### How to sync shared folders (OneDrive Business or Office 365)? -Refer to [./BusinessSharedFolders.md](BusinessSharedFolders.md) for configuration assistance. +Refer to [./business-shared-folders.md](business-shared-folders.md) for further details. -Do not use the 'Add shortcut to My files' from the OneDrive web based interface to add a 'shortcut' to your shared folder. This shortcut is not supported by the OneDrive API, thus it cannot be used. - -### How to sync sharePoint / Office 365 Shared Libraries? -Refer to [./SharePoint-Shared-Libraries.md](SharePoint-Shared-Libraries.md) for configuration assistance. - -### How to run a user systemd service at boot without user login? -In some cases it may be desirable for the systemd service to start without having to login as your 'user' - -To avoid this issue, you need to reconfigure your 'user' account so that the systemd services you have created will startup without you having to login to your system: -```text -loginctl enable-linger -``` +### How to synchronise SharePoint / Office 365 Shared Libraries? +There are two methods to achieve this: +* SharePoint library can be directly added to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the SharePoint Library you want to synchronise, and then click on "Add to my OneDrive". +* Configure a separate application instance to only synchronise that specific SharePoint Library. Refer to [./sharepoint-libraries.md](sharepoint-libraries.md) for configuration assistance. -### How to create a shareable link? -In some cases it may be desirable to create a shareable file link and give this link to other users to access a specific file. +### How to Create a Shareable Link? +In certain situations, you might want to generate a shareable file link and provide this link to other users for accessing a specific file. -To do this, use the following command: +To accomplish this, employ the following command: ```text onedrive --create-share-link ``` -**Note:** By default this will be a read-only link. +**Note:** By default, this access permissions for the file link will be read-only. -To make this a read-write link, use the following command: +To make it a read-write link, execute the following command: ```text onedrive --create-share-link --with-editing-perms ``` -**Note:** The ordering of the option file path and option flag is important. - -### How to sync both Personal and Business accounts at the same time? -You must configure separate instances of the application configuration for each account. - -Refer to [./advanced-usage.md](advanced-usage.md) for configuration assistance. - -### How to sync multiple SharePoint Libraries at the same time? -You must configure a separate instances of the application configuration for each SharePoint Library. - -Refer to [./advanced-usage.md](advanced-usage.md) for configuration assistance. - -## Running 'onedrive' in 'monitor' mode -Monitor mode (`--monitor`) allows the onedrive process to continually monitor your local file system for changes to files. 
- -Two common errors can occur when using monitor mode: -* Intialisation failure -* Unable to add a new inotify watch - -Both of these errors are local environment issues, where the following system variables need to be increased as the current system values are potentially too low: -* `fs.file-max` -* `fs.inotify.max_user_watches` - -To determine what the existing values are on your system use the following commands: -```text -sysctl fs.file-max -sysctl fs.inotify.max_user_watches -``` - -To determine what value to change to, you need to count all the files and folders in your configured 'sync_dir': -```text -cd /path/to/your/sync/dir -ls -laR | wc -l -``` - -To make a change to these variables using your file and folder count: -``` -sudo sysctl fs.file-max= -sudo sysctl fs.inotify.max_user_watches= -``` - -To make these changes permanent, refer to your OS reference documentation. - -### Use webhook to subscribe to remote updates in 'monitor' mode - -A webhook can be optionally enabled in the monitor mode to allow the onedrive process to subscribe to remote updates. Remote changes can be synced to your local file system as soon as possible, without waiting for the next sync cycle. - -To enable this feature, you need to configure the following options in the config file: - -```text -webhook_enabled = "true" -webhook_public_url = "" -``` - -Setting `webhook_enabled` to `true` enables the webhook in 'monitor' mode. The onedrive process will listen for incoming updates at a configurable endpoint, which defaults to `0.0.0.0:8888`. The `webhook_public_url` must be set to an public-facing url for Microsoft to send updates to your webhook. If your host is directly exposed to the Internet, the `webhook_public_url` can be set to `http://:8888/` to match the default endpoint. However, the recommended approach is to configure a reverse proxy like nginx. - -**Note:** A valid HTTPS certificate is required for your public-facing URL if using nginx. +**Note:** The order of the file path and option flag is crucial. -For example, below is a nginx config snippet to proxy traffic into the webhook: - -```text -server { - listen 80; - location /webhooks/onedrive { - proxy_http_version 1.1; - proxy_pass http://127.0.0.1:8888; - } -} -``` +### How to Synchronise Both Personal and Business Accounts at once? +You need to set up separate instances of the application configuration for each account. -With nginx running, you can configure `webhook_public_url` to `https:///webhooks/onedrive`. +Refer to [./advanced-usage.md](advanced-usage.md) for guidance on configuration. -If you receive this application error: -```text -Subscription validation request failed. Response must exactly match validationToken query parameter. -``` -The most likely cause for this error will be your nginx configuration. To resolve, potentially investigate the following configuration for nginx: +### How to Synchronise Multiple SharePoint Libraries simultaneously? +For each SharePoint Library, configure a separate instance of the application configuration. -```text -server { - listen 80; - location /webhooks/onedrive { - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Original-Request-URI $request_uri; - proxy_read_timeout 300s; - proxy_connect_timeout 75s; - proxy_buffering off; - proxy_http_version 1.1; - proxy_pass http://127.0.0.1:8888; - } -} -``` +Refer to [./advanced-usage.md](advanced-usage.md) for configuration instructions. 
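+
+As a minimal sketch (the configuration directory names below are purely illustrative), each separate instance - whether for a second account or for a specific SharePoint Library - is typically pointed at its own configuration directory using the `--confdir` option:
+```text
+# Instance 1 - for example, a Personal account
+onedrive --confdir="~/.config/onedrive-personal" --sync
+# Instance 2 - for example, a specific SharePoint Library
+onedrive --confdir="~/.config/onedrive-sharepoint" --sync
+```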
-For any further nginx configuration assistance, please refer to: https://docs.nginx.com/ +### How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period? +When operating in 'Monitor Mode,' it may be advantageous to receive real-time updates to online data. A 'webhook' is the method to achieve this, so that when in 'Monitor Mode,' the client subscribes to remote updates. -### More webhook configuration options +Remote changes can then be promptly synchronised to your local file system, without waiting for the next synchronisation cycle. -Below options can be optionally configured. The default is usually good enough. +This is accomplished by: +* Using 'webhook_enabled' as part of your 'config' file to enable this feature +* Using 'webhook_public_url' as part of your 'config' file to configure the URL the webhook will use for subscription updates -#### webhook_listening_host and webhook_listening_port +### How to initiate the client as a background service? +There are a few ways to employ onedrive as a service: +* via init.d +* via systemd +* via runit -Set `webhook_listening_host` and `webhook_listening_port` to change the webhook listening endpoint. If `webhook_listening_host` is left empty, which is the default, the webhook will bind to `0.0.0.0`. The default `webhook_listening_port` is `8888`. - -``` -webhook_listening_host = "" -webhook_listening_port = "8888" -``` - -#### webhook_expiration_interval and webhook_renewal_interval - -Set `webhook_expiration_interval` and `webhook_renewal_interval` to change the frequency of subscription renewal. By default, the webhook asks Microsoft to keep subscriptions alive for 24 hours, and it renews subscriptions when it is less than 12 hours before their expiration. - -``` -# Default expiration interval is 24 hours -webhook_expiration_interval = "86400" - -# Default renewal interval is 12 hours -webhook_renewal_interval = "43200" -``` - -## Running 'onedrive' as a system service -There are a few ways to use onedrive as a service -* via init.d -* via systemd -* via runit - -**Note:** If using the service files, you may need to increase the `fs.inotify.max_user_watches` value on your system to handle the number of files in the directory you are monitoring as the initial value may be too low. - -### OneDrive service running as root user via init.d +#### OneDrive service running as root user via init.d ```text chkconfig onedrive on service onedrive start ``` -To see the logs run: +To view the logs, execute: ```text tail -f /var/log/onedrive/.onedrive.log ``` -To change what 'user' the client runs under (by default root), manually edit the init.d service file and modify `daemon --user root onedrive_service.sh` for the correct user. +To alter the 'user' under which the client operates (typically root by default), manually modify the init.d service file and adjust `daemon --user root onedrive_service.sh` to match the correct user. -### OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora) -First, su to root using `su - root`, then enable the systemd service: +#### OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora) +Initially, switch to the root user with `su - root`, then activate the systemd service: ```text systemctl --user enable onedrive systemctl --user start onedrive ``` -**Note:** `systemctl --user` directive is not applicable for Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms - see below. 
+**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms - see below. -**Note:** This will run the 'onedrive' process with a UID/GID of '0', thus, any files or folders that are created will be owned by 'root' +**Note:** This will execute the 'onedrive' process with a UID/GID of '0', which means any files or folders created will be owned by 'root'. -To view the status of the service running, use the following: +To monitor the service's status, use the following: ```text systemctl --user status onedrive.service ``` -To see the systemd application logs run: +To observe the systemd application logs, use: ```text journalctl --user-unit=onedrive -f ``` -**Note:** It is a 'systemd' requirement that the XDG environment variables exist for correct enablement and operation of systemd services. If you receive this error when enabling the systemd service: -``` +**Note:** For systemd to function correctly, it requires the presence of XDG environment variables. If you encounter the following error while enabling the systemd service: +```text Failed to connect to bus: No such file or directory ``` -The most likely cause is that the XDG environment variables are missing. To fix this, you must add the following to `.bashrc` or any other file which is run on user login: -``` +The most likely cause is missing XDG environment variables. To resolve this, add the following lines to `.bashrc` or another file executed upon user login: +```text export XDG_RUNTIME_DIR="/run/user/$UID" export DBUS_SESSION_BUS_ADDRESS="unix:path=${XDG_RUNTIME_DIR}/bus" ``` -To make this change effective, you must logout of all user accounts where this change has been made. +To apply this change, you must log out of all user accounts where it has been made. -**Note:** On some systems (for example - Raspbian / Ubuntu / Debian on Raspberry Pi) the above XDG fix may not be reliable after system reboots. The potential alternative to start the client via systemd as root, is to perform the following: -1. Create a symbolic link from `/home/root/.config/onedrive` pointing to `/root/.config/onedrive/` -2. Create a systemd service using the '@' service file: `systemctl enable onedrive@root.service` -3. Start the root@service: `systemctl start onedrive@root.service` +**Note:** On certain systems (e.g., Raspbian / Ubuntu / Debian on Raspberry Pi), the XDG fix above may not persist after system reboots. An alternative to starting the client via systemd as root is as follows: +1. Create a symbolic link from `/home/root/.config/onedrive` to `/root/.config/onedrive/`. +2. Establish a systemd service using the '@' service file: `systemctl enable onedrive@root.service`. +3. Start the root@service: `systemctl start onedrive@root.service`. -This will ensure that the service will correctly restart on system reboot. +This ensures that the service correctly restarts upon system reboot. 
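+
+Taken together, and assuming the paths used in the steps above, this alternative set-up might look like the following:
+```text
+# Step 1: create the parent directory if required, then symlink the root user's configuration
+mkdir -p /home/root/.config
+ln -s /root/.config/onedrive/ /home/root/.config/onedrive
+# Steps 2 and 3: enable and start the '@' service for the root user
+systemctl enable onedrive@root.service
+systemctl start onedrive@root.service
+```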
-To see the systemd application logs run: +To examine the systemd application logs, run: ```text journalctl --unit=onedrive@ -f ``` -### OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux) +#### OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux) ```text systemctl enable onedrive systemctl start onedrive ``` -**Note:** This will run the 'onedrive' process with a UID/GID of '0', thus, any files or folders that are created will be owned by 'root' +**Note:** This will execute the 'onedrive' process with a UID/GID of '0', meaning any files or folders created will be owned by 'root'. -To see the systemd application logs run: +To view the systemd application logs, execute: ```text journalctl --unit=onedrive -f ``` -### OneDrive service running as a non-root user via systemd (All Linux Distributions) -In some cases it is desirable to run the OneDrive client as a service, but not running as the 'root' user. In this case, follow the directions below to configure the service for your normal user login. +#### OneDrive service running as a non-root user via systemd (All Linux Distributions) +In some instances, it is preferable to run the OneDrive client as a service without the 'root' user. Follow the instructions below to configure the service for your regular user login. -1. As the user, who will be running the service, run the application in standalone mode, authorize the application for use & validate that the synchronization is working as expected: +1. As the user who will run the service, launch the application in standalone mode, authorize it for use, and verify that synchronization is functioning as expected: ```text -onedrive --synchronize --verbose +onedrive --sync --verbose ``` -2. Once the application is validated and working for your user, as the 'root' user, where is your username from step 1 above. +2. After validating the application for your user, switch to the 'root' user, where is your username from step 1 above. ```text systemctl enable onedrive@.service systemctl start onedrive@.service ``` -3. To view the status of the service running for the user, use the following: +3. To check the service's status for the user, use the following: ```text systemctl status onedrive@.service ``` -To see the systemd application logs run: +To observe the systemd application logs, use: ```text journalctl --unit=onedrive@ -f ``` -### OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora) -In some cases you may wish to receive GUI notifications when using the client when logged in as a non-root user. In this case, follow the directions below: +#### OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora) +In some scenarios, you may want to receive GUI notifications when using the client as a non-root user. In this case, follow these steps: -1. Login via graphical UI as user you wish to enable the service for -2. Disable any `onedrive@` service files for your username - eg: +1. Log in via the graphical UI as the user you want to enable the service for. +2. Disable any `onedrive@` service files for your username, e.g.: ```text sudo systemctl stop onedrive@alex.service sudo systemctl disable onedrive@alex.service ``` -3. Enable service as per the following: +3. 
Enable the service as follows: ```text systemctl --user enable onedrive systemctl --user start onedrive ``` -To view the status of the service running for the user, use the following: +To check the service's status for the user, use the following: ```text systemctl --user status onedrive.service ``` -To see the systemd application logs run: +To view the systemd application logs, execute: ```text journalctl --user-unit=onedrive -f ``` -**Note:** `systemctl --user` directive is not applicable for Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms +**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms. -### OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void) +#### OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void) -1. Create the following folder if not present already `/etc/sv/runsvdir-` +1. Create the following folder if it doesn't already exist: `/etc/sv/runsvdir-` - where `` is the `USER` targeted for the service - - _e.g_ `# mkdir /etc/sv/runsvdir-nolan` + - e.g., `# mkdir /etc/sv/runsvdir-nolan` -2. Create a file called `run` under the previously created folder with - executable permissions +2. Create a file called `run` under the previously created folder with executable permissions - `# touch /etc/sv/runsvdir-/run` - `# chmod 0755 /etc/sv/runsvdir-/run` -3. Edit the `run` file with the following contents (priviledges needed) +3. Edit the `run` file with the following contents (permissions needed): ```sh #!/bin/sh @@ -1230,24 +898,21 @@ journalctl --user-unit=onedrive -f exec chpst -u "${USER}:${groups}" runsvdir "${svdir}" ``` - - do not forget to correct the `` according to the `USER` set on - step #1 + - Ensure you replace `` with the `USER` set in step #1. 4. Enable the previously created folder as a service - `# ln -fs /etc/sv/runsvdir- /var/service/` -5. Create a subfolder on the `USER`'s `HOME` directory to store the services - (or symlinks) +5. Create a subfolder in the `USER`'s `HOME` directory to store the services (or symlinks) - `$ mkdir ~/service` -6. Create a subfolder for OneDrive specifically +6. Create a subfolder specifically for OneDrive - `$ mkdir ~/service/onedrive/` -7. Create a file called `run` under the previously created folder with - executable permissions +7. Create a file called `run` under the previously created folder with executable permissions - `$ touch ~/service/onedrive/run` - `$ chmod 0755 ~/service/onedrive/run` @@ -1259,211 +924,20 @@ journalctl --user-unit=onedrive -f exec /usr/bin/onedrive --monitor ``` - - in some scenario the path for the `onedrive` binary might differ, you can - obtain it regardless by running `$ command -v onedrive` + - In some scenarios, the path to the `onedrive` binary may vary. You can obtain it by running `$ command -v onedrive`. -9. Reboot to apply changes +9. Reboot to apply the changes -10. Check status of user-defined services +10. Check the status of user-defined services - `$ sv status ~/service/*` -You may refer to Void's documentation regarding -[Per-User Services](https://docs.voidlinux.org/config/services/user-services.html) -for extra details. 
- -## Additional Configuration -### Advanced Configuration of the OneDrive Free Client -* Configuring the client to use mulitple OneDrive accounts / configurations, for example: - * Setup to use onedrive with both Personal and Business accounts - * Setup to use onedrive with multiple SharePoint Libraries -* Configuring the client for use in dual-boot (Windows / Linux) situations -* Configuring the client for use when 'sync_dir' is a mounted directory -* Upload data from the local ~/OneDrive folder to a specific location on OneDrive - -Refer to [./advanced-usage.md](advanced-usage.md) for configuration assistance. - -### Access OneDrive service through a proxy -If you have a requirement to run the client through a proxy, there are a couple of ways to achieve this: -1. Set proxy configuration in `~/.bashrc` to allow the authorization process and when utilizing `--synchronize` -2. If running as a systemd service, edit the applicable systemd service file to include the proxy configuration information: -```text -[Unit] -Description=OneDrive Free Client -Documentation=https://github.com/abraunegg/onedrive -After=network-online.target -Wants=network-online.target - -[Service] -Environment="HTTP_PROXY=http://ip.address:port" -Environment="HTTPS_PROXY=http://ip.address:port" -ExecStart=/usr/local/bin/onedrive --monitor -Restart=on-failure -RestartSec=3 +For additional details, you can refer to Void's documentation on [Per-User Services](https://docs.voidlinux.org/config/services/user-services.html). -[Install] -WantedBy=default.target -``` - -**Note:** After modifying the service files, you will need to run `sudo systemctl daemon-reload` to ensure the service file changes are picked up. A restart of the OneDrive service will also be required to pick up the change to send the traffic via the proxy server +### How to start a user systemd service at boot without user login? +In some situations, it may be necessary for the systemd service to start without requiring your 'user' to log in. -### Setup selinux for a sync folder outside of the home folder -If selinux is enforced and the sync folder is outside of the home folder, as long as there is no policy for cloud fileservice providers, label the file system folder to user_home_t. 
+To address this issue, you need to reconfigure your 'user' account so that the systemd services you've created launch without the need for you to log in to your system: ```text -sudo semanage fcontext -a -t user_home_t /path/to/onedriveSyncFolder -sudo restorecon -R -v /path/to/onedriveSyncFolder -``` -To remove this change from selinux and restore the default behaivor: -```text -sudo semanage fcontext -d /path/to/onedriveSyncFolder -sudo restorecon -R -v /path/to/onedriveSyncFolder -``` - -## All available commands -Output of `onedrive --help` -```text -OneDrive - a client for OneDrive Cloud Services - -Usage: - onedrive [options] --synchronize - Do a one time synchronization - onedrive [options] --monitor - Monitor filesystem and sync regularly - onedrive [options] --display-config - Display the currently used configuration - onedrive [options] --display-sync-status - Query OneDrive service and report on pending changes - onedrive -h | --help - Show this help screen - onedrive --version - Show version - -Options: - - --auth-files ARG - Perform authorization via two files passed in as ARG in the format `authUrl:responseUrl` - The authorization URL is written to the `authUrl`, then onedrive waits for the file `responseUrl` - to be present, and reads the response from that file. - --auth-response ARG - Perform authentication not via interactive dialog but via providing the response url directly. - --check-for-nomount - Check for the presence of .nosync in the syncdir root. If found, do not perform sync. - --check-for-nosync - Check for the presence of .nosync in each directory. If found, skip directory from sync. - --classify-as-big-delete - Number of children in a path that is locally removed which will be classified as a 'big data delete' - --cleanup-local-files - Cleanup additional local files when using --download-only. This will remove local data. - --confdir ARG - Set the directory used to store the configuration files - --create-directory ARG - Create a directory on OneDrive - no sync will be performed. - --create-share-link ARG - Create a shareable link for an existing file on OneDrive - --debug-https - Debug OneDrive HTTPS communication. - --destination-directory ARG - Destination directory for renamed or move on OneDrive - no sync will be performed. - --disable-download-validation - Disable download validation when downloading from OneDrive - --disable-notifications - Do not use desktop notifications in monitor mode. - --disable-upload-validation - Disable upload validation when uploading to OneDrive - --display-config - Display what options the client will use as currently configured - no sync will be performed. - --display-running-config - Display what options the client has been configured to use on application startup. - --display-sync-status - Display the sync status of the client - no sync will be performed. - --download-only - Replicate the OneDrive online state locally, by only downloading changes from OneDrive. Do not upload local changes to OneDrive. 
- --dry-run - Perform a trial sync with no changes made - --enable-logging - Enable client activity to a separate log file - --force - Force the deletion of data when a 'big delete' is detected - --force-http-11 - Force the use of HTTP 1.1 for all operations - --force-sync - Force a synchronization of a specific folder, only when using --single-directory and ignoring all non-default skip_dir and skip_file rules - --get-O365-drive-id ARG - Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library - --get-file-link ARG - Display the file link of a synced file - --help -h - This help information. - --list-shared-folders - List OneDrive Business Shared Folders - --local-first - Synchronize from the local directory source first, before downloading changes from OneDrive. - --log-dir ARG - Directory where logging output is saved to, needs to end with a slash. - --logout - Logout the current user - --min-notify-changes ARG - Minimum number of pending incoming changes necessary to trigger a desktop notification - --modified-by ARG - Display the last modified by details of a given path - --monitor -m - Keep monitoring for local and remote changes - --monitor-fullscan-frequency ARG - Number of sync runs before performing a full local scan of the synced directory - --monitor-interval ARG - Number of seconds by which each sync operation is undertaken when idle under monitor mode. - --monitor-log-frequency ARG - Frequency of logging in monitor mode - --no-remote-delete - Do not delete local file 'deletes' from OneDrive when using --upload-only - --operation-timeout ARG - Maximum amount of time (in seconds) an operation is allowed to take - --print-token - Print the access token, useful for debugging - --reauth - Reauthenticate the client with OneDrive - --remove-directory ARG - Remove a directory on OneDrive - no sync will be performed. - --remove-source-files - Remove source file after successful transfer to OneDrive when using --upload-only - --resync - Forget the last saved state, perform a full sync - --resync-auth - Approve the use of performing a --resync action - --single-directory ARG - Specify a single local directory within the OneDrive root to sync. - --skip-dir ARG - Skip any directories that match this pattern from syncing - --skip-dir-strict-match - When matching skip_dir directories, only match explicit matches - --skip-dot-files - Skip dot files and folders from syncing - --skip-file ARG - Skip any files that match this pattern from syncing - --skip-size ARG - Skip new files larger than this size (in MB) - --skip-symlinks - Skip syncing of symlinks - --source-directory ARG - Source directory to rename or move on OneDrive - no sync will be performed. - --space-reservation ARG - The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation - --sync-root-files - Sync all files in sync_dir root when using sync_list. - --sync-shared-folders - Sync OneDrive Business Shared Folders - --syncdir ARG - Specify the local directory used for synchronization to OneDrive - --synchronize - Perform a synchronization - --upload-only - Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive. Do not download changes from OneDrive. 
- --user-agent ARG - Specify a User Agent string to the http client - --verbose -v+ - Print more details, useful for debugging (repeat for extra debugging) - --version - Print the version and exit - --with-editing-perms - Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link -``` +loginctl enable-linger +``` \ No newline at end of file diff --git a/docs/application-config-options.md b/docs/application-config-options.md new file mode 100644 index 000000000..31b50614b --- /dev/null +++ b/docs/application-config-options.md @@ -0,0 +1,1075 @@ +# Application Configuration Options for the OneDrive Client for Linux +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. + +## Table of Contents + +- [Configuration File Options](#configuration-file-options) + - [application_id](#application_id) + - [azure_ad_endpoint](#azure_ad_endpoint) + - [azure_tenant_id](#azure_tenant_id) + - [bypass_data_preservation](#bypass_data_preservation) + - [check_nomount](#check_nomount) + - [check_nosync](#check_nosync) + - [classify_as_big_delete](#classify_as_big_delete) + - [cleanup_local_files](#cleanup_local_files) + - [connect_timeout](#connect_timeout) + - [data_timeout](#data_timeout) + - [debug_https](#debug_https) + - [disable_download_validation](#disable_download_validation) + - [disable_notifications](#disable_notifications) + - [disable_upload_validation](#disable_upload_validation) + - [display_running_config](#display_running_config) + - [dns_timeout](#dns_timeout) + - [download_only](#download_only) + - [drive_id](#drive_id) + - [dry_run](#dry_run) + - [enable_logging](#enable_logging) + - [force_http_11](#force_http_11) + - [ip_protocol_version](#ip_protocol_version) + - [local_first](#local_first) + - [log_dir](#log_dir) + - [monitor_fullscan_frequency](#monitor_fullscan_frequency) + - [monitor_interval](#monitor_interval) + - [monitor_log_frequency](#monitor_log_frequency) + - [no_remote_delete](#no_remote_delete) + - [operation_timeout](#operation_timeout) + - [rate_limit](#rate_limit) + - [read_only_auth_scope](#read_only_auth_scope) + - [remove_source_files](#remove_source_files) + - [resync](#resync) + - [resync_auth](#resync_auth) + - [skip_dir](#skip_dir) + - [skip_dir_strict_match](#skip_dir_strict_match) + - [skip_dotfiles](#skip_dotfiles) + - [skip_file](#skip_file) + - [skip_size](#skip_size) + - [skip_symlinks](#skip_symlinks) + - [space_reservation](#space_reservation) + - [sync_business_shared_items](#sync_business_shared_items) + - [sync_dir](#sync_dir) + - [sync_dir_permissions](#sync_dir_permissions) + - [sync_file_permissions](#sync_file_permissions) + - [sync_root_files](#sync_root_files) + - [upload_only](#upload_only) + - [user_agent](#user_agent) + - [webhook_enabled](#webhook_enabled) + - [webhook_expiration_interval](#webhook_expiration_interval) + - [webhook_listening_host](#webhook_listening_host) + - [webhook_listening_port](#webhook_listening_port) + - [webhook_public_url](#webhook_public_url) + - [webhook_renewal_interval](#webhook_renewal_interval) +- [Command Line Interface (CLI) Only Options](#command-line-interface-cli-only-options) + - [CLI Option: --auth-files](#cli-option---auth-files) + - [CLI Option: 
--auth-response](#cli-option---auth-response) + - [CLI Option: --confdir](#cli-option---confdir) + - [CLI Option: --create-directory](#cli-option---create-directory) + - [CLI Option: --create-share-link](#cli-option---create-share-link) + - [CLI Option: --destination-directory](#cli-option---destination-directory) + - [CLI Option: --display-config](#cli-option---display-config) + - [CLI Option: --display-sync-status](#cli-option---display-sync-status) + - [CLI Option: --display-quota](#cli-option---display-quota) + - [CLI Option: --force](#cli-option---force) + - [CLI Option: --force-sync](#cli-option---force-sync) + - [CLI Option: --get-file-link](#cli-option---get-file-link) + - [CLI Option: --get-sharepoint-drive-id](#cli-option---get-sharepoint-drive-id) + - [CLI Option: --logout](#cli-option---logout) + - [CLI Option: --modified-by](#cli-option---modified-by) + - [CLI Option: --monitor | -m](#cli-option---monitor--m) + - [CLI Option: --print-access-token](#cli-option---print-access-token) + - [CLI Option: --reauth](#cli-option---reauth) + - [CLI Option: --remove-directory](#cli-option---remove-directory) + - [CLI Option: --single-directory](#cli-option---single-directory) + - [CLI Option: --source-directory](#cli-option---source-directory) + - [CLI Option: --sync | -s](#cli-option---sync--s) + - [CLI Option: --verbose | -v+](#cli-option---verbose--v) + - [CLI Option: --with-editing-perms](#cli-option---with-editing-perms) +- [Depreciated Configuration File and CLI Options](#depreciated-configuration-file-and-cli-options) + - [min_notify_changes](#min_notify_changes) + - [CLI Option: --synchronize](#cli-option---synchronize) + + +## Configuration File Options + +### application_id +_**Description:**_ This is the config option for application id that used used to identify itself to Microsoft OneDrive. In some circumstances, it may be desirable to use your own application id. To do this, you must register a new application with Microsoft Azure via https://portal.azure.com/, then use your new application id with this config option. + +_**Value Type:**_ String + +_**Default Value:**_ d50ca740-c83f-4d1b-b616-12c519384f0c + +_**Config Example:**_ `application_id = "d50ca740-c83f-4d1b-b616-12c519384f0c"` + +### azure_ad_endpoint +_**Description:**_ This is the config option to change the Microsoft Azure Authentication Endpoint that the client uses to conform with data and security requirements that requires data to reside within the geographic borders of that country. + +_**Value Type:**_ String + +_**Default Value:**_ *Empty* - not required for normal operation + +_**Valid Values:**_ USL4, USL5, DE, CN + +_**Config Example:**_ `azure_ad_endpoint = "DE"` + +### azure_tenant_id +_**Description:**_ This config option allows the locking of the client to a specific single tenant and will configure your client to use the specified tenant id in its Azure AD and Graph endpoint URIs, instead of "common". The tenant id may be the GUID Directory ID or the fully qualified tenant name. + +_**Value Type:**_ String + +_**Default Value:**_ *Empty* - not required for normal operation + +_**Config Example:**_ `azure_tenant_id = "example.onmicrosoft.us"` or `azure_tenant_id = "0c4be462-a1ab-499b-99e0-da08ce52a2cc"` + +_**Additional Usage Requirement:**_ Must be configured if 'azure_ad_endpoint' is configured. + +### bypass_data_preservation +_**Description:**_ This config option allows the disabling of preserving local data by renaming the local file in the event of data conflict. 
If this is enabled, you will experience data loss on your local data, as the local file will be overwritten with the data from OneDrive online. Use with care and caution.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `bypass_data_preservation = "false"` or `bypass_data_preservation = "true"`
+
+### check_nomount
+_**Description:**_ This config option is useful to prevent application startup & ongoing use in 'Monitor Mode' if the configured 'sync_dir' is a separate disk that is being mounted by your system. This option will check for the presence of a `.nosync` file in your mount point, and if present, abort any sync process to preserve data.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `check_nomount = "false"` or `check_nomount = "true"`
+
+_**CLI Option Use:**_ `--check-for-nomount`
+
+_**Additional Usage Requirement:**_ Create a `.nosync` file in your mount point *before* you mount your disk, so that this file is visible in your mount point when your disk is not mounted.
+
+### check_nosync
+_**Description:**_ This config option is useful to prevent the sync of a *local* directory to Microsoft OneDrive. It will *not* check for this file online, so it does not prevent the download of directories to your local system.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `check_nosync = "false"` or `check_nosync = "true"`
+
+_**CLI Option Use:**_ `--check-for-nosync`
+
+_**Additional Usage Requirement:**_ Create a `.nosync` file in any *local* directory that you wish to not sync to Microsoft OneDrive when you enable this option.
+
+### classify_as_big_delete
+_**Description:**_ This config option defines the number of children in a path that is locally removed which will be classified as a 'big data delete', to safeguard against large data removals - which are typically accidental local delete events.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 1000
+
+_**Config Example:**_ `classify_as_big_delete = "2000"`
+
+_**CLI Option Use:**_ `--classify-as-big-delete 2000`
+
+_**Additional Usage Requirement:**_ If this option is triggered, you will need to add `--force` to force a sync to occur.
+
+### cleanup_local_files
+_**Description:**_ This config option provides the capability to clean up local files and folders if they are removed online.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `cleanup_local_files = "false"` or `cleanup_local_files = "true"`
+
+_**CLI Option Use:**_ `--cleanup-local-files`
+
+_**Additional Usage Requirement:**_ This configuration option can only be used with 'download_only'. It cannot be used with any other application option.
+
+### connect_timeout
+_**Description:**_ This configuration setting manages the TCP connection timeout duration in seconds for HTTPS connections to Microsoft OneDrive when using the curl library.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 30
+
+_**Config Example:**_ `connect_timeout = "20"`
+
+### data_timeout
+_**Description:**_ This setting controls the timeout duration, in seconds, for when data is not received on an active connection to Microsoft OneDrive over HTTPS when using the curl library, before that connection is timed out.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 240
+
+_**Config Example:**_ `data_timeout = "300"`
+
+### debug_https
+_**Description:**_ This setting controls whether the curl library is configured to output additional data to assist with diagnosing HTTPS issues and problems.
+ +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `debug_https = "false"` or `debug_https = "true"` + +_**CLI Option Use:**_ `--debug-https` + +_**Additional Usage Notes:**_ Whilst this option can be used at any time, it is advisable that you only use this option when advised as this will output your `Authorization: bearer` - which is your authentication token to Microsoft OneDrive. + +### disable_download_validation +_**Description:**_ This option determines whether the client will conduct integrity validation on files downloaded from Microsoft OneDrive. Sometimes, when downloading files, particularly from SharePoint, there is a discrepancy between the file size reported by the OneDrive API and the byte count received from the SharePoint HTTP Server for the same file. Enable this option to disable the integrity checks performed by this client. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `disable_download_validation = "false"` or `disable_download_validation = "true"` + +_**CLI Option Use:**_ `--disable-download-validation` + +_**Additional Usage Notes:**_ If you're downloading data from SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack. + +### disable_notifications +_**Description:**_ This setting controls whether GUI notifications are sent from the client to your display manager session. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `disable_notifications = "false"` or `disable_notifications = "true"` + +_**CLI Option Use:**_ `--disable-notifications` + +### disable_upload_validation +_**Description:**_ This option determines whether the client will conduct integrity validation on files uploaded to Microsoft OneDrive. Sometimes, when uploading files, particularly to SharePoint, SharePoint will modify your file post upload by adding new data to your file which breaks the integrity checking of the upload performed by this client. Enable this option to disable the integrity checks performed by this client. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `disable_upload_validation = "false"` or `disable_upload_validation = "true"` + +_**CLI Option Use:**_ `--disable-upload-validation` + +_**Additional Usage Notes:**_ If you're uploading data to SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack. + +### display_running_config +_**Description:**_ This option will include the running config of the application at application startup. This may be desirable to enable when running in containerised environments so that any application logging that is occuring, will have the application configuration being consumed at startup, written out to any applicable log file. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `display_running_config = "false"` or `display_running_config = "true"` + +_**CLI Option Use:**_ `--display-running-config` + +### dns_timeout +_**Description:**_ This setting controls the libcurl DNS cache value. 
By default, libcurl caches this info for 60 seconds. This libcurl DNS cache timeout is entirely speculative: it assumes that a name will resolve to the same address for a small amount of time into the future, as libcurl does not use DNS TTL properties. We recommend users not to tamper with this option unless strictly necessary.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 60
+
+_**Config Example:**_ `dns_timeout = "90"`
+
+### download_only
+_**Description:**_ This setting forces the client to only download data from Microsoft OneDrive and replicate that data locally. No changes made locally will be uploaded to Microsoft OneDrive when using this option.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `download_only = "false"` or `download_only = "true"`
+
+_**CLI Option Use:**_ `--download-only`
+
+### drive_id
+_**Description:**_ This setting controls the specific drive identifier the client will use when syncing with Microsoft OneDrive.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ *None*
+
+_**Config Example:**_ `drive_id = "b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB"`
+
+_**Additional Usage Notes:**_ This option is typically only used when configuring the client to sync a specific SharePoint Library. If this configuration option is specified in your config file, a value must be specified, otherwise the application will exit citing that a fatal error has occurred.
+
+### dry_run
+_**Description:**_ This setting controls the application capability to test your application configuration without actually performing any activity (download, upload, move, delete, folder creation).
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `dry_run = "false"` or `dry_run = "true"`
+
+_**CLI Option Use:**_ `--dry-run`
+
+### enable_logging
+_**Description:**_ This setting controls the application logging all actions to a separate file. By default, all log files will be written to `/var/log/onedrive`, however this can be changed by using the 'log_dir' config option.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `enable_logging = "false"` or `enable_logging = "true"`
+
+_**CLI Option Use:**_ `--enable-logging`
+
+_**Additional Usage Notes:**_ Additional configuration is potentially required to configure the default log directory. Refer to usage.md for details (ADD LINK)
+
+### force_http_11
+_**Description:**_ This setting controls the application HTTP protocol version. By default, the application will use libcurl defaults for which HTTP protocol version will be used to interact with Microsoft OneDrive. Use this setting to downgrade libcurl to only use HTTP/1.1.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `force_http_11 = "false"` or `force_http_11 = "true"`
+
+_**CLI Option Use:**_ `--force-http-11`
+
+### ip_protocol_version
+_**Description:**_ This setting controls the application IP protocol that should be used when communicating with Microsoft OneDrive. The default is to use IPv4 and IPv6 networks for communicating with Microsoft OneDrive.
+ +_**Value Type:**_ Integer + +_**Default Value:**_ 0 + +_**Valid Values:**_ 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only + +_**Config Example:**_ `ip_protocol_version = "0"` or `ip_protocol_version = "1"` or `ip_protocol_version = "2"` + +_**Additional Usage Notes:**_ In some environments where IPv4 and IPv6 are configured at the same time, this causes resolution and routing issues to Microsoft OneDrive. If this is the case, it is advisable to change 'ip_protocol_version' to match your environment. + +### local_first +_**Description:**_ This setting controls what the application considers the 'source of truth' for your data. By default, what is stored online will be considered as the 'source of truth' when syncing to your local machine. When using this option, your local data will be considered the 'source of truth'. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `local_first = "false"` or `local_first = "true"` + +_**CLI Option Use:**_ `--local-first` + +### log_dir +_**Description:**_ This setting controls the custom application log path when 'enable_logging' has been enabled. By default, all log files will be written to `/var/log/onedrive`. + +_**Value Type:**_ String + +_**Default Value:**_ *None* + +_**Config Example:**_ `log_dir = "~/logs/"` + +_**CLI Option Use:**_ `--log-dir "~/logs/"` + +### monitor_fullscan_frequency +_**Description:**_ This configuration option controls the number of 'monitor_interval' iterations between when a full scan of your data is performed to ensure data integrity and consistency. + +_**Value Type:**_ Integer + +_**Default Value:**_ 12 + +_**Config Example:**_ `monitor_fullscan_frequency = "24"` + +_**CLI Option Use:**_ `--monitor-fullscan-frequency '24'` + +_**Additional Usage Notes:**_ By default without configuration, 'monitor_fullscan_frequency' is set to 12. In this default state, this means that a full scan is performed every 'monitor_interval' x 'monitor_fullscan_frequency' = 3600 seconds. This setting is only applicable when running in `--monitor` mode. Setting this configuration option to '0' will *disable* the full scan of your data online. + +### monitor_interval +_**Description:**_ This configuration setting determines how often the synchronisation loops run in --monitor mode, measured in seconds. When this time period elapses, the client will check for online changes in Microsoft OneDrive, conduct integrity checks on local data and scan the local 'sync_dir' to identify any new content that hasn't been uploaded yet. + +_**Value Type:**_ Integer + +_**Default Value:**_ 300 + +_**Config Example:**_ `monitor_interval = "600"` + +_**CLI Option Use:**_ `--monitor-interval '600'` + +_**Additional Usage Notes:**_ A minimum value of 300 is enforced for this configuration setting. + +### monitor_log_frequency +_**Description:**_ This configuration option controls the suppression of frequently printed log items to the system console when using `--monitor` mode. The aim of this configuration item is to reduce the log output when near zero sync activity is occuring. 
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 12
+
+_**Config Example:**_ `monitor_log_frequency = "24"`
+
+_**CLI Option Use:**_ `--monitor-log-frequency '24'`
+
+_**Additional Usage Notes:**_
+
+By default, at application start-up when using `--monitor` mode, the following will be logged to indicate that the application has correctly started and has performed all the initial processing steps:
+```text
+Reading configuration file: /home/user/.config/onedrive/config
+Configuration file successfully loaded
+Configuring Global Azure AD Endpoints
+Sync Engine Initialised with new Onedrive API instance
+All application operations will be performed in: /home/user/OneDrive
+OneDrive synchronisation interval (seconds): 300
+Initialising filesystem inotify monitoring ...
+Performing initial syncronisation to ensure consistent local state ...
+Starting a sync with Microsoft OneDrive
+Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB ..
+Processing changes and items received from Microsoft OneDrive ...
+Performing a database consistency and integrity check on locally stored data ...
+Scanning the local file system '~/OneDrive' for new data to upload ...
+Performing a final true-up scan of online data from Microsoft OneDrive
+Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB ..
+Processing changes and items received from Microsoft OneDrive ...
+Sync with Microsoft OneDrive is complete
+```
+Then, based on 'monitor_log_frequency', the following output will be logged until the suppression loop value is reached:
+```text
+Starting a sync with Microsoft OneDrive
+Syncing changes from Microsoft OneDrive ...
+Sync with Microsoft OneDrive is complete
+```
+**Note:** The additional log output `Performing a database consistency and integrity check on locally stored data ...` will only be displayed when this activity is occurring, which is triggered by 'monitor_fullscan_frequency'.
+
+**Note:** If verbose application output is being used (`--verbose`), then this configuration setting has zero effect, as application verbose output takes priority over application output suppression.
+
+### no_remote_delete
+_**Description:**_ This configuration option controls whether local file and folder deletes are actioned on Microsoft OneDrive.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `no_remote_delete = "false"` or `no_remote_delete = "true"`
+
+_**CLI Option Use:**_ `--no-remote-delete`
+
+_**Additional Usage Notes:**_ This configuration option can *only* be used in conjunction with `--upload-only`
+
+### operation_timeout
+_**Description:**_ This configuration option controls the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. We recommend users not to tamper with this option unless strictly necessary.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 3600
+
+_**Config Example:**_ `operation_timeout = "3600"`
+
+### rate_limit
+_**Description:**_ This configuration option controls the bandwidth used by the application, per thread, when interacting with Microsoft OneDrive.
+ +_**Value Type:**_ Integer + +_**Default Value:**_ 0 (unlimited, use available bandwidth per thread) + +_**Valid Values:**_ Valid tested values for this configuration option are as follows: + +* 131072 = 128 KB/s - absolute minimum for basic application operations to prevent timeouts +* 262144 = 256 KB/s +* 524288 = 512 KB/s +* 1048576 = 1 MB/s +* 10485760 = 10 MB/s +* 104857600 = 100 MB/s + +_**Config Example:**_ `rate_limit = "131072"` + +### read_only_auth_scope +_**Description:**_ This configuration option controls whether the OneDrive Client for Linux operates in a totally in read-only operation. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `read_only_auth_scope = "false"` or `read_only_auth_scope = "true"` + +_**Additional Usage Notes:**_ When using 'read_only_auth_scope' you also will need to remove your existing application access consent otherwise old authentication consent will be valid and will be used. This will mean the application will technically have the consent to upload data until you revoke this consent. + +### remove_source_files +_**Description:**_ This configuration option controls whether the OneDrive Client for Linux removes the local file post successful transfer to Microsoft OneDrive. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `remove_source_files = "false"` or `remove_source_files = "true"` + +_**CLI Option Use:**_ `--remove-source-files` + +_**Additional Usage Notes:**_ This configuration option can *only* be used in conjunction with `--upload-only` + +### resync +_**Description:**_ This configuration option controls whether the known local sync state with Microsoft OneDrive is removed at application startup. When this option is used, a full scan of your data online is performed to ensure that the local sync state is correctly built back up. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `resync = "false"` or `resync = "true"` + +_**CLI Option Use:**_ `--resync` + +_**Additional Usage Notes:**_ It's highly recommended to use this option only if the application prompts you to do so. Don't blindly use this option as a default option. If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration: +* drive_id +* sync_dir +* skip_file +* skip_dir +* skip_dotfiles +* skip_symlinks +* sync_business_shared_items +* Creating, Modifying or Deleting the 'sync_list' file + +### resync_auth +_**Description:**_ This configuration option controls the approval of performing a 'resync' which can be beneficial in automated environments. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `resync_auth = "false"` or `resync_auth = "true"` + +_**CLI Option Use:**_ `--resync-auth` + +_**Additional Usage Notes:**_ In certain automated environments (assuming you know what you're doing due to automation), to avoid the 'proceed with acknowledgement' resync requirement, this option allows you to automatically acknowledge the resync prompt. + +### skip_dir +_**Description:**_ This configuration option controls whether the application skips certain directories from being synced. Directories can be specified in 2 ways: + +* As a single entry. This will search the respective path for this entry and skip all instances where this directory is present, where ever it may exist. +* As a full path entry. 
This will skip the explicit path as set. + +**Important:** Entries for 'skip_dir' are *relative* to your 'sync_dir' path. + +_**Value Type:**_ String + +_**Default Value:**_ *Empty* - not required for normal operation + +_**Config Example:**_ + +Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. + +```text +skip_dir = "Desktop|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/WindowsPowerShell|.Rproj-user" +``` + +The 'skip_dir' option can also be specified multiple times within your config file, for example: +```text +skip_dir = "SkipThisDirectoryAnywhere" +skip_dir = ".SkipThisOtherDirectoryAnywhere" +skip_dir = "/Explicit/Path/To/A/Directory" +skip_dir = "/Another/Explicit/Path/To/Different/Directory" +``` + +This will be interpreted the same as: +```text +skip_dir = "SkipThisDirectoryAnywhere|.SkipThisOtherDirectoryAnywhere|/Explicit/Path/To/A/Directory|/Another/Explicit/Path/To/Different/Directory" +``` + +_**CLI Option Use:**_ `--skip-dir 'SkipThisDirectoryAnywhere|.SkipThisOtherDirectoryAnywhere|/Explicit/Path/To/A/Directory|/Another/Explicit/Path/To/Different/Directory'` + +_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. If using the config file and CLI option is used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync. + +### skip_dir_strict_match +_**Description:**_ This configuration option controls whether the application performs strict directory matching when checking 'skip_dir' items. When enabled, the 'skip_dir' item must be a full path match to the path to be skipped. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `skip_dir_strict_match = "false"` or `skip_dir_strict_match = "true"` + +_**CLI Option Use:**_ `--skip-dir-strict-match` + +### skip_dotfiles +_**Description:**_ This configuration option controls whether the application will skip all .files and .folders when performing sync operations. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `skip_dotfiles = "false"` or `skip_dotfiles = "true"` + +_**CLI Option Use:**_ `--skip-dot-files` + +_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync. + +### skip_file +_**Description:**_ This configuration option controls whether the application skips certain files from being synced. + +_**Value Type:**_ String + +_**Default Value:**_ `~*|.~*|*.tmp|*.swp|*.partial` + +_**Config Example:**_ + +Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. 
+ +By default, the following files will be skipped: +* Files that start with ~ +* Files that start with .~ (like .~lock.* files generated by LibreOffice) +* Files that end in .tmp, .swp and .partial + +Files can be skipped in the following fashion: +* Specify a wildcard, eg: '*.txt' (skip all txt files) +* Explicitly specify the filename and it's full path relative to your sync_dir, eg: '/path/to/file/filename.ext' +* Explicitly specify the filename only and skip every instance of this filename, eg: 'filename.ext' + +```text +# When changing a config option below, remove the '#' from the start of the line +# For explanations of all config options below see docs/USAGE.md or the man page. +# +# sync_dir = "~/OneDrive" +skip_file = "~*|/Documents/OneNote*|/Documents/config.xlaunch|myfile.ext|/Documents/keepass.kdbx" +# monitor_interval = "300" +# skip_dir = "" +# log_dir = "/var/log/onedrive/" +``` +The 'skip_file' option can be specified multiple times within your config file, for example: +```text +skip_file = "~*|.~*|*.tmp|*.swp" +skip_file = "*.blah" +skip_file = "never_sync.file" +skip_file = "/Documents/keepass.kdbx" +``` +This will be interpreted the same as: +```text +skip_file = "~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx" +``` + +_**CLI Option Use:**_ `--skip-file '~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx'` + +_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. If using the config file and CLI option is used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync. + +### skip_size +_**Description:**_ This configuration option controls whether the application skips syncing certain files larger than the specified size. The value specified is in MB. + +_**Value Type:**_ Integer + +_**Default Value:**_ 0 (all files, regardless of size, are synced) + +_**Config Example:**_ `skip_size = "50"` + +_**CLI Option Use:**_ `--skip-size '50'` + +### skip_symlinks +_**Description:**_ This configuration option controls whether the application will skip all symbolic links when performing sync operations. Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `skip_symlinks = "false"` or `skip_symlinks = "true"` + +_**CLI Option Use:**_ `--skip-symlinks` + +_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync. 
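+
+As the 'skip_file', 'skip_size', 'skip_dotfiles' and 'skip_symlinks' options described above are all related Client Side Filtering controls, they can be combined within the same 'config' file. The following is an illustrative sketch only - the values shown are hypothetical examples, not recommended defaults:
+```text
+# Illustrative example only - adjust these values to suit your own environment
+skip_file = "~*|.~*|*.tmp|*.swp|*.partial"
+skip_size = "1000"
+skip_dotfiles = "true"
+skip_symlinks = "true"
+```
+As noted in the sections above, changing 'skip_file', 'skip_dotfiles' or 'skip_symlinks' requires a resync to be performed.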
+ +### space_reservation +_**Description:**_ This configuration option controls how much local disk space should be reserved, to prevent the application from filling up your entire disk due to misconfiguration + +_**Value Type:**_ Integer + +_**Default Value:**_ 50 MB (expressesed as Bytes when using `--display-config`) + +_**Config Example:**_ `space_reservation = "100"` + +_**CLI Option Use:**_ `--space-reservation '100'` + +### sync_business_shared_items +_**Description:**_ This configuration option controls whether OneDrive Business | Office 365 Shared Folders, when added as a 'shortcut' to your 'My Files' will be synced to your local system. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `sync_business_shared_items = "false"` or `sync_business_shared_items = "true"` + +_**CLI Option Use:**_ *none* - this is a config file option only + +_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync. + +### sync_dir +_**Description:**_ This configuration option determines the location on your local filesystem where your data from Microsoft OneDrive will be saved. + +_**Value Type:**_ String + +_**Default Value:**_ `~/OneDrive` + +_**Config Example:**_ `sync_dir = "~/MyDirToSync"` + +_**CLI Option Use:**_ `--syncdir '~/MyDirToSync'` + +_**Additional Usage Notes:**_ After changing this option, you will be required to perform a resync. + +### sync_dir_permissions +_**Description:**_ This configuration option defines the directory permissions applied when a new directory is created locally during the process of syncing your data from Microsoft OneDrive. + +_**Value Type:**_ Integer + +_**Default Value:**_ `700` - This provides the following permissions: `drwx------` + +_**Config Example:**_ `sync_dir_permissions = "700"` + +_**Additional Usage Notes:**_ Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing directory permissions if you modify this value. + +### sync_file_permissions +_**Description:**_ This configuration option defines the file permissions applied when a new file is created locally during the process of syncing your data from Microsoft OneDrive. + +_**Value Type:**_ Integer + +_**Default Value:**_ `600` - This provides the following permissions: `-rw-------` + +_**Config Example:**_ `sync_file_permissions = "600"` + +_**Additional Usage Notes:**_ Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing directory permissions if you modify this value. + +### sync_root_files +_**Description:**_ This configuration option manages the synchronisation of files located in the 'sync_dir' root when using a 'sync_list.' It enables you to sync all these files by default, eliminating the need to repeatedly modify your 'sync_list' and initiate resynchronisation. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `sync_root_files = "false"` or `sync_root_files = "true"` + +_**CLI Option Use:**_ `--sync-root-files` + +_**Additional Usage Notes:**_ Although it's not mandatory, it's recommended that after enabling this option, you perform a `--resync`. This ensures that any previously excluded content is now included in your sync process. 
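+
+To illustrate how the 'sync_dir', 'sync_dir_permissions' and 'sync_file_permissions' options described above work together, the following is a speculative 'config' sketch only - the path and permission values are hypothetical examples that should be adjusted for your own environment:
+```text
+# Illustrative example only: use a custom sync location and grant group read access
+sync_dir = "~/MyDirToSync"
+sync_dir_permissions = "750"
+sync_file_permissions = "640"
+```
+In this example, '750' equates to `drwxr-x---` for newly created directories and '640' equates to `-rw-r-----` for newly created files. As noted above, existing permissions are not retrospectively updated, and changing 'sync_dir' requires a resync.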
+
+### upload_only
+_**Description:**_ This setting forces the client to only upload data to Microsoft OneDrive and replicate the local state online. By default, this will also remove content online that has been removed locally.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `upload_only = "false"` or `upload_only = "true"`
+
+_**CLI Option Use:**_ `--upload-only`
+
+_**Additional Usage Notes:**_ To ensure that data deleted locally remains accessible online, you can use the 'no_remote_delete' option. If you want to delete the data from your local storage after a successful upload to Microsoft OneDrive, you can use the 'remove_source_files' option.
+
+### user_agent
+_**Description:**_ This configuration option controls the 'User-Agent' request header that is presented to Microsoft Graph API when accessing the Microsoft OneDrive service. This string lets servers and network peers identify the application, operating system, vendor, and/or version of the application making the request. We recommend users not to tamper with this option unless strictly necessary.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ `ISV|abraunegg|OneDrive Client for Linux/vX.Y.Z-A-bcdefghi`
+
+_**Config Example:**_ `user_agent = "ISV|CompanyName|AppName/Version"`
+
+_**Additional Usage Notes:**_ The current value conforms to the Microsoft Graph API documentation for presenting an appropriate 'User-Agent' header and aligns to the registered 'application_id' that this application uses.
+
+### webhook_enabled
+_**Description:**_ This configuration option controls the application feature 'webhooks' to allow you to subscribe to remote updates as published by Microsoft OneDrive. This option only operates when the client is using 'Monitor Mode'.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ The following is the minimum working example that needs to be added to your 'config' file to enable 'webhooks' successfully:
+```text
+webhook_enabled = "true"
+webhook_public_url = "http://:8888/"
+```
+
+_**Additional Usage Notes:**_
+
+Setting `webhook_enabled = "true"` enables the webhook feature in 'monitor' mode. The onedrive process will listen for incoming updates at a configurable endpoint, which defaults to `0.0.0.0:8888`. The `webhook_public_url` must be set to a public-facing URL for Microsoft to send updates to your webhook.
+
+If your host is directly exposed to the Internet, the `webhook_public_url` can be set to `http://:8888/` to match the default endpoint. In this case, it is also advisable to configure a reverse proxy like `nginx` to proxy the traffic to the client. For example, below is an nginx config snippet to proxy traffic into the webhook:
+```text
+server {
+    listen 80;
+    location /webhooks/onedrive {
+        proxy_http_version 1.1;
+        proxy_pass http://127.0.0.1:8888;
+    }
+}
+```
+
+With nginx running, you can configure 'webhook_public_url' to `https:///webhooks/onedrive`
+
+**Note:** A valid HTTPS certificate is required for your public-facing URL if using nginx.
+
+If you receive this application error: `Subscription validation request failed. Response must exactly match validationToken query parameter.`, the most likely cause for this error will be your nginx configuration.
+ +To resolve this configuration issue, potentially investigate the following configuration for nginx: +```text +server { + listen 80; + location /webhooks/onedrive { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Original-Request-URI $request_uri; + proxy_read_timeout 300s; + proxy_connect_timeout 75s; + proxy_buffering off; + proxy_http_version 1.1; + proxy_pass http://127.0.0.1:8888; + } +} +``` +For any further nginx configuration assistance, please refer to: https://docs.nginx.com/ + +### webhook_expiration_interval +_**Description:**_ This configuration option controls the frequency at which an existing Microsoft OneDrive webhook subscription expires. The value is expressed in the number of seconds before expiry. + +_**Value Type:**_ Integer + +_**Default Value:**_ 600 + +_**Config Example:**_ `webhook_expiration_interval = "1200"` + +### webhook_listening_host +_**Description:**_ This configuration option controls the host address that this client binds to, when the webhook feature is enabled. + +_**Value Type:**_ String + +_**Default Value:**_ 0.0.0.0 + +_**Config Example:**_ `webhook_listening_host = ""` - this will use the default value. `webhook_listening_host = "192.168.3.4"` - this will bind the client to use the IP address 192.168.3.4. + +_**Additional Usage Notes:**_ Use in conjunction with 'webhook_listening_port' to change the webhook listening endpoint. + +### webhook_listening_port +_**Description:**_ This configuration option controls the TCP port that this client listens on, when the webhook feature is enabled. + +_**Value Type:**_ Integer + +_**Default Value:**_ 8888 + +_**Config Example:**_ `webhook_listening_port = "9999"` + +_**Additional Usage Notes:**_ Use in conjunction with 'webhook_listening_host' to change the webhook listening endpoint. + +### webhook_public_url +_**Description:**_ This configuration option controls the URL that Microsoft will send subscription notifications to. This must be a valid Internet accessible URL. + +_**Value Type:**_ String + +_**Default Value:**_ *empty* + +_**Config Example:**_ + +* If your host is directly connected to the Internet: `webhook_public_url = "http://:8888/"` +* If you are using nginx to reverse proxy traffic from the Internet: `webhook_public_url = "https:///webhooks/onedrive"` + +### webhook_renewal_interval +_**Description:**_ This configuration option controls the frequency at which an existing Microsoft OneDrive webhook subscription is renewed. The value is expressed in the number of seconds before renewal. + +_**Value Type:**_ Integer + +_**Default Value:**_ 300 + +_**Config Example:**_ `webhook_renewal_interval = "600"` + +### webhook_retry_interval +_**Description:**_ This configuration option controls the frequency at which an existing Microsoft OneDrive webhook subscription is retried when creating or renewing a subscription failed. The value is expressed in the number of seconds before retry. + +_**Value Type:**_ Integer + +_**Default Value:**_ 60 + +_**Config Example:**_ `webhook_retry_interval = "120"` + +## Command Line Interface (CLI) Only Options + +### CLI Option: --auth-files +_**Description:**_ This CLI option allows the user to perform application authentication not via an interactive dialog but via specific files that the application uses to read the authentication data from. 
+ +_**Usage Example:**_ `onedrive --auth-files authUrl:responseUrl` + +_**Additional Usage Notes:**_ The authorisation URL is written to the specified 'authUrl' file, then onedrive waits for the file 'responseUrl' to be present, and reads the authentication response from that file. Example: + +```text +onedrive --auth-files '~/onedrive-auth-url:~/onedrive-response-url' +Reading configuration file: /home/alex/.config/onedrive/config +Configuration file successfully loaded +Configuring Global Azure AD Endpoints +Client requires authentication before proceeding. Waiting for --auth-files elements to be available. +``` +At this point, the client has written the file `~/onedrive-auth-url` which contains the authentication URL that needs to be visited to perform the authentication process. The client will now wait and watch for the presence of the file `~/onedrive-response-url`. + +Visit the authentication URL, and then create a new file called `~/onedrive-response-url` with the response URI. Once this has been done, the application will acknowledge the presence of this file, read the contents, and authenticate the application. +```text +Sync Engine Initialised with new Onedrive API instance + + --sync or --monitor switches missing from your command line input. Please add one (not both) of these switches to your command line or use 'onedrive --help' for further assistance. + +No OneDrive sync will be performed without one of these two arguments being present. +``` + +### CLI Option: --auth-response +_**Description:**_ This CLI option allows the user to perform application authentication not via an interactive dialog but via providing the authentication response URI directly. + +_**Usage Example:**_ `onedrive --auth-response https://login.microsoftonline.com/common/oauth2/nativeclient?code=` + +_**Additional Usage Notes:**_ Typically, unless the application client identifier, authentication scopes are being modified or a specific Azure Tenant is being specified, the authentication URL will mostlikely be as follows: +```text +https://login.microsoftonline.com/common/oauth2/v2.0/authorise?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient +``` +With this URL being known, it is possible ahead of time to request an authentication token by visiting this URL, and performing the authenticaton access request. + +### CLI Option: --confdir +_**Description:**_ This CLI option allows the user to specify where all the application configuration and relevant components are stored. + +_**Usage Example:**_ `onedrive --confdir '~/.config/onedrive-business/'` + +_**Additional Usage Notes:**_ If using this option, it must be specified each and every time the application is used. If this is ommited, the application default configuration directory will be used. + +### CLI Option: --create-directory +_**Description:**_ This CLI option allows the user to create the specified directory path on Microsoft OneDrive without performing a sync. + +_**Usage Example:**_ `onedrive --create-directory 'path/of/new/folder/structure/to/create/'` + +_**Additional Usage Notes:**_ The specified path to create is relative to your configured 'sync_dir'. + +### CLI Option: --create-share-link +_**Description:**_ This CLI option enables the creation of a shareable file link that can be provided to users to access the file that is stored on Microsoft OneDrive. 
By default, the permissions for the file will be 'read-only'.
+
+_**Usage Example:**_ `onedrive --create-share-link 'relative/path/to/your/file.txt'`
+
+_**Additional Usage Notes:**_ If writable access to the file is required, you must add `--with-editing-perms` to your command. See below for details.
+
+### CLI Option: --destination-directory
+_**Description:**_ This CLI option specifies the 'destination' portion of moving a file or folder online, without performing a sync operation.
+
+_**Usage Example:**_ `onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination'`
+
+_**Additional Usage Notes:**_ All specified paths are relative to your configured 'sync_dir'.
+
+### CLI Option: --display-config
+_**Description:**_ This CLI option will display the effective application configuration
+
+_**Usage Example:**_ `onedrive --display-config`
+
+### CLI Option: --display-sync-status
+_**Description:**_ This CLI option will display the sync status of the configured 'sync_dir'
+
+_**Usage Example:**_ `onedrive --display-sync-status`
+
+_**Additional Usage Notes:**_ This option can also use the `--single-directory` option to determine the sync status of a specific directory within the configured 'sync_dir'
+
+### CLI Option: --display-quota
+_**Description:**_ This CLI option will display the quota status of the account drive id or the configured 'drive_id' value
+
+_**Usage Example:**_ `onedrive --display-quota`
+
+### CLI Option: --force
+_**Description:**_ This CLI option enables the forced deletion of data when a 'big delete' is detected.
+
+_**Usage Example:**_ `onedrive --sync --verbose --force`
+
+_**Additional Usage Notes:**_ This option should only be used in cases where you've initiated a 'big delete' and genuinely intend to remove all the data that is set to be deleted online.
+
+### CLI Option: --force-sync
+_**Description:**_ This CLI option enables the syncing of a specific directory, using the Client Side Filtering application defaults, overriding any user application configuration.
+
+_**Usage Example:**_ `onedrive --sync --verbose --force-sync --single-directory 'Data'`
+
+_**Additional Usage Notes:**_ When this option is used, you will be presented with the following warning and risk acceptance:
+```text
+WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --synch --single-directory --force-sync being used
+
+The use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts.
+By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync.
+
+Are you sure you wish to proceed with --force-sync [Y/N] 
+```
+To proceed with this sync task, you must accept the risk of the actions you are taking. If you have any concerns, first use `--dry-run` and evaluate the outcome before proceeding with the actual action.
+
+### CLI Option: --get-file-link
+_**Description:**_ This CLI option queries the OneDrive API and returns the WebURL for the given local file.
+
+_**Usage Example:**_ `onedrive --get-file-link 'relative/path/to/your/file.txt'`
+
+_**Additional Usage Notes:**_ The path that you should use must be relative to your 'sync_dir'
+
+### CLI Option: --get-sharepoint-drive-id
+_**Description:**_ This CLI option queries the OneDrive API and returns the Office 365 Drive ID for a given Office 365 SharePoint Shared Library that can then be used with 'drive_id' to sync a specific SharePoint Library.
+
+_**Usage Example:**_ `onedrive --get-sharepoint-drive-id '*'` or `onedrive --get-sharepoint-drive-id 'PointPublishing Hub Site'`
+
+### CLI Option: --logout
+_**Description:**_ This CLI option removes this client's authentication status with Microsoft OneDrive. Any further application use will require the application to be re-authenticated with Microsoft OneDrive.
+
+_**Usage Example:**_ `onedrive --logout`
+
+### CLI Option: --modified-by
+_**Description:**_ This CLI option queries the OneDrive API and returns the last modified details for the given local file.
+
+_**Usage Example:**_ `onedrive --modified-by 'relative/path/to/your/file.txt'`
+
+_**Additional Usage Notes:**_ The path that you should use must be relative to your 'sync_dir'
+
+### CLI Option: --monitor | -m
+_**Description:**_ This CLI option controls the 'Monitor Mode' operational aspect of the client. When this option is used, the client will perform on-going syncs of data between Microsoft OneDrive and your local system. Local changes will be uploaded in near-realtime, whilst online changes will be downloaded on the next sync process. The frequency of these checks is governed by the 'monitor_interval' value.
+
+_**Usage Example:**_ `onedrive --monitor` or `onedrive -m`
+
+### CLI Option: --print-access-token
+_**Description:**_ Print the current access token being used to access Microsoft OneDrive.
+
+_**Usage Example:**_ `onedrive --verbose --verbose --debug-https --print-access-token`
+
+_**Additional Usage Notes:**_ Do not use this option if you do not know why you want to use it. Be highly cautious of exposing this object. Change your password if you feel that you have inadvertently exposed this token.
+
+### CLI Option: --reauth
+_**Description:**_ This CLI option controls the ability to re-authenticate your client with Microsoft OneDrive.
+
+_**Usage Example:**_ `onedrive --reauth`
+
+### CLI Option: --remove-directory
+_**Description:**_ This CLI option allows the user to remove the specified directory path on Microsoft OneDrive without performing a sync.
+
+_**Usage Example:**_ `onedrive --remove-directory 'path/of/new/folder/structure/to/remove/'`
+
+_**Additional Usage Notes:**_ The specified path to remove is relative to your configured 'sync_dir'.
+
+### CLI Option: --single-directory
+_**Description:**_ This CLI option controls the application's ability to sync a specific single directory.
+
+_**Usage Example:**_ `onedrive --sync --single-directory 'Data'`
+
+_**Additional Usage Notes:**_ The path specified is relative to your configured 'sync_dir' path. If the physical local path 'Folder' to sync is `~/OneDrive/Data/Folder` then the command would be `--single-directory 'Data/Folder'`.
+
+### CLI Option: --source-directory
+_**Description:**_ This CLI option specifies the 'source' portion of moving a file or folder online, without performing a sync operation.
+ +_**Usage Example:**_ `onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination'` + +_**Additional Usage Notes:**_ All specified paths are relative to your configured 'sync_dir'. + +### CLI Option: --sync | -s +_**Description:**_ This CLI option controls the 'Standalone Mode' operational aspect of the client. When this option is used, the client will perform a one-time sync of data between Microsoft OneDrive and your local system. + +_**Usage Example:**_ `onedrive --sync` or `onedrive -s` + +### CLI Option: --verbose | -v+ +_**Description:**_ This CLI option controls the verbosity of the application output. Use the option once, to have normal verbose output, use twice to have debug level application output. + +_**Usage Example:**_ `onedrive --sync --verbose` or `onedrive --monitor --verbose` + +### CLI Option: --with-editing-perms +_**Description:**_ This CLI option enables the creation of a writable shareable file link that can be provided to users to access the file that is stored on Microsoft OneDrive. This option can only be used in conjunction with `--create-share-link` + +_**Usage Example:**_ `onedrive --create-share-link 'relative/path/to/your/file.txt' --with-editing-perms` + +_**Additional Usage Notes:**_ Placement of `--with-editing-perms` is critical. It *must* be placed after the file path as per the example above. + +## Depreciated Configuration File and CLI Options +The following configuration options are no longer supported + +### min_notify_changes +_**Description:**_ Minimum number of pending incoming changes necessary to trigger a GUI desktop notification. + +_**Depreciated Config Example:**_ `min_notify_changes = "50"` + +_**Depreciated CLI Option:**_ `--min-notify-changes '50'` + +_**Reason for depreciation:**_ Application has been totally re-written. When this item was introduced, it was done so to reduce spamming of all events to the GUI desktop. + +### CLI Option: --synchronize +_**Description:**_ Perform a synchronisation with Microsoft OneDrive + +_**Depreciated CLI Option:**_ `--synchronize` + +_**Reason for depreciation:**_ `--synchronize` has been depreciated in favour of `--sync` or `-s` diff --git a/docs/business-shared-folders.md b/docs/business-shared-folders.md new file mode 100644 index 000000000..4282f4ac6 --- /dev/null +++ b/docs/business-shared-folders.md @@ -0,0 +1,40 @@ +# How to configure OneDrive Business Shared Folder Sync +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. + +## Important Note +This feature has been 100% re-written from v2.5.0 onwards. A pre-requesite before using this capability in v2.5.0 and above is for you to revert any Shared Business Folder configuration you may be currently using, including, but not limited to: +* Removing `sync_business_shared_folders = "true|false"` from your 'config' file +* Removing the 'business_shared_folders' file +* Removing any local data | shared folder data from your configured 'sync_dir' to ensure that there are no conflicts or issues. + +## Process Overview +Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client: +1. From the OneDrive web interface, review the 'Shared' objects that have been shared with you. +2. 
Select the applicable folder, and click the 'Add shortcut to My files', which will then add this to your 'My files' folder +3. Update your OneDrive Client for Linux 'config' file to enable the feature by adding `sync_business_shared_items = "true"`. Adding this option will trigger a `--resync` requirement. +4. Test the configuration using '--dry-run' +5. Remove the use of '--dry-run' and sync the OneDrive Business Shared folders as required + + +**NOTE:** This documentation will be updated as this feature progresses. + + +### Enable syncing of OneDrive Business Shared Folders via config file +```text +sync_business_shared_items = "true" +``` + +### Disable syncing of OneDrive Business Shared Folders via config file +```text +sync_business_shared_items = "false" +``` + +## Known Issues +Shared folders, shared with you from people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders. + +Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below: + +![shared_with_me](./images/shared_with_me.JPG) + +This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966) diff --git a/docs/known-issues.md b/docs/known-issues.md index 6d970ff91..d6ac302a2 100644 --- a/docs/known-issues.md +++ b/docs/known-issues.md @@ -1,54 +1,60 @@ -# Known Issues -The below are known issues with this client: +# List of Identified Known Issues +The following points detail known issues associated with this client: -## Moving files into different folders should not cause data to delete and be re-uploaded -**Issue Tracker:** [#876](https://github.com/abraunegg/onedrive/issues/876) +## Renaming or Moving Files in Standalone Mode causes online deletion and re-upload to occur +**Issue Tracker:** [#876](https://github.com/abraunegg/onedrive/issues/876), [#2579](https://github.com/abraunegg/onedrive/issues/2579) -**Description:** +**Summary:** -When running the client in standalone mode (`--synchronize`) moving folders that are successfully synced around between subsequent standalone syncs causes a deletion & re-upload of data to occur. +Renaming or moving files and/or folders while using the standalone sync option `--sync` this results in unnecessary data deletion online and subsequent re-upload. -**Explanation:** +**Detailed Description:** -Technically, the client is 'working' correctly, as, when moving files, you are 'deleting' them from the current location, but copying them to the 'new location'. As the client is running in standalone sync mode, there is no way to track what OS operations have been done when the client is not running - thus, this is why the 'delete and upload' is occurring. +In standalone mode (`--sync`), the renaming or moving folders locally that have already been synchronized leads to the data being deleted online and then re-uploaded in the next synchronization process. -**Workaround:** +**Technical Explanation:** -If the tracking of moving data to new local directories is requried, it is better to run the client in service mode (`--monitor`) rather than in standalone mode, as the 'move' of files can then be handled at the point when it occurs, so that the data is moved to the new location on OneDrive without the need to be deleted and re-uploaded. +This behavior is expected from the client under these specific conditions. Renaming or moving files is interpreted as deleting them from their original location and creating them in a new location. 
In standalone sync mode, the client lacks the capability to track file system changes (including renames and moves) that occur when it is not running. This limitation is the root cause of the observed 'deletion and re-upload' cycle. + +**Recommended Workaround:** + +For effective tracking of file and folder renames or moves to new local directories, it is recommended to run the client in service mode (`--monitor`) rather than in standalone mode. This approach allows the client to immediately process these changes, enabling the data to be updated (renamed or moved) in the new location on OneDrive without undergoing deletion and re-upload. ## Application 'stops' running without any visible reason **Issue Tracker:** [#494](https://github.com/abraunegg/onedrive/issues/494), [#753](https://github.com/abraunegg/onedrive/issues/753), [#792](https://github.com/abraunegg/onedrive/issues/792), [#884](https://github.com/abraunegg/onedrive/issues/884), [#1162](https://github.com/abraunegg/onedrive/issues/1162), [#1408](https://github.com/abraunegg/onedrive/issues/1408), [#1520](https://github.com/abraunegg/onedrive/issues/1520), [#1526](https://github.com/abraunegg/onedrive/issues/1526) -**Description:** +**Summary:** + +Users experience sudden shutdowns in a client application during file transfers with Microsoft's Europe Data Centers, likely due to unstable internet or HTTPS inspection issues. This problem, often signaled by an error code of 141, is related to the application's reliance on Curl and OpenSSL. Resolution steps include system updates, seeking support from OS vendors, ISPs, OpenSSL/Curl teams, and providing detailed debug logs to Microsoft for analysis. -When running the client and performing an upload or download operation, the application just stops working without any reason or explanation. If `echo $?` is used after the application has exited without visible reason, an error level of 141 may be provided. +**Detailed Description:** -Additionally, this issue has mainly been seen when the client is operating against Microsoft's Europe Data Centre's. +The application unexpectedly stops functioning during upload or download operations when using the client. This issue occurs without any apparent reason. Running `echo $?` after the unexpected exit may return an error code of 141. -**Explanation:** +This problem predominantly arises when the client interacts with Microsoft's Europe Data Centers. -The client is heavily dependant on Curl and OpenSSL to perform the activities with the Microsoft OneDrive service. Generally, when this issue occurs, the following is found in the HTTPS Debug Log: +**Technical Explanation:** + +The client heavily relies on Curl and OpenSSL for operations with the Microsoft OneDrive service. 
A common observation during this error is an entry in the HTTPS Debug Log stating: ``` OpenSSL SSL_read: SSL_ERROR_SYSCALL, errno 104 ``` -The only way to determine that this is the cause of the application ceasing to work is to generate a HTTPS debug log using the following additional flags: +To confirm this as the root cause, a detailed HTTPS debug log can be generated with these commands: ``` --verbose --verbose --debug-https ``` -This is indicative of the following: -* Some sort of flaky Internet connection somewhere between you and the OneDrive service -* Some sort of 'broken' HTTPS transparent inspection service inspecting your traffic somewhere between you and the OneDrive service - -**How to resolve:** +This error typically suggests one of the following issues: +* An unstable internet connection between the user and the OneDrive service. +* An issue with HTTPS transparent inspection services that monitor the traffic en route to the OneDrive service. -The best avenue of action here are: -* Ensure your OS is as up-to-date as possible -* Get support from your OS vendor -* Speak to your ISP or Help Desk for assistance -* Open a ticket with OpenSSL and/or Curl teams to better handle this sort of connection failure -* Generate a HTTPS Debug Log for this application and open a new support request with Microsoft and provide the debug log file for their analysis. +**Recommended Resolution:** -If you wish to diagnose this issue further, refer to the following: +Recommended steps to address this issue include: +* Updating your operating system to the latest version. +* Seeking assistance from your OS vendor. +* Contacting your Internet Service Provider (ISP) or your IT Help Desk. +* Reporting the issue to the OpenSSL and/or Curl teams for improved handling of such connection failures. +* Creating a HTTPS Debug Log during the issue and submitting a support request to Microsoft with the log for their analysis. 
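+
+As a speculative example only (the output file name below is arbitrary), such a HTTPS Debug Log could be captured by running the client in standalone mode with the flags documented above and redirecting all output to a file:
+```
+onedrive --sync --verbose --verbose --debug-https > ~/onedrive-https-debug.log 2>&1
+```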
-https://maulwuff.de/research/ssl-debugging.html +For more in-depth SSL troubleshooting, please read: https://maulwuff.de/research/ssl-debugging.html \ No newline at end of file diff --git a/docs/SharePoint-Shared-Libraries.md b/docs/sharepoint-libraries.md similarity index 100% rename from docs/SharePoint-Shared-Libraries.md rename to docs/sharepoint-libraries.md diff --git a/docs/ubuntu-package-install.md b/docs/ubuntu-package-install.md index 292a822cb..df20db923 100644 --- a/docs/ubuntu-package-install.md +++ b/docs/ubuntu-package-install.md @@ -141,6 +141,7 @@ If required, review the table below based on your 'lsb_release' information to p | Debian 10 | You must build from source or upgrade your Operating System to Debian 12 | | Debian 11 | Use [Debian 11](#distribution-debian-11) instructions below | | Debian 12 | Use [Debian 12](#distribution-debian-12) instructions below | +| Debian Sid | Refer to https://packages.debian.org/sid/onedrive for assistance | | Raspbian GNU/Linux 10 | You must build from source or upgrade your Operating System to Raspbian GNU/Linux 12 | | Raspbian GNU/Linux 11 | Use [Debian 11](#distribution-debian-11) instructions below | | Raspbian GNU/Linux 12 | Use [Debian 12](#distribution-debian-12) instructions below | @@ -153,6 +154,11 @@ If required, review the table below based on your 'lsb_release' information to p | Ubuntu 23.04 / Lunar | Use [Ubuntu 23.04](#distribution-ubuntu-2304) instructions below | | Ubuntu 23.10 / Mantic | Use [Ubuntu 23.10](#distribution-ubuntu-2310) instructions below | +**Note:** If your Linux distribution and release is not in the table above, you have 2 options: + +1. Compile the application from source. Refer to install.md (Compilation & Installation) for assistance. +2. Raise a support case with your Linux Distribution to provide you with an applicable package you can use. + ## Distribution Package Install Instructions ### Distribution: Debian 11 diff --git a/onedrive.1.in b/onedrive.1.in index 5caacb0d6..24ddceea6 100644 --- a/onedrive.1.in +++ b/onedrive.1.in @@ -170,11 +170,6 @@ Do not delete local file 'deletes' from OneDrive when using \fB\-\-upload\-only\ .br Configuration file key: \fBno_remote_delete\fP (default: \fBfalse\fP) .TP -\fB\-\-operation\-timeout\fP ARG -Set the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. -.br -Configuration file key: \fBoperation_timeout\fP (default: \fB3600\fP) -.TP \fB\-\-print\-token\fP Print the access token, useful for debugging .TP diff --git a/src/clientSideFiltering.d b/src/clientSideFiltering.d new file mode 100644 index 000000000..d20ba2f89 --- /dev/null +++ b/src/clientSideFiltering.d @@ -0,0 +1,400 @@ +// What is this module called? +module clientSideFiltering; + +// What does this module require to function? +import std.algorithm; +import std.array; +import std.file; +import std.path; +import std.regex; +import std.stdio; +import std.string; +import std.conv; + +// What other modules that we have created do we need to import? 
+import config; +import util; +import log; + +class ClientSideFiltering { + // Class variables + ApplicationConfig appConfig; + string[] paths; + string[] businessSharedItemsList; + Regex!char fileMask; + Regex!char directoryMask; + bool skipDirStrictMatch = false; + bool skipDotfiles = false; + + this(ApplicationConfig appConfig) { + // Configure the class varaible to consume the application configuration + this.appConfig = appConfig; + } + + // Initialise the required items + bool initialise() { + // Log what is being done + addLogEntry("Configuring Client Side Filtering (Selective Sync)", ["debug"]); + + // Load the sync_list file if it exists + if (exists(appConfig.syncListFilePath)){ + loadSyncList(appConfig.syncListFilePath); + } + + // Load the Business Shared Items file if it exists + if (exists(appConfig.businessSharedItemsFilePath)){ + loadBusinessSharedItems(appConfig.businessSharedItemsFilePath); + } + + // Configure skip_dir, skip_file, skip-dir-strict-match & skip_dotfiles from config entries + // Handle skip_dir configuration in config file + addLogEntry("Configuring skip_dir ...", ["debug"]); + addLogEntry("skip_dir: " ~ to!string(appConfig.getValueString("skip_dir")), ["debug"]); + setDirMask(appConfig.getValueString("skip_dir")); + + // Was --skip-dir-strict-match configured? + addLogEntry("Configuring skip_dir_strict_match ...", ["debug"]); + addLogEntry("skip_dir_strict_match: " ~ to!string(appConfig.getValueBool("skip_dir_strict_match")), ["debug"]); + if (appConfig.getValueBool("skip_dir_strict_match")) { + setSkipDirStrictMatch(); + } + + // Was --skip-dot-files configured? + addLogEntry("Configuring skip_dotfiles ...", ["debug"]); + addLogEntry("skip_dotfiles: " ~ to!string(appConfig.getValueBool("skip_dotfiles")), ["debug"]); + if (appConfig.getValueBool("skip_dotfiles")) { + setSkipDotfiles(); + } + + // Handle skip_file configuration in config file + addLogEntry("Configuring skip_file ...", ["debug"]); + + // Validate skip_file to ensure that this does not contain an invalid configuration + // Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process. 
+ foreach(entry; appConfig.getValueString("skip_file").split("|")){ + if (entry == ".*") { + // invalid entry element detected + addLogEntry("ERROR: Invalid skip_file entry '.*' detected"); + return false; + } + } + + // All skip_file entries are valid + addLogEntry("skip_file: " ~ appConfig.getValueString("skip_file"), ["debug"]); + setFileMask(appConfig.getValueString("skip_file")); + + // All configured OK + return true; + } + + // Shutdown components + void shutdown() { + object.destroy(appConfig); + object.destroy(paths); + object.destroy(businessSharedItemsList); + object.destroy(fileMask); + object.destroy(directoryMask); + } + + // Load sync_list file if it exists + void loadSyncList(string filepath) { + // open file as read only + auto file = File(filepath, "r"); + auto range = file.byLine(); + foreach (line; range) { + // Skip comments in file + if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; + paths ~= buildNormalizedPath(line); + } + file.close(); + } + + // load business_shared_folders file + void loadBusinessSharedItems(string filepath) { + // open file as read only + auto file = File(filepath, "r"); + auto range = file.byLine(); + foreach (line; range) { + // Skip comments in file + if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; + businessSharedItemsList ~= buildNormalizedPath(line); + } + file.close(); + } + + // Configure the regex that will be used for 'skip_file' + void setFileMask(const(char)[] mask) { + fileMask = wild2regex(mask); + addLogEntry("Selective Sync File Mask: " ~ to!string(fileMask), ["debug"]); + } + + // Configure the regex that will be used for 'skip_dir' + void setDirMask(const(char)[] dirmask) { + directoryMask = wild2regex(dirmask); + addLogEntry("Selective Sync Directory Mask: " ~ to!string(directoryMask), ["debug"]); + } + + // Configure skipDirStrictMatch if function is called + // By default, skipDirStrictMatch = false; + void setSkipDirStrictMatch() { + skipDirStrictMatch = true; + } + + // Configure skipDotfiles if function is called + // By default, skipDotfiles = false; + void setSkipDotfiles() { + skipDotfiles = true; + } + + // return value of skipDotfiles + bool getSkipDotfiles() { + return skipDotfiles; + } + + // Match against sync_list only + bool isPathExcludedViaSyncList(string path) { + // Debug output that we are performing a 'sync_list' inclusion / exclusion test + return isPathExcluded(path, paths); + } + + // config file skip_dir parameter + bool isDirNameExcluded(string name) { + // Does the directory name match skip_dir config entry? + // Returns true if the name matches a skip_dir config entry + // Returns false if no match + addLogEntry("skip_dir evaluation for: " ~ name, ["debug"]); + + // Try full path match first + if (!name.matchFirst(directoryMask).empty) { + addLogEntry("'!name.matchFirst(directoryMask).empty' returned true = matched", ["debug"]); + return true; + } else { + // Do we check the base name as well? 
+ if (!skipDirStrictMatch) { + addLogEntry("No Strict Matching Enforced", ["debug"]); + + // Test the entire path working backwards from child + string path = buildNormalizedPath(name); + string checkPath; + auto paths = pathSplitter(path); + + foreach_reverse(directory; paths) { + if (directory != "/") { + // This will add a leading '/' but that needs to be stripped to check + checkPath = "/" ~ directory ~ checkPath; + if(!checkPath.strip('/').matchFirst(directoryMask).empty) { + addLogEntry("'!checkPath.matchFirst(directoryMask).empty' returned true = matched", ["debug"]); + return true; + } + } + } + } else { + // No match + addLogEntry("Strict Matching Enforced - No Match", ["debug"]); + } + } + // no match + return false; + } + + // config file skip_file parameter + bool isFileNameExcluded(string name) { + // Does the file name match skip_file config entry? + // Returns true if the name matches a skip_file config entry + // Returns false if no match + addLogEntry("skip_file evaluation for: " ~ name, ["debug"]); + + // Try full path match first + if (!name.matchFirst(fileMask).empty) { + return true; + } else { + // check just the file name + string filename = baseName(name); + if(!filename.matchFirst(fileMask).empty) { + return true; + } + } + // no match + return false; + } + + // test if the given path is not included in the allowed paths + // if there are no allowed paths always return false + private bool isPathExcluded(string path, string[] allowedPaths) { + // function variables + bool exclude = false; + bool exludeDirectMatch = false; // will get updated to true, if there is a pattern match to sync_list entry + bool excludeMatched = false; // will get updated to true, if there is a pattern match to sync_list entry + bool finalResult = true; // will get updated to false, if pattern match to sync_list entry + int offset; + string wildcard = "*"; + + // always allow the root + if (path == ".") return false; + // if there are no allowed paths always return false + if (allowedPaths.empty) return false; + path = buildNormalizedPath(path); + addLogEntry("Evaluation against 'sync_list' for this path: " ~ path, ["debug"]); + addLogEntry("[S]exclude = " ~ to!string(exclude), ["debug"]); + addLogEntry("[S]exludeDirectMatch = " ~ to!string(exludeDirectMatch), ["debug"]); + addLogEntry("[S]excludeMatched = " ~ to!string(excludeMatched), ["debug"]); + + // unless path is an exact match, entire sync_list entries need to be processed to ensure + // negative matches are also correctly detected + foreach (allowedPath; allowedPaths) { + // is this an inclusion path or finer grained exclusion? 
+ switch (allowedPath[0]) { + case '-': + // sync_list path starts with '-', this user wants to exclude this path + exclude = true; + // If the sync_list entry starts with '-/' offset needs to be 2, else 1 + if (startsWith(allowedPath, "-/")){ + // Offset needs to be 2 + offset = 2; + } else { + // Offset needs to be 1 + offset = 1; + } + break; + case '!': + // sync_list path starts with '!', this user wants to exclude this path + exclude = true; + // If the sync_list entry starts with '!/' offset needs to be 2, else 1 + if (startsWith(allowedPath, "!/")){ + // Offset needs to be 2 + offset = 2; + } else { + // Offset needs to be 1 + offset = 1; + } + break; + case '/': + // sync_list path starts with '/', this user wants to include this path + // but a '/' at the start causes matching issues, so use the offset for comparison + exclude = false; + offset = 1; + break; + + default: + // no negative pattern, default is to not exclude + exclude = false; + offset = 0; + } + + // What are we comparing against? + addLogEntry("Evaluation against 'sync_list' entry: " ~ allowedPath, ["debug"]); + + // Generate the common prefix from the path vs the allowed path + auto comm = commonPrefix(path, allowedPath[offset..$]); + + // Is path is an exact match of the allowed path? + if (comm.length == path.length) { + // we have a potential exact match + // strip any potential '/*' from the allowed path, to avoid a potential lesser common match + string strippedAllowedPath = strip(allowedPath[offset..$], "/*"); + + if (path == strippedAllowedPath) { + // we have an exact path match + addLogEntry("Exact path match with 'sync_list' entry", ["debug"]); + + if (!exclude) { + addLogEntry("Evaluation against 'sync_list' result: direct match", ["debug"]); + finalResult = false; + // direct match, break and go sync + break; + } else { + addLogEntry("Evaluation against 'sync_list' result: direct match - path to be excluded", ["debug"]); + + // do not set excludeMatched = true here, otherwise parental path also gets excluded + // flag exludeDirectMatch so that a 'wildcard match' will not override this exclude + exludeDirectMatch = true; + // final result + finalResult = true; + } + } else { + // no exact path match, but something common does match + addLogEntry("Something 'common' matches the 'sync_list' input path", ["debug"]); + + auto splitAllowedPaths = pathSplitter(strippedAllowedPath); + string pathToEvaluate = ""; + foreach(base; splitAllowedPaths) { + pathToEvaluate ~= base; + if (path == pathToEvaluate) { + // The input path matches what we want to evaluate against as a direct match + if (!exclude) { + addLogEntry("Evaluation against 'sync_list' result: direct match for parental path item", ["debug"]); + finalResult = false; + // direct match, break and go sync + break; + } else { + addLogEntry("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded", ["debug"]); + finalResult = true; + // do not set excludeMatched = true here, otherwise parental path also gets excluded + } + } + pathToEvaluate ~= dirSeparator; + } + } + } + + // Is path is a subitem/sub-folder of the allowed path? 
+ if (comm.length == allowedPath[offset..$].length) { + // The given path is potentially a subitem of an allowed path + // We want to capture sub-folders / files of allowed paths here, but not explicitly match other items + // if there is no wildcard + auto subItemPathCheck = allowedPath[offset..$] ~ "/"; + if (canFind(path, subItemPathCheck)) { + // The 'path' includes the allowed path, and is 'most likely' a sub-path item + if (!exclude) { + addLogEntry("Evaluation against 'sync_list' result: parental path match", ["debug"]); + finalResult = false; + // parental path matches, break and go sync + break; + } else { + addLogEntry("Evaluation against 'sync_list' result: parental path match but must be excluded", ["debug"]); + finalResult = true; + excludeMatched = true; + } + } + } + + // Does the allowed path contain a wildcard? (*) + if (canFind(allowedPath[offset..$], wildcard)) { + // allowed path contains a wildcard + // manually replace '*' for '.*' to be compatible with regex + string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*"); + auto allowedMask = regex(regexCompatiblePath); + if (matchAll(path, allowedMask)) { + // regex wildcard evaluation matches + // if we have a prior pattern match for an exclude, excludeMatched = true + if (!exclude && !excludeMatched && !exludeDirectMatch) { + // nothing triggered an exclusion before evaluation against wildcard match attempt + addLogEntry("Evaluation against 'sync_list' result: wildcard pattern match", ["debug"]); + finalResult = false; + } else { + addLogEntry("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded", ["debug"]); + finalResult = true; + excludeMatched = true; + } + } + } + } + // Interim results + addLogEntry("[F]exclude = " ~ to!string(exclude), ["debug"]); + addLogEntry("[F]exludeDirectMatch = " ~ to!string(exludeDirectMatch), ["debug"]); + addLogEntry("[F]excludeMatched = " ~ to!string(excludeMatched), ["debug"]); + + // If exclude or excludeMatched is true, then finalResult has to be true + if ((exclude) || (excludeMatched) || (exludeDirectMatch)) { + finalResult = true; + } + + // results + if (finalResult) { + addLogEntry("Evaluation against 'sync_list' final result: EXCLUDED", ["debug"]); + } else { + addLogEntry("Evaluation against 'sync_list' final result: included for sync", ["debug"]); + } + return finalResult; + } +} \ No newline at end of file diff --git a/src/config.d b/src/config.d index 8c9ba2ff9..2b7927903 100644 --- a/src/config.d +++ b/src/config.d @@ -1,130 +1,219 @@ +// What is this module called? +module config; + +// What does this module require to function? 
import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; -import std.file, std.string, std.regex, std.stdio, std.process, std.algorithm.searching, std.getopt, std.conv, std.path; +import std.stdio; +import std.process; +import std.regex; +import std.string; +import std.algorithm.searching; import std.algorithm.sorting: sort; -import selective; -static import log; +import std.file; +import std.conv; +import std.path; +import std.getopt; +import std.format; +import std.ascii; +import std.datetime; -final class Config -{ - // application defaults - public string defaultSyncDir = "~/OneDrive"; - public string defaultSkipFile = "~*|.~*|*.tmp"; - public string defaultSkipDir = ""; - public string defaultLogFileDir = "/var/log/onedrive/"; - // application set items - public string refreshTokenFilePath = ""; - public string deltaLinkFilePath = ""; - public string databaseFilePath = ""; - public string databaseFilePathDryRun = ""; - public string uploadStateFilePath = ""; - public string syncListFilePath = ""; - public string homePath = ""; - public string configDirName = ""; - public string systemConfigDirName = ""; - public string configFileSyncDir = ""; - public string configFileSkipFile = ""; - public string configFileSkipDir = ""; - public string businessSharedFolderFilePath = ""; - private string userConfigFilePath = ""; - private string systemConfigFilePath = ""; - // was the application just authorised - paste of response uri - public bool applicationAuthorizeResponseUri = false; - // hashmap for the values found in the user config file - // ARGGGG D is stupid and cannot make hashmap initializations!!! - // private string[string] foobar = [ "aa": "bb" ] does NOT work!!! - private string[string] stringValues; - private bool[string] boolValues; - private long[string] longValues; - // Compile time regex - this does not change - public auto configRegex = ctRegex!(`^(\w+)\s*=\s*"(.*)"\s*$`); - // Default directory permission mode - public long defaultDirectoryPermissionMode = 700; - public int configuredDirectoryPermissionMode; - // Default file permission mode - public long defaultFilePermissionMode = 600; - public int configuredFilePermissionMode; - - // Bring in v2.5.0 config items +// What other modules that we have created do we need to import? 
+import log; +import util; + +class ApplicationConfig { + // Application default values - these do not change + // - Compile time regex + immutable auto configRegex = ctRegex!(`^(\w+)\s*=\s*"(.*)"\s*$`); + // - Default directory to store data + immutable string defaultSyncDir = "~/OneDrive"; + // - Default Directory Permissions + immutable long defaultDirectoryPermissionMode = 700; + // - Default File Permissions + immutable long defaultFilePermissionMode = 600; + // - Default types of files to skip + // v2.0.x - 2.4.x: ~*|.~*|*.tmp + // v2.5.x : ~*|.~*|*.tmp|*.swp|*.partial + immutable string defaultSkipFile = "~*|.~*|*.tmp|*.swp|*.partial"; + // - Default directories to skip (default is skip none) + immutable string defaultSkipDir = ""; + // - Default application logging directory + immutable string defaultLogFileDir = "/var/log/onedrive"; + // - Default configuration directory + immutable string defaultConfigDirName = "~/.config/onedrive"; + + // Microsoft Requirements + // - Default Application ID (abraunegg) + immutable string defaultApplicationId = "d50ca740-c83f-4d1b-b616-12c519384f0c"; + // - Microsoft User Agent ISV Tag + immutable string isvTag = "ISV"; + // - Microsoft User Agent Company name + immutable string companyName = "abraunegg"; + // - Microsoft Application name as per Microsoft Azure application registration + immutable string appTitle = "OneDrive Client for Linux"; + // Comply with OneDrive traffic decoration requirements + // https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online + // - Identify as ISV and include Company Name, App Name separated by a pipe character and then adding Version number separated with a slash character + + //immutable string defaultUserAgent = isvTag ~ "|" ~ companyName ~ "|" ~ appTitle ~ "/" ~ strip(import("version")); + immutable string defaultUserAgent = isvTag ~ "|" ~ companyName ~ "|" ~ appTitle ~ "/" ~ "v2.5.0-alpha-5"; // HTTP Struct items, used for configuring HTTP() // Curl Timeout Handling // libcurl dns_cache_timeout timeout immutable int defaultDnsTimeout = 60; // Connect timeout for HTTP|HTTPS connections - immutable int defaultConnectTimeout = 10; - // With the following settings we force - // - if there is no data flow for 10min, abort - // - if the download time for one item exceeds 1h, abort - // - // Timeout for activity on connection - // this translates into Curl's CURLOPT_LOW_SPEED_TIME - // which says: - // It contains the time in number seconds that the - // transfer speed should be below the CURLOPT_LOW_SPEED_LIMIT - // for the library to consider it too slow and abort. - immutable int defaultDataTimeout = 600; + immutable int defaultConnectTimeout = 30; + // Default data timeout for HTTP + // curl.d has a default of: _defaultDataTimeout = dur!"minutes"(2); + immutable int defaultDataTimeout = 240; // Maximum time any operation is allowed to take // This includes dns resolution, connecting, data transfer, etc. 
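	// (Sketch only, assuming a std.net.curl HTTP handle named 'http': these four timeout defaults
	//  would typically be applied along the lines of
	//     http.dnsTimeout       = dur!"seconds"(defaultDnsTimeout);
	//     http.connectTimeout   = dur!"seconds"(defaultConnectTimeout);
	//     http.dataTimeout      = dur!"seconds"(defaultDataTimeout);
	//     http.operationTimeout = dur!"seconds"(defaultOperationTimeout);
	//  where dur is core.time.dur - the actual wiring lives in curlEngine.d, which is not shown here.)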
immutable int defaultOperationTimeout = 3600; - // Specify how many redirects should be allowed - immutable int defaultMaxRedirects = 5; // Specify what IP protocol version should be used when communicating with OneDrive immutable int defaultIpProtocol = 0; // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only + // Specify how many redirects should be allowed + immutable int defaultMaxRedirects = 5; + + // Azure Active Directory & Graph Explorer Endpoints + // - Global & Default + immutable string globalAuthEndpoint = "https://login.microsoftonline.com"; + immutable string globalGraphEndpoint = "https://graph.microsoft.com"; + // - US Government L4 + immutable string usl4AuthEndpoint = "https://login.microsoftonline.us"; + immutable string usl4GraphEndpoint = "https://graph.microsoft.us"; + // - US Government L5 + immutable string usl5AuthEndpoint = "https://login.microsoftonline.us"; + immutable string usl5GraphEndpoint = "https://dod-graph.microsoft.us"; + // - Germany + immutable string deAuthEndpoint = "https://login.microsoftonline.de"; + immutable string deGraphEndpoint = "https://graph.microsoft.de"; + // - China + immutable string cnAuthEndpoint = "https://login.chinacloudapi.cn"; + immutable string cnGraphEndpoint = "https://microsoftgraph.chinacloudapi.cn"; + // Application Version + //immutable string applicationVersion = "onedrive " ~ strip(import("version")); + immutable string applicationVersion = "v2.5.0-alpha-5" ~ " GitHub version: " ~ strip(import("version")); - - this(string confdirOption) - { - // default configuration - entries in config file ~/.config/onedrive/config - // an entry here means it can be set via the config file if there is a coresponding entry, read from config and set via update_from_args() - stringValues["sync_dir"] = defaultSyncDir; - stringValues["skip_file"] = defaultSkipFile; - stringValues["skip_dir"] = defaultSkipDir; + // Application items that depend on application run-time environment, thus cannot be immutable + // Public variables + + // Logging output + bool verboseLogging = false; + bool debugLogging = false; + long verbosityCount = 0; + + + // Was the application just authorised - paste of response uri + bool applicationAuthorizeResponseUri = false; + + // Store the refreshToken for use within the application + const(char)[] refreshToken; + // Store the current accessToken for use within the application + const(char)[] accessToken; + // Store the 'refresh_token' file path + string refreshTokenFilePath = ""; + // Store the accessTokenExpiration for use within the application + SysTime accessTokenExpiration; + // Store the 'session_upload.CRC32-HASH' file path + string uploadSessionFilePath = ""; + + bool apiWasInitialised = false; + bool syncEngineWasInitialised = false; + string accountType; + string defaultDriveId; + string defaultRootId; + ulong remainingFreeSpace = 0; + bool quotaAvailable = true; + bool quotaRestricted = false; + + bool fullScanTrueUpRequired = false; + bool surpressLoggingOutput = false; + + // This is the value that needs testing when we are actually downloading and uploading data + ulong concurrentThreads = 16; + + // All application run-time paths are formulated from this as a set of defaults + // - What is the home path of the actual 'user' that is running the application + string defaultHomePath = ""; + // - What is the config path for the application. 
By default, this is ~/.config/onedrive but can be overridden by using --confdir + string configDirName = defaultConfigDirName; + // - In case we have to use a system config directory such as '/etc/onedrive' or similar, store that path in this variable + private string systemConfigDirName = ""; + // - Store the configured converted octal value for directory permissions + private int configuredDirectoryPermissionMode; + // - Store the configured converted octal value for file permissions + private int configuredFilePermissionMode; + // - Store the 'delta_link' file path + private string deltaLinkFilePath = ""; + // - Store the 'items.sqlite3' file path + string databaseFilePath = ""; + // - Store the 'items-dryrun.sqlite3' file path + string databaseFilePathDryRun = ""; + // - Store the user 'config' file path + private string userConfigFilePath = ""; + // - Store the system 'config' file path + private string systemConfigFilePath = ""; + // - What is the 'config' file path that will be used? + private string applicableConfigFilePath = ""; + // - Store the 'sync_list' file path + string syncListFilePath = ""; + // - Store the 'business_shared_items' file path + string businessSharedItemsFilePath = ""; + + // Hash files so that we can detect when the configuration has changed, in items that will require a --resync + private string configHashFile = ""; + private string configBackupFile = ""; + private string syncListHashFile = ""; + private string businessSharedItemsHashFile = ""; + + // Store the actual 'runtime' hash + private string currentConfigHash = ""; + private string currentSyncListHash = ""; + private string currentBusinessSharedItemsHash = ""; + + // Store the previous config files hash values (file contents) + private string previousConfigHash = ""; + private string previousSyncListHash = ""; + private string previousBusinessSharedItemsHash = ""; + + // Store items that come in from the 'config' file, otherwise these need to be set the the defaults + private string configFileSyncDir = defaultSyncDir; + private string configFileSkipFile = defaultSkipFile; + private string configFileSkipDir = ""; // Default here is no directories are skipped + private string configFileDriveId = ""; // Default here is that no drive id is specified + private bool configFileSkipDotfiles = false; + private bool configFileSkipSymbolicLinks = false; + private bool configFileSyncBusinessSharedItems = false; + + // File permission values (set via initialise function) + private int convertedPermissionValue; + + // Array of values that are the actual application runtime configuration + // The values stored in these array's are the actual application configuration which can then be accessed by getValue & setValue + string[string] stringValues; + long[string] longValues; + bool[string] boolValues; + + bool shellEnvironmentSet = false; + + // Initialise the application configuration + bool initialise(string confdirOption) { + + // Default runtime configuration - entries in config file ~/.config/onedrive/config or derived from variables above + // An entry here means it can be set via the config file if there is a coresponding entry, read from config and set via update_from_args() + // The below becomes the 'default' application configuration before config file and/or cli options are overlayed on top + + // - Set the required default values + stringValues["application_id"] = defaultApplicationId; stringValues["log_dir"] = defaultLogFileDir; + stringValues["skip_dir"] = defaultSkipDir; + stringValues["skip_file"] = 
defaultSkipFile; + stringValues["sync_dir"] = defaultSyncDir; + stringValues["user_agent"] = defaultUserAgent; + // - The 'drive_id' is used when we specify a specific OneDrive ID when attempting to sync Shared Folders and SharePoint items stringValues["drive_id"] = ""; - stringValues["user_agent"] = ""; - boolValues["upload_only"] = false; - boolValues["check_nomount"] = false; - boolValues["check_nosync"] = false; - boolValues["download_only"] = false; - boolValues["disable_notifications"] = false; - boolValues["disable_download_validation"] = false; - boolValues["disable_upload_validation"] = false; - boolValues["enable_logging"] = false; - boolValues["force_http_11"] = false; - boolValues["local_first"] = false; - boolValues["no_remote_delete"] = false; - boolValues["skip_symlinks"] = false; - boolValues["debug_https"] = false; - boolValues["skip_dotfiles"] = false; - boolValues["dry_run"] = false; - boolValues["sync_root_files"] = false; - longValues["verbose"] = log.verbose; // might be initialized by the first getopt call! - // The amount of time (seconds) between monitor sync loops - longValues["monitor_interval"] = 300; - longValues["skip_size"] = 0; - longValues["min_notify_changes"] = 5; - longValues["monitor_log_frequency"] = 6; - // Number of N sync runs before performing a full local scan of sync_dir - // By default 12 which means every ~60 minutes a full disk scan of sync_dir will occur - // 'monitor_interval' * 'monitor_fullscan_frequency' = 3600 = 1 hour - longValues["monitor_fullscan_frequency"] = 12; - // Number of children in a path that is locally removed which will be classified as a 'big data delete' - longValues["classify_as_big_delete"] = 1000; - // Delete source after successful transfer - boolValues["remove_source_files"] = false; - // Strict matching for skip_dir - boolValues["skip_dir_strict_match"] = false; - // Allow for a custom Client ID / Application ID to be used to replace the inbuilt default - // This is a config file option ONLY - stringValues["application_id"] = ""; - // allow for resync to be set via config file - boolValues["resync"] = false; - // resync now needs to be acknowledged based on the 'risk' of using it - boolValues["resync_auth"] = false; - // Ignore data safety checks and overwrite local data rather than preserve & rename - // This is a config file option ONLY - boolValues["bypass_data_preservation"] = false; // Support National Azure AD endpoints as per https://docs.microsoft.com/en-us/graph/deployments // By default, if empty, use standard Azure AD URL's // Will support the following options: @@ -141,52 +230,31 @@ final class Config // AD Endpoint: https://login.chinacloudapi.cn // Graph Endpoint: https://microsoftgraph.chinacloudapi.cn stringValues["azure_ad_endpoint"] = ""; + // Support single-tenant applications that are not able to use the "common" multiplexer - stringValues["azure_tenant_id"] = "common"; - // Allow enable / disable of the syncing of OneDrive Business Shared Folders via configuration file - boolValues["sync_business_shared_folders"] = false; - // Configure the default folder permission attributes for newly created folders + stringValues["azure_tenant_id"] = ""; + // - Store how many times was --verbose added + longValues["verbose"] = verbosityCount; + // - The amount of time (seconds) between monitor sync loops + longValues["monitor_interval"] = 300; + // - What size of file should be skipped? 
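	//   (Measured in MB, matching the --skip-size CLI option described further below; the default of 0
	//    is assumed to disable size-based skipping. A hypothetical config entry would look like:
	//    skip_size = "500"   - i.e. skip new files larger than 500 MB.)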
+ longValues["skip_size"] = 0; + // - How many 'loops' when using --monitor, before we print out high frequency recurring items? + longValues["monitor_log_frequency"] = 12; + // - Number of N sync runs before performing a full local scan of sync_dir + // By default 12 which means every ~60 minutes a full disk scan of sync_dir will occur + // 'monitor_interval' * 'monitor_fullscan_frequency' = 3600 = 1 hour + longValues["monitor_fullscan_frequency"] = 12; + // - Number of children in a path that is locally removed which will be classified as a 'big data delete' + longValues["classify_as_big_delete"] = 1000; + // - Configure the default folder permission attributes for newly created folders longValues["sync_dir_permissions"] = defaultDirectoryPermissionMode; - // Configure the default file permission attributes for newly created file + // - Configure the default file permission attributes for newly created file longValues["sync_file_permissions"] = defaultFilePermissionMode; - // Configure download / upload rate limits + // - Configure download / upload rate limits longValues["rate_limit"] = 0; - // To ensure we do not fill up the load disk, how much disk space should be reserved by default + // - To ensure we do not fill up the load disk, how much disk space should be reserved by default longValues["space_reservation"] = 50 * 2^^20; // 50 MB as Bytes - // Webhook options - boolValues["webhook_enabled"] = false; - stringValues["webhook_public_url"] = ""; - stringValues["webhook_listening_host"] = ""; - longValues["webhook_listening_port"] = 8888; - longValues["webhook_expiration_interval"] = 3600 * 24; - longValues["webhook_renewal_interval"] = 3600 * 12; - // Log to application output running configuration values - boolValues["display_running_config"] = false; - // Configure read-only authentication scope - boolValues["read_only_auth_scope"] = false; - // Flag to cleanup local files when using --download-only - boolValues["cleanup_local_files"] = false; - - // DEVELOPER OPTIONS - // display_memory = true | false - // - It may be desirable to display the memory usage of the application to assist with diagnosing memory issues with the application - // - This is especially beneficial when debugging or performing memory tests with Valgrind - boolValues["display_memory"] = false; - // monitor_max_loop = long value - // - It may be desirable to, when running in monitor mode, force monitor mode to 'quit' after X number of loops - // - This is especially beneficial when debugging or performing memory tests with Valgrind - longValues["monitor_max_loop"] = 0; - // display_sync_options = true | false - // - It may be desirable to see what options are being passed in to performSync() without enabling the full verbose debug logging - boolValues["display_sync_options"] = false; - // force_children_scan = true | false - // - Force client to use /children rather than /delta to query changes on OneDrive - // - This option flags nationalCloudDeployment as true, forcing the client to act like it is using a National Cloud Deployment - boolValues["force_children_scan"] = false; - // display_processing_time = true | false - // - Enabling this option will add function processing times to the console output - // - This then enables tracking of where the application is spending most amount of time when processing data when users have questions re performance - boolValues["display_processing_time"] = false; // HTTPS & CURL Operation Settings // - Maximum time an operation is allowed to take @@ -200,71 +268,160 @@ 
final class Config longValues["data_timeout"] = defaultDataTimeout; // What IP protocol version should be used when communicating with OneDrive longValues["ip_protocol_version"] = defaultIpProtocol; // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only - + + // - Do we wish to upload only? + boolValues["upload_only"] = false; + // - Do we need to check for the .nomount file on the mount point? + boolValues["check_nomount"] = false; + // - Do we need to check for the .nosync file anywhere? + boolValues["check_nosync"] = false; + // - Do we wish to download only? + boolValues["download_only"] = false; + // - Do we disable notifications? + boolValues["disable_notifications"] = false; + // - Do we bypass all the download validation? + // This is critically important not to disable, but because of SharePoint 'feature' can be highly desirable to enable + boolValues["disable_download_validation"] = false; + // - Do we bypass all the upload validation? + // This is critically important not to disable, but because of SharePoint 'feature' can be highly desirable to enable + boolValues["disable_upload_validation"] = false; + // - Do we enable logging? + boolValues["enable_logging"] = false; + // - Do we force HTTP 1.1 for connections to the OneDrive API + // By default we use the curl library default, which should be HTTP2 for most operations governed by the OneDrive API + boolValues["force_http_11"] = false; + // - Do we treat the local file system as the source of truth for our data? + boolValues["local_first"] = false; + // - Do we ignore local file deletes, so that all files are retained online? + boolValues["no_remote_delete"] = false; + // - Do we skip symbolic links? + boolValues["skip_symlinks"] = false; + // - Do we enable debugging for all HTTPS flows. Critically important for debugging API issues. + boolValues["debug_https"] = false; + // - Do we skip .files and .folders? + boolValues["skip_dotfiles"] = false; + // - Do we perform a 'dry-run' with no local or remote changes actually being performed? + boolValues["dry_run"] = false; + // - Do we sync all the files in the 'sync_dir' root? + boolValues["sync_root_files"] = false; + // - Do we delete source after successful transfer? + boolValues["remove_source_files"] = false; + // - Do we perform strict matching for skip_dir? + boolValues["skip_dir_strict_match"] = false; + // - Do we perform a --resync? 
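	//   (For any of the boolean options above, a config file entry uses the same quoted form as string
	//    values per the configRegex, e.g. a hypothetical  skip_dotfiles = "true"  - the config parser
	//    further below only accepts the literal string "true" as a true value.)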
+ boolValues["resync"] = false; + // - resync now needs to be acknowledged based on the 'risk' of using it + boolValues["resync_auth"] = false; + // - Ignore data safety checks and overwrite local data rather than preserve & rename + // This is a config file option ONLY + boolValues["bypass_data_preservation"] = false; + // - Allow enable / disable of the syncing of OneDrive Business Shared items (files & folders) via configuration file + boolValues["sync_business_shared_items"] = false; + // - Log to application output running configuration values + boolValues["display_running_config"] = false; + // - Configure read-only authentication scope + boolValues["read_only_auth_scope"] = false; + // - Flag to cleanup local files when using --download-only + boolValues["cleanup_local_files"] = false; + + // Webhook Feature Options + boolValues["webhook_enabled"] = false; + stringValues["webhook_public_url"] = ""; + stringValues["webhook_listening_host"] = ""; + longValues["webhook_listening_port"] = 8888; + longValues["webhook_expiration_interval"] = 600; + longValues["webhook_renewal_interval"] = 300; + longValues["webhook_retry_interval"] = 60; + // EXPAND USERS HOME DIRECTORY // Determine the users home directory. // Need to avoid using ~ here as expandTilde() below does not interpret correctly when running under init.d or systemd scripts // Check for HOME environment variable if (environment.get("HOME") != ""){ // Use HOME environment variable - log.vdebug("homePath: HOME environment variable set"); - homePath = environment.get("HOME"); + addLogEntry("runtime_environment: HOME environment variable detected, expansion of '~' should be possible", ["debug"]); + defaultHomePath = environment.get("HOME"); + shellEnvironmentSet = true; } else { if ((environment.get("SHELL") == "") && (environment.get("USER") == "")){ // No shell is set or username - observed case when running as systemd service under CentOS 7.x - log.vdebug("homePath: WARNING - no HOME environment variable set"); - log.vdebug("homePath: WARNING - no SHELL environment variable set"); - log.vdebug("homePath: WARNING - no USER environment variable set"); - homePath = "/root"; + addLogEntry("runtime_environment: No HOME, SHELL or USER environment variable configuration detected. 
Expansion of '~' not possible", ["debug"]); + defaultHomePath = "/root"; + shellEnvironmentSet = false; } else { // A shell & valid user is set, but no HOME is set, use ~ which can be expanded - log.vdebug("homePath: WARNING - no HOME environment variable set"); - homePath = "~"; + addLogEntry("runtime_environment: SHELL and USER environment variable detected, expansion of '~' should be possible", ["debug"]); + defaultHomePath = "~"; + shellEnvironmentSet = true; } } - - // Output homePath calculation - log.vdebug("homePath: ", homePath); - - // Determine the correct configuration directory to use + // outcome of setting defaultHomePath + addLogEntry("runtime_environment: Calculated defaultHomePath: " ~ defaultHomePath, ["debug"]); + + // DEVELOPER OPTIONS + // display_memory = true | false + // - It may be desirable to display the memory usage of the application to assist with diagnosing memory issues with the application + // - This is especially beneficial when debugging or performing memory tests with Valgrind + boolValues["display_memory"] = false; + // monitor_max_loop = long value + // - It may be desirable to, when running in monitor mode, force monitor mode to 'quit' after X number of loops + // - This is especially beneficial when debugging or performing memory tests with Valgrind + longValues["monitor_max_loop"] = 0; + // display_sync_options = true | false + // - It may be desirable to see what options are being passed in to performSync() without enabling the full verbose debug logging + boolValues["display_sync_options"] = false; + // force_children_scan = true | false + // - Force client to use /children rather than /delta to query changes on OneDrive + // - This option flags nationalCloudDeployment as true, forcing the client to act like it is using a National Cloud Deployment model + boolValues["force_children_scan"] = false; + // display_processing_time = true | false + // - Enabling this option will add function processing times to the console output + // - This then enables tracking of where the application is spending most amount of time when processing data when users have questions re performance + boolValues["display_processing_time"] = false; + + // Function variables string configDirBase; string systemConfigDirBase; - if (confdirOption != "") { + bool configurationInitialised = false; + + // Initialise the application configuration, using the provided --confdir option was passed in + if (!confdirOption.empty) { // A CLI 'confdir' was passed in - // Clean up any stray " .. these should not be there ... + // Clean up any stray " .. these should not be there for correct process handling of the configuration option confdirOption = strip(confdirOption,"\""); - log.vdebug("configDirName: CLI override to set configDirName to: ", confdirOption); + addLogEntry("configDirName: CLI override to set configDirName to: " ~ confdirOption, ["debug"]); + if (canFind(confdirOption,"~")) { // A ~ was found - log.vdebug("configDirName: A '~' was found in configDirName, using the calculated 'homePath' to replace '~'"); - configDirName = homePath ~ strip(confdirOption,"~","~"); + addLogEntry("configDirName: A '~' was found in configDirName, using the calculated 'defaultHomePath' to replace '~'", ["debug"]); + configDirName = defaultHomePath ~ strip(confdirOption,"~","~"); } else { configDirName = confdirOption; } } else { - // Determine the base directory relative to which user specific configuration files should be stored. 
+ // Determine the base directory relative to which user specific configuration files should be stored if (environment.get("XDG_CONFIG_HOME") != ""){ - log.vdebug("configDirBase: XDG_CONFIG_HOME environment variable set"); + addLogEntry("configDirBase: XDG_CONFIG_HOME environment variable set", ["debug"]); configDirBase = environment.get("XDG_CONFIG_HOME"); } else { // XDG_CONFIG_HOME does not exist on systems where X11 is not present - ie - headless systems / servers - log.vdebug("configDirBase: WARNING - no XDG_CONFIG_HOME environment variable set"); - configDirBase = homePath ~ "/.config"; + addLogEntry("configDirBase: WARNING - no XDG_CONFIG_HOME environment variable set", ["debug"]); + configDirBase = buildNormalizedPath(buildPath(defaultHomePath, ".config")); // Also set up a path to pre-shipped shared configs (which can be overridden by supplying a config file in userspace) systemConfigDirBase = "/etc"; } - + // Output configDirBase calculation - log.vdebug("configDirBase: ", configDirBase); - // Set the default application configuration directory - log.vdebug("configDirName: Configuring application to use default config path"); + addLogEntry("configDirBase: " ~ configDirBase, ["debug"]); + // Set the calculated application configuration directory + addLogEntry("configDirName: Configuring application to use calculated config path", ["debug"]); // configDirBase contains the correct path so we do not need to check for presence of '~' - configDirName = configDirBase ~ "/onedrive"; + configDirName = buildNormalizedPath(buildPath(configDirBase, "onedrive")); // systemConfigDirBase contains the correct path so we do not need to check for presence of '~' - systemConfigDirName = systemConfigDirBase ~ "/onedrive"; + systemConfigDirName = buildNormalizedPath(buildPath(systemConfigDirBase, "onedrive")); } - - // Config directory options all determined + + // Configuration directory should now have been correctly identified if (!exists(configDirName)) { // create the directory mkdirRecurse(configDirName); @@ -276,369 +433,250 @@ final class Config if (!isDir(configDirName)) { if (!confdirOption.empty) { // the configuration path was passed in by the user .. 
user error - writeln("ERROR: --confdir entered value is an existing file instead of an existing directory"); + addLogEntry("ERROR: --confdir entered value is an existing file instead of an existing directory"); } else { // other error - writeln("ERROR: ~/.config/onedrive is a file rather than a directory"); + addLogEntry("ERROR: " ~ confdirOption ~ " is a file rather than a directory"); } // Must exit exit(EXIT_FAILURE); } } - - // configDirName has a trailing / - if (!configDirName.empty) log.vlog("Using 'user' Config Dir: ", configDirName); - if (!systemConfigDirName.empty) log.vlog("Using 'system' Config Dir: ", systemConfigDirName); - + // Update application set variables based on configDirName - refreshTokenFilePath = buildNormalizedPath(configDirName ~ "/refresh_token"); - deltaLinkFilePath = buildNormalizedPath(configDirName ~ "/delta_link"); - databaseFilePath = buildNormalizedPath(configDirName ~ "/items.sqlite3"); - databaseFilePathDryRun = buildNormalizedPath(configDirName ~ "/items-dryrun.sqlite3"); - uploadStateFilePath = buildNormalizedPath(configDirName ~ "/resume_upload"); - userConfigFilePath = buildNormalizedPath(configDirName ~ "/config"); - syncListFilePath = buildNormalizedPath(configDirName ~ "/sync_list"); - systemConfigFilePath = buildNormalizedPath(systemConfigDirName ~ "/config"); - businessSharedFolderFilePath = buildNormalizedPath(configDirName ~ "/business_shared_folders"); - + // - What is the full path for the 'refresh_token' + refreshTokenFilePath = buildNormalizedPath(buildPath(configDirName, "refresh_token")); + // - What is the full path for the 'delta_link' + deltaLinkFilePath = buildNormalizedPath(buildPath(configDirName, "delta_link")); + // - What is the full path for the 'items.sqlite3' - the database cache file + databaseFilePath = buildNormalizedPath(buildPath(configDirName, "items.sqlite3")); + // - What is the full path for the 'items-dryrun.sqlite3' - the dry-run database cache file + databaseFilePathDryRun = buildNormalizedPath(buildPath(configDirName, "items-dryrun.sqlite3")); + // - What is the full path for the 'resume_upload' + uploadSessionFilePath = buildNormalizedPath(buildPath(configDirName, "session_upload")); + // - What is the full path for the 'sync_list' file + syncListFilePath = buildNormalizedPath(buildPath(configDirName, "sync_list")); + // - What is the full path for the 'config' - the user file to configure the application + userConfigFilePath = buildNormalizedPath(buildPath(configDirName, "config")); + // - What is the full path for the system 'config' file if it is required + systemConfigFilePath = buildNormalizedPath(buildPath(systemConfigDirName, "config")); + + // - What is the full path for the 'business_shared_items' + businessSharedItemsFilePath = buildNormalizedPath(buildPath(configDirName, "business_shared_items")); + + // To determine if any configuration items has changed, where a --resync would be required, we need to have a hash file for the following items + // - 'config.backup' file + // - applicable 'config' file + // - 'sync_list' file + // - 'business_shared_items' file + configBackupFile = buildNormalizedPath(buildPath(configDirName, ".config.backup")); + configHashFile = buildNormalizedPath(buildPath(configDirName, ".config.hash")); + syncListHashFile = buildNormalizedPath(buildPath(configDirName, ".sync_list.hash")); + businessSharedItemsHashFile = buildNormalizedPath(buildPath(configDirName, ".business_shared_items.hash")); + // Debug Output for application set variables based on configDirName - 
log.vdebug("refreshTokenFilePath = ", refreshTokenFilePath); - log.vdebug("deltaLinkFilePath = ", deltaLinkFilePath); - log.vdebug("databaseFilePath = ", databaseFilePath); - log.vdebug("databaseFilePathDryRun = ", databaseFilePathDryRun); - log.vdebug("uploadStateFilePath = ", uploadStateFilePath); - log.vdebug("userConfigFilePath = ", userConfigFilePath); - log.vdebug("syncListFilePath = ", syncListFilePath); - log.vdebug("systemConfigFilePath = ", systemConfigFilePath); - log.vdebug("businessSharedFolderFilePath = ", businessSharedFolderFilePath); - } - - bool initialize() - { - // Initialise the application + addLogEntry("refreshTokenFilePath = " ~ refreshTokenFilePath, ["debug"]); + addLogEntry("deltaLinkFilePath = " ~ deltaLinkFilePath, ["debug"]); + addLogEntry("databaseFilePath = " ~ databaseFilePath, ["debug"]); + addLogEntry("databaseFilePathDryRun = " ~ databaseFilePathDryRun, ["debug"]); + addLogEntry("uploadSessionFilePath = " ~ uploadSessionFilePath, ["debug"]); + addLogEntry("userConfigFilePath = " ~ userConfigFilePath, ["debug"]); + addLogEntry("syncListFilePath = " ~ syncListFilePath, ["debug"]); + addLogEntry("systemConfigFilePath = " ~ systemConfigFilePath, ["debug"]); + addLogEntry("configBackupFile = " ~ configBackupFile, ["debug"]); + addLogEntry("configHashFile = " ~ configHashFile, ["debug"]); + addLogEntry("syncListHashFile = " ~ syncListHashFile, ["debug"]); + addLogEntry("businessSharedItemsFilePath = " ~ businessSharedItemsFilePath, ["debug"]); + addLogEntry("businessSharedItemsHashFile = " ~ businessSharedItemsHashFile, ["debug"]); + + // Configure the Hash and Backup File Permission Value + string valueToConvert = to!string(defaultFilePermissionMode); + auto convertedValue = parse!long(valueToConvert, 8); + convertedPermissionValue = to!int(convertedValue); + + // Initialise the application using the configuration file if it exists if (!exists(userConfigFilePath)) { // 'user' configuration file does not exist // Is there a system configuration file? if (!exists(systemConfigFilePath)) { // 'system' configuration file does not exist - log.vlog("No user or system config file found, using application defaults"); - return true; + addLogEntry("No user or system config file found, using application defaults", ["verbose"]); + applicableConfigFilePath = userConfigFilePath; + configurationInitialised = true; } else { // 'system' configuration file exists // can we load the configuration file without error? 
- if (load(systemConfigFilePath)) { + if (loadConfigFile(systemConfigFilePath)) { // configuration file loaded without error - log.log("System configuration file successfully loaded"); - return true; + addLogEntry("System configuration file successfully loaded"); + + // Set 'applicableConfigFilePath' to equal the 'config' we loaded + applicableConfigFilePath = systemConfigFilePath; + // Update the configHashFile path value to ensure we are using the system 'config' file for the hash + configHashFile = buildNormalizedPath(buildPath(systemConfigDirName, ".config.hash")); + configurationInitialised = true; } else { // there was a problem loading the configuration file - log.log("System configuration file has errors - please check your configuration"); - return false; + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("System configuration file has errors - please check your configuration"); } - } + } } else { // 'user' configuration file exists // can we load the configuration file without error? - if (load(userConfigFilePath)) { + if (loadConfigFile(userConfigFilePath)) { // configuration file loaded without error - log.log("Configuration file successfully loaded"); - return true; + addLogEntry("Configuration file successfully loaded"); + + // Set 'applicableConfigFilePath' to equal the 'config' we loaded + applicableConfigFilePath = userConfigFilePath; + configurationInitialised = true; } else { // there was a problem loading the configuration file - log.log("Configuration file has errors - please check your configuration"); - return false; + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("Configuration file has errors - please check your configuration"); + } + } + + // Advise the user path that we will use for the application state data + if (canFind(applicableConfigFilePath, configDirName)) { + addLogEntry("Using 'user' configuration path for application state data: " ~ configDirName, ["verbose"]); + } else { + if (canFind(applicableConfigFilePath, systemConfigDirName)) { + addLogEntry("Using 'system' configuration path for application state data: " ~ systemConfigDirName, ["verbose"]); + } + } + + // return if the configuration was initialised + return configurationInitialised; + } + + // Create a backup of the 'config' file if it does not exist + void createBackupConfigFile() { + if (!getValueBool("dry_run")) { + // Is there a backup of the config file if the config file exists? + if (exists(applicableConfigFilePath)) { + addLogEntry("Creating a backup of the applicable config file", ["debug"]); + // create backup copy of current config file + std.file.copy(applicableConfigFilePath, configBackupFile); + // File Copy should only be readable by the user who created it - 0600 permissions needed + configBackupFile.setAttributes(convertedPermissionValue); } + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... 
+ addLogEntry("DRY RUN: Not creating backup config file as --dry-run has been used"); + } + } + + // Return a given string value based on the provided key + string getValueString(string key) { + auto p = key in stringValues; + if (p) { + return *p; + } else { + throw new Exception("Missing config value: " ~ key); } } - void update_from_args(string[] args) - { - // Add additional options that are NOT configurable via config file - stringValues["create_directory"] = ""; - stringValues["create_share_link"] = ""; - stringValues["destination_directory"] = ""; - stringValues["get_file_link"] = ""; - stringValues["modified_by"] = ""; - stringValues["get_o365_drive_id"] = ""; - stringValues["remove_directory"] = ""; - stringValues["single_directory"] = ""; - stringValues["source_directory"] = ""; - stringValues["auth_files"] = ""; - stringValues["auth_response"] = ""; - boolValues["display_config"] = false; - boolValues["display_sync_status"] = false; - boolValues["print_token"] = false; - boolValues["logout"] = false; - boolValues["reauth"] = false; - boolValues["monitor"] = false; - boolValues["synchronize"] = false; - boolValues["force"] = false; - boolValues["list_business_shared_folders"] = false; - boolValues["force_sync"] = false; - boolValues["with_editing_perms"] = false; + // Return a given long value based on the provided key + long getValueLong(string key) { + auto p = key in longValues; + if (p) { + return *p; + } else { + throw new Exception("Missing config value: " ~ key); + } + } - // Application Startup option validation - try { - string tmpStr; - bool tmpBol; - long tmpVerb; - // duplicated from main.d to get full help output! - auto opt = getopt( + // Return a given bool value based on the provided key + bool getValueBool(string key) { + auto p = key in boolValues; + if (p) { + return *p; + } else { + throw new Exception("Missing config value: " ~ key); + } + } + + // Set a given string value based on the provided key + void setValueString(string key, string value) { + stringValues[key] = value; + } - args, - std.getopt.config.bundling, - std.getopt.config.caseSensitive, - "auth-files", - "Perform authentication not via interactive dialog but via files read/writes to these files.", - &stringValues["auth_files"], - "auth-response", - "Perform authentication not via interactive dialog but via providing the response url directly.", - &stringValues["auth_response"], - "check-for-nomount", - "Check for the presence of .nosync in the syncdir root. If found, do not perform sync.", - &boolValues["check_nomount"], - "check-for-nosync", - "Check for the presence of .nosync in each directory. If found, skip directory from sync.", - &boolValues["check_nosync"], - "classify-as-big-delete", - "Number of children in a path that is locally removed which will be classified as a 'big data delete'", - &longValues["classify_as_big_delete"], - "cleanup-local-files", - "Cleanup additional local files when using --download-only. 
This will remove local data.", - &boolValues["cleanup_local_files"], - "create-directory", - "Create a directory on OneDrive - no sync will be performed.", - &stringValues["create_directory"], - "create-share-link", - "Create a shareable link for an existing file on OneDrive", - &stringValues["create_share_link"], - "debug-https", - "Debug OneDrive HTTPS communication.", - &boolValues["debug_https"], - "destination-directory", - "Destination directory for renamed or move on OneDrive - no sync will be performed.", - &stringValues["destination_directory"], - "disable-notifications", - "Do not use desktop notifications in monitor mode.", - &boolValues["disable_notifications"], - "disable-download-validation", - "Disable download validation when downloading from OneDrive", - &boolValues["disable_download_validation"], - "disable-upload-validation", - "Disable upload validation when uploading to OneDrive", - &boolValues["disable_upload_validation"], - "display-config", - "Display what options the client will use as currently configured - no sync will be performed.", - &boolValues["display_config"], - "display-running-config", - "Display what options the client has been configured to use on application startup.", - &boolValues["display_running_config"], - "display-sync-status", - "Display the sync status of the client - no sync will be performed.", - &boolValues["display_sync_status"], - "download-only", - "Replicate the OneDrive online state locally, by only downloading changes from OneDrive. Do not upload local changes to OneDrive.", - &boolValues["download_only"], - "dry-run", - "Perform a trial sync with no changes made", - &boolValues["dry_run"], - "enable-logging", - "Enable client activity to a separate log file", - &boolValues["enable_logging"], - "force-http-11", - "Force the use of HTTP 1.1 for all operations", - &boolValues["force_http_11"], - "force", - "Force the deletion of data when a 'big delete' is detected", - &boolValues["force"], - "force-sync", - "Force a synchronization of a specific folder, only when using --synchronize --single-directory and ignore all non-default skip_dir and skip_file rules", - &boolValues["force_sync"], - "get-file-link", - "Display the file link of a synced file", - &stringValues["get_file_link"], - "get-O365-drive-id", - "Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library", - &stringValues["get_o365_drive_id"], - "local-first", - "Synchronize from the local directory source first, before downloading changes from OneDrive.", - &boolValues["local_first"], - "log-dir", - "Directory where logging output is saved to, needs to end with a slash.", - &stringValues["log_dir"], - "logout", - "Logout the current user", - &boolValues["logout"], - "min-notify-changes", - "Minimum number of pending incoming changes necessary to trigger a desktop notification", - &longValues["min_notify_changes"], - "modified-by", - "Display the last modified by details of a given path", - &stringValues["modified_by"], - "monitor|m", - "Keep monitoring for local and remote changes", - &boolValues["monitor"], - "monitor-interval", - "Number of seconds by which each sync operation is undertaken when idle under monitor mode.", - &longValues["monitor_interval"], - "monitor-fullscan-frequency", - "Number of sync runs before performing a full local scan of the synced directory", - &longValues["monitor_fullscan_frequency"], - "monitor-log-frequency", - "Frequency of logging in monitor mode", - &longValues["monitor_log_frequency"], - 
"no-remote-delete", - "Do not delete local file 'deletes' from OneDrive when using --upload-only", - &boolValues["no_remote_delete"], - "print-token", - "Print the access token, useful for debugging", - &boolValues["print_token"], - "reauth", - "Reauthenticate the client with OneDrive", - &boolValues["reauth"], - "resync", - "Forget the last saved state, perform a full sync", - &boolValues["resync"], - "resync-auth", - "Approve the use of performing a --resync action", - &boolValues["resync_auth"], - "remove-directory", - "Remove a directory on OneDrive - no sync will be performed.", - &stringValues["remove_directory"], - "remove-source-files", - "Remove source file after successful transfer to OneDrive when using --upload-only", - &boolValues["remove_source_files"], - "single-directory", - "Specify a single local directory within the OneDrive root to sync.", - &stringValues["single_directory"], - "skip-dot-files", - "Skip dot files and folders from syncing", - &boolValues["skip_dotfiles"], - "skip-file", - "Skip any files that match this pattern from syncing", - &stringValues["skip_file"], - "skip-dir", - "Skip any directories that match this pattern from syncing", - &stringValues["skip_dir"], - "skip-size", - "Skip new files larger than this size (in MB)", - &longValues["skip_size"], - "skip-dir-strict-match", - "When matching skip_dir directories, only match explicit matches", - &boolValues["skip_dir_strict_match"], - "skip-symlinks", - "Skip syncing of symlinks", - &boolValues["skip_symlinks"], - "source-directory", - "Source directory to rename or move on OneDrive - no sync will be performed.", - &stringValues["source_directory"], - "space-reservation", - "The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation", - &longValues["space_reservation"], - "syncdir", - "Specify the local directory used for synchronization to OneDrive", - &stringValues["sync_dir"], - "synchronize", - "Perform a synchronization", - &boolValues["synchronize"], - "sync-root-files", - "Sync all files in sync_dir root when using sync_list.", - &boolValues["sync_root_files"], - "upload-only", - "Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive. 
Do not download changes from OneDrive.", - &boolValues["upload_only"], - "user-agent", - "Specify a User Agent string to the http client", - &stringValues["user_agent"], - "confdir", - "Set the directory used to store the configuration files", - &tmpStr, - "verbose|v+", - "Print more details, useful for debugging (repeat for extra debugging)", - &tmpVerb, - "version", - "Print the version and exit", - &tmpBol, - "list-shared-folders", - "List OneDrive Business Shared Folders", - &boolValues["list_business_shared_folders"], - "sync-shared-folders", - "Sync OneDrive Business Shared Folders", - &boolValues["sync_business_shared_folders"], - "with-editing-perms", - "Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link ", - &boolValues["with_editing_perms"] - ); - if (opt.helpWanted) { - outputLongHelp(opt.options); - exit(EXIT_SUCCESS); - } - } catch (GetOptException e) { - log.error(e.msg); - log.error("Try 'onedrive -h' for more information"); - exit(EXIT_FAILURE); - } catch (Exception e) { - // error - log.error(e.msg); - log.error("Try 'onedrive -h' for more information"); - exit(EXIT_FAILURE); - } + // Set a given long value based on the provided key + void setValueLong(string key, long value) { + longValues[key] = value; } - string getValueString(string key) - { - auto p = key in stringValues; - if (p) { - return *p; - } else { - throw new Exception("Missing config value: " ~ key); - } + // Set a given long value based on the provided key + void setValueBool(string key, bool value) { + boolValues[key] = value; } - - long getValueLong(string key) - { - auto p = key in longValues; - if (p) { - return *p; + + // Configure the directory octal permission value + void configureRequiredDirectoryPermisions() { + // return the directory permission mode required + // - return octal!defaultDirectoryPermissionMode; ... cant be used .. which is odd + // Error: variable defaultDirectoryPermissionMode cannot be read at compile time + if (getValueLong("sync_dir_permissions") != defaultDirectoryPermissionMode) { + // return user configured permissions as octal integer + string valueToConvert = to!string(getValueLong("sync_dir_permissions")); + auto convertedValue = parse!long(valueToConvert, 8); + configuredDirectoryPermissionMode = to!int(convertedValue); } else { - throw new Exception("Missing config value: " ~ key); + // return default as octal integer + string valueToConvert = to!string(defaultDirectoryPermissionMode); + auto convertedValue = parse!long(valueToConvert, 8); + configuredDirectoryPermissionMode = to!int(convertedValue); } } - bool getValueBool(string key) - { - auto p = key in boolValues; - if (p) { - return *p; + // Configure the file octal permission value + void configureRequiredFilePermisions() { + // return the file permission mode required + // - return octal!defaultFilePermissionMode; ... cant be used .. 
which is odd + // Error: variable defaultFilePermissionMode cannot be read at compile time + if (getValueLong("sync_file_permissions") != defaultFilePermissionMode) { + // return user configured permissions as octal integer + string valueToConvert = to!string(getValueLong("sync_file_permissions")); + auto convertedValue = parse!long(valueToConvert, 8); + configuredFilePermissionMode = to!int(convertedValue); } else { - throw new Exception("Missing config value: " ~ key); + // return default as octal integer + string valueToConvert = to!string(defaultFilePermissionMode); + auto convertedValue = parse!long(valueToConvert, 8); + configuredFilePermissionMode = to!int(convertedValue); } } - void setValueBool(string key, bool value) - { - boolValues[key] = value; - } - - void setValueString(string key, string value) - { - stringValues[key] = value; + // Read the configuredDirectoryPermissionMode and return + int returnRequiredDirectoryPermisions() { + if (configuredDirectoryPermissionMode == 0) { + // the configured value is zero, this means that directories would get + // values of d--------- + configureRequiredDirectoryPermisions(); + } + return configuredDirectoryPermissionMode; } - void setValueLong(string key, long value) - { - longValues[key] = value; + // Read the configuredFilePermissionMode and return + int returnRequiredFilePermisions() { + if (configuredFilePermissionMode == 0) { + // the configured value is zero + configureRequiredFilePermisions(); + } + return configuredFilePermissionMode; } - - // load a configuration file - private bool load(string filename) - { + + // Load a configuration file from the provided filename + private bool loadConfigFile(string filename) { // configure function variables try { + addLogEntry("Reading configuration file: " ~ filename); readText(filename); } catch (std.file.FileException e) { // Unable to access required file - log.error("ERROR: Unable to access ", e.msg); + addLogEntry("ERROR: Unable to access " ~ e.msg); // Use exit scopes to shutdown API return false; } @@ -679,6 +717,22 @@ final class Config c.popFront(); // only accept "true" as true value. TODO Should we support other formats? setValueBool(key, c.front.dup == "true" ? true : false); + + // skip_dotfiles tracking for change + if (key == "skip_dotfiles") { + configFileSkipDotfiles = true; + } + + // skip_symlinks tracking for change + if (key == "skip_symlinks") { + configFileSkipSymbolicLinks = true; + } + + // sync_business_shared_items tracking for change + if (key == "sync_business_shared_items") { + configFileSyncBusinessSharedItems = true; + } + } else { auto pp = key in stringValues; if (pp) { @@ -688,7 +742,23 @@ final class Config // --syncdir ARG // --skip-file ARG // --skip-dir ARG - if (key == "sync_dir") configFileSyncDir = c.front.dup; + + // sync_dir + if (key == "sync_dir") { + // configure a temp variable + string tempSyncDirValue = c.front.dup; + // is this empty ? 
+ if (!strip(tempSyncDirValue).empty) { + configFileSyncDir = tempSyncDirValue; + } else { + // sync_dir cannot be empty + addLogEntry("Invalid value for key in config file: " ~ key); + addLogEntry("ERROR: sync_dir in config file cannot be empty - this is a fatal error and must be corrected"); + exit(EXIT_FAILURE); + } + } + + // skip_file if (key == "skip_file") { // Handle multiple entries of skip_file if (configFileSkipFile.empty) { @@ -700,6 +770,8 @@ final class Config setValueString("skip_file", configFileSkipFile); } } + + // skip_dir if (key == "skip_dir") { // Handle multiple entries of skip_dir if (configFileSkipDir.empty) { @@ -711,6 +783,7 @@ final class Config setValueString("skip_dir", configFileSkipDir); } } + // --single-directory Strip quotation marks from path // This is an issue when using ONEDRIVE_SINGLE_DIRECTORY with Docker if (key == "single_directory") { @@ -718,184 +791,1621 @@ final class Config string configSingleDirectory = strip(to!string(c.front.dup), "\""); setValueString("single_directory", configSingleDirectory); } + // Azure AD Configuration if (key == "azure_ad_endpoint") { - string azureConfigValue = c.front.dup; + string azureConfigValue = strip(c.front.dup); switch(azureConfigValue) { case "": - log.log("Using config option for Global Azure AD Endpoints"); + addLogEntry("Using detault config option for Global Azure AD Endpoints"); break; case "USL4": - log.log("Using config option for Azure AD for US Government Endpoints"); + addLogEntry("Using config option for Azure AD for US Government Endpoints"); break; case "USL5": - log.log("Using config option for Azure AD for US Government Endpoints (DOD)"); + addLogEntry("Using config option for Azure AD for US Government Endpoints (DOD)"); break; case "DE": - log.log("Using config option for Azure AD Germany"); + addLogEntry("Using config option for Azure AD Germany"); break; case "CN": - log.log("Using config option for Azure AD China operated by 21Vianet"); + addLogEntry("Using config option for Azure AD China operated by 21Vianet"); break; // Default - all other entries default: - log.log("Unknown Azure AD Endpoint - using Global Azure AD Endpoints"); + addLogEntry("Unknown Azure AD Endpoint - using Global Azure AD Endpoints"); + } + } + + // Application ID + if (key == "application_id") { + // This key cannot be empty + string tempApplicationId = strip(c.front.dup); + if (tempApplicationId.empty) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + addLogEntry("application_id in config file cannot be empty - using default application_id", ["debug"]); + setValueString("application_id", defaultApplicationId); + } else { + setValueString("application_id", tempApplicationId); + } + } + + // Drive ID + if (key == "drive_id") { + // This key cannot be empty + string tempApplicationId = strip(c.front.dup); + if (tempApplicationId.empty) { + addLogEntry("Invalid value for key in config file: " ~ key); + addLogEntry("drive_id in config file cannot be empty - this is a fatal error and must be corrected by removing this entry from your config file", ["debug"]); + exit(EXIT_FAILURE); + } else { + setValueString("drive_id", tempApplicationId); + configFileDriveId = tempApplicationId; } } + + // Log Directory + if (key == "log_dir") { + // This key cannot be empty + string tempLogDir = strip(c.front.dup); + if (tempLogDir.empty) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + addLogEntry("log_dir in config file cannot be empty - using default 
log_dir", ["debug"]); + setValueString("log_dir", defaultLogFileDir); + } else { + setValueString("log_dir", tempLogDir); + } + } + } else { auto ppp = key in longValues; if (ppp) { c.popFront(); - setValueLong(key, to!long(c.front.dup)); - // if key is space_reservation we have to calculate MB -> bytes + ulong thisConfigValue; + + // Can this value actually be converted to an integer? + try { + thisConfigValue = to!long(c.front.dup); + } catch (std.conv.ConvException) { + addLogEntry("Invalid value for key in config file: " ~ key); + return false; + } + + setValueLong(key, thisConfigValue); + + // if key is 'monitor_interval' the value must be 300 or greater + if (key == "monitor_interval") { + // temp value + ulong tempValue = thisConfigValue; + // the temp value needs to be greater than 300 + if (tempValue < 300) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + tempValue = 300; + } + setValueLong("monitor_interval", to!long(tempValue)); + } + + // if key is 'monitor_fullscan_frequency' the value must be 12 or greater + if (key == "monitor_fullscan_frequency") { + // temp value + ulong tempValue = thisConfigValue; + // the temp value needs to be greater than 12 + if (tempValue < 12) { + // If this is not set to zero (0) then we are not disabling 'monitor_fullscan_frequency' + if (tempValue != 0) { + // invalid value + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + tempValue = 12; + } + } + setValueLong("monitor_fullscan_frequency", to!long(tempValue)); + } + + // if key is 'space_reservation' we have to calculate MB -> bytes if (key == "space_reservation") { // temp value - ulong tempValue = to!long(c.front.dup); + ulong tempValue = thisConfigValue; // a value of 0 needs to be made at least 1MB .. if (tempValue == 0) { + addLogEntry("Invalid value for key in config file - using 1MB: " ~ key); tempValue = 1; } setValueLong("space_reservation", to!long(tempValue * 2^^20)); } + + // if key is 'ip_protocol_version' this has to be a value of 0 or 1 or 2 .. nothing else + if (key == "ip_protocol_version") { + // temp value + ulong tempValue = thisConfigValue; + // If greater than 2, set to default + if (tempValue > 2) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + // Set to default of 0 + tempValue = 0; + } + setValueLong("ip_protocol_version", to!long(tempValue)); + } + } else { - log.log("Unknown key in config file: ", key); - return false; + // unknown key + addLogEntry("Unknown key in config file: " ~ key); + + // handle depreciation + bool ignore_depreciation = false; + + // min_notify_changes has been depreciated + if (key == "min_notify_changes") { + addLogEntry(); + addLogEntry("The option 'min_notify_changes' has been depreciated and will be ignored. Please read the updated documentation and update your client configuration."); + addLogEntry(); + ignore_depreciation = true; + } + + // force_http_2 has been depreciated + if (key == "force_http_2") { + addLogEntry(); + addLogEntry("The option 'force_http_2' has been depreciated and will be ignored. 
Please read the updated documentation and update your client configuration."); + addLogEntry(); + ignore_depreciation = true; + } + + // Application configuration update required for Business Shared Folders + if (key == "sync_business_shared_folders") { + addLogEntry(); + addLogEntry("The process for synchronising Microsoft OneDrive Business Shared Folders has changed."); + addLogEntry("Please review the revised documentation on how to configure this application feature. You must update your client configuration and make any necessary online adjustments accordingly."); + addLogEntry(); + } + // Return false + return ignore_depreciation; } } } } else { - log.log("Malformed config line: ", lineBuffer); + // malformed config line + addLogEntry("Malformed config line: " ~ lineBuffer); return false; } } + + // Close the file access + file.close(); + // Free object and memory + object.destroy(file); + object.destroy(range); + object.destroy(lineBuffer); return true; } + + // Update the application configuration based on CLI passed in parameters + void updateFromArgs(string[] cliArgs) { + // Add additional options that are NOT configurable via config file + stringValues["create_directory"] = ""; + stringValues["create_share_link"] = ""; + stringValues["destination_directory"] = ""; + stringValues["get_file_link"] = ""; + stringValues["modified_by"] = ""; + stringValues["sharepoint_library_name"] = ""; + stringValues["remove_directory"] = ""; + stringValues["single_directory"] = ""; + stringValues["source_directory"] = ""; + stringValues["auth_files"] = ""; + stringValues["auth_response"] = ""; + boolValues["display_config"] = false; + boolValues["display_sync_status"] = false; + boolValues["display_quota"] = false; + boolValues["print_token"] = false; + boolValues["logout"] = false; + boolValues["reauth"] = false; + boolValues["monitor"] = false; + boolValues["synchronize"] = false; + boolValues["force"] = false; + boolValues["list_business_shared_items"] = false; + boolValues["force_sync"] = false; + boolValues["with_editing_perms"] = false; + + // Specific options for CLI input handling + stringValues["sync_dir_cli"] = ""; + + // Application Startup option validation + try { + string tmpStr; + bool tmpBol; + long tmpVerb; + // duplicated from main.d to get full help output! + auto opt = getopt( - void configureRequiredDirectoryPermisions() { - // return the directory permission mode required - // - return octal!defaultDirectoryPermissionMode; ... cant be used .. which is odd - // Error: variable defaultDirectoryPermissionMode cannot be read at compile time - if (getValueLong("sync_dir_permissions") != defaultDirectoryPermissionMode) { - // return user configured permissions as octal integer - string valueToConvert = to!string(getValueLong("sync_dir_permissions")); - auto convertedValue = parse!long(valueToConvert, 8); - configuredDirectoryPermissionMode = to!int(convertedValue); - } else { - // return default as octal integer - string valueToConvert = to!string(defaultDirectoryPermissionMode); - auto convertedValue = parse!long(valueToConvert, 8); - configuredDirectoryPermissionMode = to!int(convertedValue); - } - } - - void configureRequiredFilePermisions() { - // return the file permission mode required - // - return octal!defaultFilePermissionMode; ... cant be used .. 
which is odd - // Error: variable defaultFilePermissionMode cannot be read at compile time - if (getValueLong("sync_file_permissions") != defaultFilePermissionMode) { - // return user configured permissions as octal integer - string valueToConvert = to!string(getValueLong("sync_file_permissions")); - auto convertedValue = parse!long(valueToConvert, 8); - configuredFilePermissionMode = to!int(convertedValue); - } else { - // return default as octal integer - string valueToConvert = to!string(defaultFilePermissionMode); - auto convertedValue = parse!long(valueToConvert, 8); - configuredFilePermissionMode = to!int(convertedValue); + cliArgs, + std.getopt.config.bundling, + std.getopt.config.caseSensitive, + "auth-files", + "Perform authentication not via interactive dialog but via files read/writes to these files.", + &stringValues["auth_files"], + "auth-response", + "Perform authentication not via interactive dialog but via providing the response url directly.", + &stringValues["auth_response"], + "check-for-nomount", + "Check for the presence of .nosync in the syncdir root. If found, do not perform sync.", + &boolValues["check_nomount"], + "check-for-nosync", + "Check for the presence of .nosync in each directory. If found, skip directory from sync.", + &boolValues["check_nosync"], + "classify-as-big-delete", + "Number of children in a path that is locally removed which will be classified as a 'big data delete'", + &longValues["classify_as_big_delete"], + "cleanup-local-files", + "Cleanup additional local files when using --download-only. This will remove local data.", + &boolValues["cleanup_local_files"], + "create-directory", + "Create a directory on OneDrive - no sync will be performed.", + &stringValues["create_directory"], + "create-share-link", + "Create a shareable link for an existing file on OneDrive", + &stringValues["create_share_link"], + "debug-https", + "Debug OneDrive HTTPS communication.", + &boolValues["debug_https"], + "destination-directory", + "Destination directory for renamed or move on OneDrive - no sync will be performed.", + &stringValues["destination_directory"], + "disable-notifications", + "Do not use desktop notifications in monitor mode.", + &boolValues["disable_notifications"], + "disable-download-validation", + "Disable download validation when downloading from OneDrive", + &boolValues["disable_download_validation"], + "disable-upload-validation", + "Disable upload validation when uploading to OneDrive", + &boolValues["disable_upload_validation"], + "display-config", + "Display what options the client will use as currently configured - no sync will be performed.", + &boolValues["display_config"], + "display-running-config", + "Display what options the client has been configured to use on application startup.", + &boolValues["display_running_config"], + "display-sync-status", + "Display the sync status of the client - no sync will be performed.", + &boolValues["display_sync_status"], + "display-quota", + "Display the quota status of the client - no sync will be performed.", + &boolValues["display_quota"], + "download-only", + "Replicate the OneDrive online state locally, by only downloading changes from OneDrive. 
Do not upload local changes to OneDrive.", + &boolValues["download_only"], + "dry-run", + "Perform a trial sync with no changes made", + &boolValues["dry_run"], + "enable-logging", + "Enable client activity to a separate log file", + &boolValues["enable_logging"], + "force-http-11", + "Force the use of HTTP 1.1 for all operations", + &boolValues["force_http_11"], + "force", + "Force the deletion of data when a 'big delete' is detected", + &boolValues["force"], + "force-sync", + "Force a synchronization of a specific folder, only when using --sync --single-directory and ignore all non-default skip_dir and skip_file rules", + &boolValues["force_sync"], + "get-file-link", + "Display the file link of a synced file", + &stringValues["get_file_link"], + "get-sharepoint-drive-id", + "Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library", + &stringValues["sharepoint_library_name"], + "get-O365-drive-id", + "Query and return the Office 365 Drive ID for a given Office 365 SharePoint Shared Library (DEPRECIATED)", + &stringValues["sharepoint_library_name"], + "local-first", + "Synchronize from the local directory source first, before downloading changes from OneDrive.", + &boolValues["local_first"], + "log-dir", + "Directory where logging output is saved to, needs to end with a slash.", + &stringValues["log_dir"], + "logout", + "Logout the current user", + &boolValues["logout"], + "modified-by", + "Display the last modified by details of a given path", + &stringValues["modified_by"], + "monitor|m", + "Keep monitoring for local and remote changes", + &boolValues["monitor"], + "monitor-interval", + "Number of seconds by which each sync operation is undertaken when idle under monitor mode.", + &longValues["monitor_interval"], + "monitor-fullscan-frequency", + "Number of sync runs before performing a full local scan of the synced directory", + &longValues["monitor_fullscan_frequency"], + "monitor-log-frequency", + "Frequency of logging in monitor mode", + &longValues["monitor_log_frequency"], + "no-remote-delete", + "Do not delete local file 'deletes' from OneDrive when using --upload-only", + &boolValues["no_remote_delete"], + "print-access-token", + "Print the access token, useful for debugging", + &boolValues["print_token"], + "reauth", + "Reauthenticate the client with OneDrive", + &boolValues["reauth"], + "resync", + "Forget the last saved state, perform a full sync", + &boolValues["resync"], + "resync-auth", + "Approve the use of performing a --resync action", + &boolValues["resync_auth"], + "remove-directory", + "Remove a directory on OneDrive - no sync will be performed.", + &stringValues["remove_directory"], + "remove-source-files", + "Remove source file after successful transfer to OneDrive when using --upload-only", + &boolValues["remove_source_files"], + "single-directory", + "Specify a single local directory within the OneDrive root to sync.", + &stringValues["single_directory"], + "skip-dot-files", + "Skip dot files and folders from syncing", + &boolValues["skip_dotfiles"], + "skip-file", + "Skip any files that match this pattern from syncing", + &stringValues["skip_file"], + "skip-dir", + "Skip any directories that match this pattern from syncing", + &stringValues["skip_dir"], + "skip-size", + "Skip new files larger than this size (in MB)", + &longValues["skip_size"], + "skip-dir-strict-match", + "When matching skip_dir directories, only match explicit matches", + &boolValues["skip_dir_strict_match"], + "skip-symlinks", + "Skip syncing of symlinks", + 
&boolValues["skip_symlinks"], + "source-directory", + "Source directory to rename or move on OneDrive - no sync will be performed.", + &stringValues["source_directory"], + "space-reservation", + "The amount of disk space to reserve (in MB) to avoid 100% disk space utilisation", + &longValues["space_reservation"], + "syncdir", + "Specify the local directory used for synchronisation to OneDrive", + &stringValues["sync_dir_cli"], + "sync|s", + "Perform a synchronisation with Microsoft OneDrive", + &boolValues["synchronize"], + "synchronize", + "Perform a synchronisation with Microsoft OneDrive (DEPRECIATED)", + &boolValues["synchronize"], + "sync-root-files", + "Sync all files in sync_dir root when using sync_list.", + &boolValues["sync_root_files"], + "upload-only", + "Replicate the locally configured sync_dir state to OneDrive, by only uploading local changes to OneDrive. Do not download changes from OneDrive.", + &boolValues["upload_only"], + "confdir", + "Set the directory used to store the configuration files", + &tmpStr, + "verbose|v+", + "Print more details, useful for debugging (repeat for extra debugging)", + &tmpVerb, + "version", + "Print the version and exit", + &tmpBol, + "with-editing-perms", + "Create a read-write shareable link for an existing file on OneDrive when used with --create-share-link ", + &boolValues["with_editing_perms"] + ); + + // Was --syncdir used? + if (!getValueString("sync_dir_cli").empty) { + // Build the line we need to update and/or write out + string newConfigOptionSyncDirLine = "sync_dir = \"" ~ getValueString("sync_dir_cli") ~ "\""; + + // Does a 'config' file exist? + if (!exists(applicableConfigFilePath)) { + // No existing 'config' file exists, create it, and write the 'sync_dir' configuration to it + if (!getValueBool("dry_run")) { + std.file.write(applicableConfigFilePath, newConfigOptionSyncDirLine); + // Config file should only be readable by the user who created it - 0600 permissions needed + applicableConfigFilePath.setAttributes(convertedPermissionValue); + } + } else { + // an existing config file exists .. 
so this now becomes tricky + // string replace 'sync_dir' if it exists, in the existing 'config' file, but only if 'sync_dir' (already read in) is different from 'sync_dir_cli' + if ( (getValueString("sync_dir")) != (getValueString("sync_dir_cli")) ) { + // values are different + File applicableConfigFilePathFileHandle = File(applicableConfigFilePath, "r"); + string lineBuffer; + string[] newConfigFileEntries; + + // read applicableConfigFilePath line by line + auto range = applicableConfigFilePathFileHandle.byLine(); + + // for each 'config' file line + foreach (line; range) { + lineBuffer = stripLeft(line).to!string; + if (lineBuffer.length == 0 || lineBuffer[0] == ';' || lineBuffer[0] == '#') { + newConfigFileEntries ~= [lineBuffer]; + } else { + auto c = lineBuffer.matchFirst(configRegex); + if (!c.empty) { + c.popFront(); // skip the whole match + string key = c.front.dup; + if (key == "sync_dir") { + // lineBuffer is the line we want to keep + newConfigFileEntries ~= [newConfigOptionSyncDirLine]; + } else { + newConfigFileEntries ~= [lineBuffer]; + } + } + } + } + + // close original 'config' file if still open + if (applicableConfigFilePathFileHandle.isOpen()) { + // close open file + applicableConfigFilePathFileHandle.close(); + } + + // free memory from file open + object.destroy(applicableConfigFilePathFileHandle); + + // Update the existing item in the file line array + if (!getValueBool("dry_run")) { + // Open the file with write access using 'w' mode to overwrite existing content + File applicableConfigFilePathFileHandleWrite = File(applicableConfigFilePath, "w"); + + // Write each line from the 'newConfigFileEntries' array to the file + foreach (line; newConfigFileEntries) { + applicableConfigFilePathFileHandleWrite.writeln(line); + } + + // Flush and close the file handle to ensure all data is written + if (applicableConfigFilePathFileHandleWrite.isOpen()) { + applicableConfigFilePathFileHandleWrite.flush(); + applicableConfigFilePathFileHandleWrite.close(); + } + + // free memory from file open + object.destroy(applicableConfigFilePathFileHandleWrite); + } + } + } + + // Final - configure sync_dir with the value of sync_dir_cli so that it can be used as part of the application configuration and detect change + setValueString("sync_dir", getValueString("sync_dir_cli")); + } + + // Was --auth-files used? 
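+		// The 'auth_files' value is expected as two file paths separated by a colon, in the form "authUrl:responseUrl".
+		// Illustrative invocation only - the paths below are hypothetical examples:
+		//   onedrive --auth-files ~/.config/onedrive/authUrl:~/.config/onedrive/responseUrl
+		// A leading '~' in either path is expanded below, either by the shell environment or by the calculated home path.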
+ if (!getValueString("auth_files").empty) { + // --auth-files used, need to validate that '~' was not used as a path identifier, and if yes, perform the correct expansion + string[] tempAuthFiles = getValueString("auth_files").split(":"); + string tempAuthUrl = tempAuthFiles[0]; + string tempResponseUrl = tempAuthFiles[1]; + string newAuthFilesString; + + // shell expansion if required + if (!shellEnvironmentSet){ + // No shell environment is set, no automatic expansion of '~' if present is possible + // Does the 'currently configured' tempAuthUrl include a ~ + if (canFind(tempAuthUrl, "~")) { + // A ~ was found in auth_files(authURL) + addLogEntry("auth_files: A '~' was found in 'auth_files(authURL)', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]); + tempAuthUrl = buildNormalizedPath(buildPath(defaultHomePath, strip(tempAuthUrl, "~"))); + } + + // Does the 'currently configured' tempAuthUrl include a ~ + if (canFind(tempResponseUrl, "~")) { + // A ~ was found in auth_files(authURL) + addLogEntry("auth_files: A '~' was found in 'auth_files(tempResponseUrl)', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]); + tempResponseUrl = buildNormalizedPath(buildPath(defaultHomePath, strip(tempResponseUrl, "~"))); + } + } else { + // Shell environment is set, automatic expansion of '~' if present is possible + // Does the 'currently configured' tempAuthUrl include a ~ + if (canFind(tempAuthUrl, "~")) { + // A ~ was found in auth_files(authURL) + addLogEntry("auth_files: A '~' was found in the configured 'auth_files(authURL)', automatically expanding as SHELL and USER environment variable is set", ["debug"]); + tempAuthUrl = expandTilde(tempAuthUrl); + } + + // Does the 'currently configured' tempAuthUrl include a ~ + if (canFind(tempResponseUrl, "~")) { + // A ~ was found in auth_files(authURL) + addLogEntry("auth_files: A '~' was found in the configured 'auth_files(tempResponseUrl)', automatically expanding as SHELL and USER environment variable is set", ["debug"]); + tempResponseUrl = expandTilde(tempResponseUrl); + } + } + + // Build new string + newAuthFilesString = tempAuthUrl ~ ":" ~ tempResponseUrl; + addLogEntry("auth_files - updated value: " ~ newAuthFilesString, ["debug"]); + setValueString("auth_files", newAuthFilesString); + } + + if (opt.helpWanted) { + outputLongHelp(opt.options); + exit(EXIT_SUCCESS); + } + } catch (GetOptException e) { + // getOpt error - must use writeln() here + writeln(e.msg); + writeln("Try 'onedrive -h' for more information"); + exit(EXIT_FAILURE); + } catch (Exception e) { + // general error - must use writeln() here + writeln(e.msg); + writeln("Try 'onedrive -h' for more information"); + exit(EXIT_FAILURE); } } + + // Check the arguments passed in for any that will be depreciated + void checkDepreciatedOptions(string[] cliArgs) { + + bool depreciatedCommandsFound = false; + + foreach (cliArg; cliArgs) { + // Check each CLI arg for items that have been depreciated + + // --synchronize depreciated in v2.5.0, will be removed in future version + if (cliArg == "--synchronize") { + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("DEPRECIATION WARNING: --synchronize has been depreciated in favour of --sync or -s"); + depreciatedCommandsFound = true; + } + + // --get-O365-drive-id depreciated in v2.5.0, will be removed in future version + if (cliArg == 
"--get-O365-drive-id") { + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("DEPRECIATION WARNING: --get-O365-drive-id has been depreciated in favour of --get-sharepoint-drive-id"); + depreciatedCommandsFound = true; + } + } + + if (depreciatedCommandsFound) { + addLogEntry("DEPRECIATION WARNING: Depreciated commands will be removed in a future release."); + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + } + } + + // Display the applicable application configuration + void displayApplicationConfiguration() { + if (getValueBool("display_running_config")) { + addLogEntry("--------------- Application Runtime Configuration ---------------"); + } + + // Display application version + addLogEntry("onedrive version = " ~ applicationVersion); + + // Display all of the pertinent configuration options + addLogEntry("Config path = " ~ configDirName); + // Does a config file exist or are we using application defaults + addLogEntry("Config file found in config path = " ~ to!string(exists(applicableConfigFilePath))); + + // Is config option drive_id configured? + addLogEntry("Config option 'drive_id' = " ~ getValueString("drive_id")); + + // Config Options as per 'config' file + addLogEntry("Config option 'sync_dir' = " ~ getValueString("sync_dir")); + + // logging and notifications + addLogEntry("Config option 'enable_logging' = " ~ to!string(getValueBool("enable_logging"))); + addLogEntry("Config option 'log_dir' = " ~ getValueString("log_dir")); + addLogEntry("Config option 'disable_notifications' = " ~ to!string(getValueBool("disable_notifications"))); + + // skip files and directory and 'matching' policy + addLogEntry("Config option 'skip_dir' = " ~ getValueString("skip_dir")); + addLogEntry("Config option 'skip_dir_strict_match' = " ~ to!string(getValueBool("skip_dir_strict_match"))); + addLogEntry("Config option 'skip_file' = " ~ getValueString("skip_file")); + addLogEntry("Config option 'skip_dotfiles' = " ~ to!string(getValueBool("skip_dotfiles"))); + addLogEntry("Config option 'skip_symlinks' = " ~ to!string(getValueBool("skip_symlinks"))); + + // --monitor sync process options + addLogEntry("Config option 'monitor_interval' = " ~ to!string(getValueLong("monitor_interval"))); + addLogEntry("Config option 'monitor_log_frequency' = " ~ to!string(getValueLong("monitor_log_frequency"))); + addLogEntry("Config option 'monitor_fullscan_frequency' = " ~ to!string(getValueLong("monitor_fullscan_frequency"))); + + // sync process and method + addLogEntry("Config option 'read_only_auth_scope' = " ~ to!string(getValueBool("read_only_auth_scope"))); + addLogEntry("Config option 'dry_run' = " ~ to!string(getValueBool("dry_run"))); + addLogEntry("Config option 'upload_only' = " ~ to!string(getValueBool("upload_only"))); + addLogEntry("Config option 'download_only' = " ~ to!string(getValueBool("download_only"))); + addLogEntry("Config option 'local_first' = " ~ to!string(getValueBool("local_first"))); + addLogEntry("Config option 'check_nosync' = " ~ to!string(getValueBool("check_nosync"))); + addLogEntry("Config option 'check_nomount' = " ~ to!string(getValueBool("check_nomount"))); + addLogEntry("Config option 'resync' = " ~ to!string(getValueBool("resync"))); + addLogEntry("Config option 'resync_auth' = " ~ to!string(getValueBool("resync_auth"))); + addLogEntry("Config option 'cleanup_local_files' = " ~ 
to!string(getValueBool("cleanup_local_files"))); - int returnRequiredDirectoryPermisions() { - // read the configuredDirectoryPermissionMode and return - if (configuredDirectoryPermissionMode == 0) { - // the configured value is zero, this means that directories would get - // values of d--------- - configureRequiredDirectoryPermisions(); + // data integrity + addLogEntry("Config option 'classify_as_big_delete' = " ~ to!string(getValueLong("classify_as_big_delete"))); + addLogEntry("Config option 'disable_upload_validation' = " ~ to!string(getValueBool("disable_upload_validation"))); + addLogEntry("Config option 'disable_download_validation' = " ~ to!string(getValueBool("disable_download_validation"))); + addLogEntry("Config option 'bypass_data_preservation' = " ~ to!string(getValueBool("bypass_data_preservation"))); + addLogEntry("Config option 'no_remote_delete' = " ~ to!string(getValueBool("no_remote_delete"))); + addLogEntry("Config option 'remove_source_files' = " ~ to!string(getValueBool("remove_source_files"))); + addLogEntry("Config option 'sync_dir_permissions' = " ~ to!string(getValueLong("sync_dir_permissions"))); + addLogEntry("Config option 'sync_file_permissions' = " ~ to!string(getValueLong("sync_file_permissions"))); + addLogEntry("Config option 'space_reservation' = " ~ to!string(getValueLong("space_reservation"))); + + // curl operations + addLogEntry("Config option 'application_id' = " ~ getValueString("application_id")); + addLogEntry("Config option 'azure_ad_endpoint' = " ~ getValueString("azure_ad_endpoint")); + addLogEntry("Config option 'azure_tenant_id' = " ~ getValueString("azure_tenant_id")); + addLogEntry("Config option 'user_agent' = " ~ getValueString("user_agent")); + addLogEntry("Config option 'force_http_11' = " ~ to!string(getValueBool("force_http_11"))); + addLogEntry("Config option 'debug_https' = " ~ to!string(getValueBool("debug_https"))); + addLogEntry("Config option 'rate_limit' = " ~ to!string(getValueLong("rate_limit"))); + addLogEntry("Config option 'operation_timeout' = " ~ to!string(getValueLong("operation_timeout"))); + addLogEntry("Config option 'dns_timeout' = " ~ to!string(getValueLong("dns_timeout"))); + addLogEntry("Config option 'connect_timeout' = " ~ to!string(getValueLong("connect_timeout"))); + addLogEntry("Config option 'data_timeout' = " ~ to!string(getValueLong("data_timeout"))); + addLogEntry("Config option 'ip_protocol_version' = " ~ to!string(getValueLong("ip_protocol_version"))); + + // Is sync_list configured ? + if (exists(syncListFilePath)){ + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("Selective sync 'sync_list' configured = true"); + addLogEntry("sync_list config option 'sync_root_files' = " ~ to!string(getValueBool("sync_root_files"))); + addLogEntry("sync_list contents:"); + // Output the sync_list contents + auto syncListFile = File(syncListFilePath, "r"); + auto range = syncListFile.byLine(); + foreach (line; range) + { + addLogEntry(to!string(line)); + } + } else { + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("Selective sync 'sync_list' configured = false"); } - return configuredDirectoryPermissionMode; + + // Is sync_business_shared_items enabled and configured ? 
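+		// The 'business_shared_items' file is simply echoed below, one line at a time.
+		// Illustrative example of possible file contents (the entry names are hypothetical):
+		//   Top Level Shared Folder
+		//   Another Shared Folder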
+		addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering
+		addLogEntry("Config option 'sync_business_shared_items' = " ~ to!string(getValueBool("sync_business_shared_items")));
+
+		if (exists(businessSharedItemsFilePath)){
+			addLogEntry("Selective Business Shared Items configured = true");
+			addLogEntry("sync_business_shared_items contents:");
+			// Output the sync_business_shared_items contents
+			auto businessSharedItemsFileList = File(businessSharedItemsFilePath, "r");
+			auto range = businessSharedItemsFileList.byLine();
+			foreach (line; range)
+			{
+				addLogEntry(to!string(line));
+			}
+		} else {
+			addLogEntry("Selective Business Shared Items configured = false");
+		}
+
+		// Are webhooks enabled?
+		addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering
+		addLogEntry("Config option 'webhook_enabled' = " ~ to!string(getValueBool("webhook_enabled")));
+		if (getValueBool("webhook_enabled")) {
+			addLogEntry("Config option 'webhook_public_url' = " ~ getValueString("webhook_public_url"));
+			addLogEntry("Config option 'webhook_listening_host' = " ~ getValueString("webhook_listening_host"));
+			addLogEntry("Config option 'webhook_listening_port' = " ~ to!string(getValueLong("webhook_listening_port")));
+			addLogEntry("Config option 'webhook_expiration_interval' = " ~ to!string(getValueLong("webhook_expiration_interval")));
+			addLogEntry("Config option 'webhook_renewal_interval' = " ~ to!string(getValueLong("webhook_renewal_interval")));
+			addLogEntry("Config option 'webhook_retry_interval' = " ~ to!string(getValueLong("webhook_retry_interval")));
+		}
+
+		if (getValueBool("display_running_config")) {
+			addLogEntry("-----------------------------------------------------------------");
+		}
+	}
+
+	// Prompt the user to accept the risk of using --resync
+	bool displayResyncRiskForAcceptance() {
+		// what is the user risk acceptance?
+		bool userRiskAcceptance = false;
+
+		// Did the user use --resync-auth or 'resync_auth' in the config file to negate presenting this message?
+		if (!getValueBool("resync_auth")) {
+			// need to prompt user
+			char response;
+
+			// --resync warning message
+			addLogEntry("", ["consoleOnly"]); // new line, console only
+			addLogEntry("The usage of --resync will delete your local 'onedrive' client state, thus no record of your current 'sync status' will exist.", ["consoleOnly"]);
+			addLogEntry("This has the potential to overwrite local versions of files with perhaps older versions of documents downloaded from OneDrive, resulting in local data loss.", ["consoleOnly"]);
+			addLogEntry("If in doubt, backup your local data before using --resync", ["consoleOnly"]);
+			addLogEntry("", ["consoleOnly"]); // new line, console only
+			addLogEntry("Are you sure you wish to proceed with --resync? [Y/N] ", ["consoleOnlyNoNewLine"]);
+
+			try {
+				// Attempt to read user response
+				string input = readln().strip;
+				if (input.length > 0) {
+					response = std.ascii.toUpper(input[0]);
+				}
+			} catch (std.format.FormatException e) {
+				userRiskAcceptance = false;
+				// Caught an error
+				return false;
+			}
+
+			// What did the user enter?
+			addLogEntry("--resync warning User Response Entered: " ~ to!string(response), ["debug"]);
+
+			// Evaluate user response
+			if ((to!string(response) == "y") || (to!string(response) == "Y")) {
+				// User has accepted --resync risk to proceed
+				userRiskAcceptance = true;
+				// Are you sure you wish .. does not use writeln();
+				write("\n");
+			}
+		} else {
+			// resync_auth is true
+			userRiskAcceptance = true;
+		}
+
+		// Return the --resync acceptance or not
+		return userRiskAcceptance;
+	}
+
+	// Prompt the user to accept the risk of using --force-sync
+	bool displayForceSyncRiskForAcceptance() {
+		// what is the user risk acceptance?
+		bool userRiskAcceptance = false;
+
+		// need to prompt user
+		char response;
+
+		// --force-sync warning message
+		addLogEntry("", ["consoleOnly"]); // new line, console only
+		addLogEntry("The use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts.", ["consoleOnly"]);
+		addLogEntry("By proceeding to use this option you accept any impacts including any data loss that may occur as a result of using --force-sync.", ["consoleOnly"]);
+		addLogEntry("", ["consoleOnly"]); // new line, console only
+		addLogEntry("Are you sure you wish to proceed with --force-sync? [Y/N] ", ["consoleOnlyNoNewLine"]);
+
+		try {
+			// Attempt to read user response
+			string input = readln().strip;
+			if (input.length > 0) {
+				response = std.ascii.toUpper(input[0]);
+			}
+		} catch (std.format.FormatException e) {
+			userRiskAcceptance = false;
+			// Caught an error
+			return false;
+		}
+
+		// What did the user enter?
+		addLogEntry("--force-sync warning User Response Entered: " ~ to!string(response), ["debug"]);
+
+		// Evaluate user response
+		if ((to!string(response) == "y") || (to!string(response) == "Y")) {
+			// User has accepted --force-sync risk to proceed
+			userRiskAcceptance = true;
+			// Are you sure you wish .. does not use writeln();
+			write("\n");
+		}
+
+		// Return the --force-sync acceptance or not
+		return userRiskAcceptance;
 	}
+
+	// Check the application configuration for any changes that need to trigger a --resync
+	// This function is only called if --resync is not present
+	bool applicationChangeWhereResyncRequired() {
+		// Default is that no resync is required
+		bool resyncRequired = false;
+
+		// Configuration File Flags
+		bool configFileOptionsDifferent = false;
+		bool syncListFileDifferent = false;
+		bool syncDirDifferent = false;
+		bool skipFileDifferent = false;
+		bool skipDirDifferent = false;
+		bool skipDotFilesDifferent = false;
+		bool skipSymbolicLinksDifferent = false;
+		bool driveIdDifferent = false;
+		bool syncBusinessSharedItemsDifferent = false;
+		bool businessSharedItemsFileDifferent = false;
+
+		// Create the required initial hash files
+		createRequiredInitialConfigurationHashFiles();
+
+		// Read in the existing hash file values
+		readExistingConfigurationHashFiles();
+
+		// Was the 'sync_list' file updated?
+		if (currentSyncListHash != previousSyncListHash) {
+			// Debugging output to assist what changed
+			addLogEntry("sync_list file has been updated, --resync needed", ["debug"]);
+			syncListFileDifferent = true;
+		}
+
+		// Was the 'business_shared_items' file updated?
+		if (currentBusinessSharedItemsHash != previousBusinessSharedItemsHash) {
+			// Debugging output to assist what changed
+			addLogEntry("business_shared_items file has been updated, --resync needed", ["debug"]);
+			businessSharedItemsFileDifferent = true;
+		}
+
+		// Was the 'config' file updated between last execution and this execution?
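+		// Note: a differing 'config' hash on its own does not force a --resync. The backup copy of the previous
+		// 'config' is compared key-by-key below, so changing an option such as 'monitor_interval' updates the
+		// hash but does not trigger a --resync.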
+ if (currentConfigHash != previousConfigHash) { + // config file was updated, however we only want to trigger a --resync requirement if sync_dir, skip_dir, skip_file or drive_id was modified + addLogEntry("Application configuration file has been updated, checking if --resync needed"); + addLogEntry("Using this configBackupFile: " ~ configBackupFile, ["debug"]); + + if (exists(configBackupFile)) { + // check backup config what has changed for these configuration options if anything + // # drive_id = "" + // # sync_dir = "~/OneDrive" + // # skip_file = "~*|.~*|*.tmp|*.swp|*.partial" + // # skip_dir = "" + // # skip_dotfiles = "" + // # skip_symlinks = "" + // # sync_business_shared_items = "" + string[string] backupConfigStringValues; + backupConfigStringValues["drive_id"] = ""; + backupConfigStringValues["sync_dir"] = ""; + backupConfigStringValues["skip_file"] = ""; + backupConfigStringValues["skip_dir"] = ""; + backupConfigStringValues["skip_dotfiles"] = ""; + backupConfigStringValues["skip_symlinks"] = ""; + backupConfigStringValues["sync_business_shared_items"] = ""; + + // bool flags to trigger if the entries that trigger a --resync were found in the backup config file + // if these were not in the backup file, they may have been added ... thus new, thus we need to double check the existing + // config file to see if this was a newly added config option + bool drive_id_present = false; + bool sync_dir_present = false; + bool skip_file_present = false; + bool skip_dir_present = false; + bool skip_dotfiles_present = false; + bool skip_symlinks_present = false; + bool sync_business_shared_items_present = false; + + // Common debug message if an element is different + string configOptionModifiedMessage = " was modified since the last time the application was successfully run, --resync required"; + + auto configBackupFileHandle = File(configBackupFile, "r"); + string lineBuffer; + + // read configBackupFile line by line + auto range = configBackupFileHandle.byLine(); + // for each line + foreach (line; range) { + addLogEntry("Backup Config Line: " ~ lineBuffer, ["debug"]); + + lineBuffer = stripLeft(line).to!string; + if (lineBuffer.length == 0 || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue; + auto c = lineBuffer.matchFirst(configRegex); + if (!c.empty) { + c.popFront(); // skip the whole match + string key = c.front.dup; + addLogEntry("Backup Config Key: " ~ key, ["debug"]); + + auto p = key in backupConfigStringValues; + if (p) { + c.popFront(); + // compare this key + if (key == "drive_id") { + drive_id_present = true; + if (c.front.dup != getValueString("drive_id")) { + addLogEntry(key ~ configOptionModifiedMessage, ["debug"]); + configFileOptionsDifferent = true; + } + } + + if (key == "sync_dir") { + sync_dir_present = true; + if (c.front.dup != getValueString("sync_dir")) { + addLogEntry(key ~ configOptionModifiedMessage, ["debug"]); + configFileOptionsDifferent = true; + } + } + + if (key == "skip_file") { + skip_file_present = true; + string computedBackupSkipFile = defaultSkipFile ~ "|" ~ to!string(c.front.dup); + if (computedBackupSkipFile != getValueString("skip_file")) { + addLogEntry(key ~ configOptionModifiedMessage, ["debug"]); + configFileOptionsDifferent = true; + } + } + + if (key == "skip_dir") { + skip_dir_present = true; + if (c.front.dup != getValueString("skip_dir")) { + addLogEntry(key ~ configOptionModifiedMessage, ["debug"]); + configFileOptionsDifferent = true; + } + } + + if (key == "skip_dotfiles") { + skip_dotfiles_present = true; + if (c.front.dup 
!= to!string(getValueBool("skip_dotfiles"))) {
+									addLogEntry(key ~ configOptionModifiedMessage, ["debug"]);
+									configFileOptionsDifferent = true;
+								}
+							}
+
+							if (key == "skip_symlinks") {
+								skip_symlinks_present = true;
+								if (c.front.dup != to!string(getValueBool("skip_symlinks"))) {
+									addLogEntry(key ~ configOptionModifiedMessage, ["debug"]);
+									configFileOptionsDifferent = true;
+								}
+							}
+
+							if (key == "sync_business_shared_items") {
+								sync_business_shared_items_present = true;
+								if (c.front.dup != to!string(getValueBool("sync_business_shared_items"))) {
+									addLogEntry(key ~ configOptionModifiedMessage, ["debug"]);
+									configFileOptionsDifferent = true;
+								}
+							}
+						}
+					}
+				}
+
+				// close file if open
+				if (configBackupFileHandle.isOpen()) {
+					// close open file
+					configBackupFileHandle.close();
+				}
+
+				// Were any of the items that trigger a --resync not in the existing backup 'config' file .. thus newly added?
+				if ((!drive_id_present) || (!sync_dir_present) || (!skip_file_present) || (!skip_dir_present) || (!skip_dotfiles_present) || (!skip_symlinks_present) || (!sync_business_shared_items_present)) {
+					addLogEntry("drive_id present in config backup: " ~ to!string(drive_id_present), ["debug"]);
+					addLogEntry("sync_dir present in config backup: " ~ to!string(sync_dir_present), ["debug"]);
+					addLogEntry("skip_file present in config backup: " ~ to!string(skip_file_present), ["debug"]);
+					addLogEntry("skip_dir present in config backup: " ~ to!string(skip_dir_present), ["debug"]);
+					addLogEntry("skip_dotfiles present in config backup: " ~ to!string(skip_dotfiles_present), ["debug"]);
+					addLogEntry("skip_symlinks present in config backup: " ~ to!string(skip_symlinks_present), ["debug"]);
+					addLogEntry("sync_business_shared_items present in config backup: " ~ to!string(sync_business_shared_items_present), ["debug"]);
+
+					if ((!drive_id_present) && (configFileDriveId != "")) {
+						addLogEntry("drive_id newly added ... --resync needed");
+						configFileOptionsDifferent = true;
+						driveIdDifferent = true;
+					}
+
+					if ((!sync_dir_present) && (configFileSyncDir != defaultSyncDir)) {
+						addLogEntry("sync_dir newly added ... --resync needed");
+						configFileOptionsDifferent = true;
+						syncDirDifferent = true;
+					}
+
+					if ((!skip_file_present) && (configFileSkipFile != defaultSkipFile)) {
+						addLogEntry("skip_file newly added ... --resync needed");
+						configFileOptionsDifferent = true;
+						skipFileDifferent = true;
+					}
+
+					if ((!skip_dir_present) && (configFileSkipDir != "")) {
+						addLogEntry("skip_dir newly added ... --resync needed");
+						configFileOptionsDifferent = true;
+						skipDirDifferent = true;
+					}
+
+					if ((!skip_dotfiles_present) && (configFileSkipDotfiles)) {
+						addLogEntry("skip_dotfiles newly added ... --resync needed");
+						configFileOptionsDifferent = true;
+						skipDotFilesDifferent = true;
+					}
+
+					if ((!skip_symlinks_present) && (configFileSkipSymbolicLinks)) {
+						addLogEntry("skip_symlinks newly added ... --resync needed");
+						configFileOptionsDifferent = true;
+						skipSymbolicLinksDifferent = true;
+					}
+
+					if ((!sync_business_shared_items_present) && (configFileSyncBusinessSharedItems)) {
+						addLogEntry("sync_business_shared_items newly added ... 
--resync needed"); + configFileOptionsDifferent = true; + syncBusinessSharedItemsDifferent = true; + } + } + + object.destroy(configBackupFileHandle); + object.destroy(range); + object.destroy(lineBuffer); + + } else { + // no backup to check + addLogEntry("WARNING: no backup config file was found, unable to validate if any changes made"); + } + } + + // config file set options can be changed via CLI input, specifically these will impact sync and a --resync will be needed: + // --syncdir ARG + // --skip-file ARG + // --skip-dir ARG + // --skip-dot-files + // --skip-symlinks + + if (exists(applicableConfigFilePath)) { + // config file exists + // was the sync_dir updated by CLI? + if (configFileSyncDir != "") { + // sync_dir was set in config file + if (configFileSyncDir != getValueString("sync_dir")) { + // config file was set and CLI input changed this + + // Is this potentially running as a Docker container? + if (entrypointExists) { + // entrypoint.sh exists + addLogEntry("sync_dir: CLI override of config file option, however entrypoint.sh exists, thus most likely first run of Docker container", ["debug"]); + } else { + // entrypoint.sh does not exist + addLogEntry("sync_dir: CLI override of config file option, --resync needed", ["debug"]); + syncDirDifferent = true; + } + } + } + + // was the skip_file updated by CLI? + if (configFileSkipFile != "") { + // skip_file was set in config file + if (configFileSkipFile != getValueString("skip_file")) { + // config file was set and CLI input changed this + addLogEntry("skip_file: CLI override of config file option, --resync needed", ["debug"]); + skipFileDifferent = true; + } + } - int returnRequiredFilePermisions() { - // read the configuredFilePermissionMode and return - if (configuredFilePermissionMode == 0) { - // the configured value is zero + // was the skip_dir updated by CLI? + if (configFileSkipDir != "") { + // skip_dir was set in config file + if (configFileSkipDir != getValueString("skip_dir")) { + // config file was set and CLI input changed this + addLogEntry("skip_dir: CLI override of config file option, --resync needed", ["debug"]); + skipDirDifferent = true; + } + } + + // was skip_dotfiles updated by --skip-dot-files ? + if (!configFileSkipDotfiles) { + // was not set in config file + if (getValueBool("skip_dotfiles")) { + // --skip-dot-files passed in + addLogEntry("skip_dotfiles: CLI override of config file option, --resync needed", ["debug"]); + skipDotFilesDifferent = true; + } + } + + // was skip_symlinks updated by --skip-symlinks ? + if (!configFileSkipSymbolicLinks) { + // was not set in config file + if (getValueBool("skip_symlinks")) { + // --skip-symlinks passed in + addLogEntry("skip_symlinks: CLI override of config file option, --resync needed", ["debug"]); + skipSymbolicLinksDifferent = true; + } + } + } + + // Did any of the config files or CLI options trigger a --resync requirement? 
+ addLogEntry("configFileOptionsDifferent: " ~ to!string(configFileOptionsDifferent), ["debug"]); + + // Options + addLogEntry("driveIdDifferent: " ~ to!string(driveIdDifferent), ["debug"]); + addLogEntry("syncDirDifferent: " ~ to!string(syncDirDifferent), ["debug"]); + addLogEntry("skipFileDifferent: " ~ to!string(skipFileDifferent), ["debug"]); + addLogEntry("skipDirDifferent: " ~ to!string(skipDirDifferent), ["debug"]); + addLogEntry("skipDotFilesDifferent: " ~ to!string(skipDotFilesDifferent), ["debug"]); + addLogEntry("skipSymbolicLinksDifferent: " ~ to!string(skipSymbolicLinksDifferent), ["debug"]); + addLogEntry("syncBusinessSharedItemsDifferent: " ~ to!string(syncBusinessSharedItemsDifferent), ["debug"]); + + // Files with change + addLogEntry("syncListFileDifferent: " ~ to!string(syncListFileDifferent), ["debug"]); + addLogEntry("businessSharedItemsFileDifferent: " ~ to!string(businessSharedItemsFileDifferent), ["debug"]); + + if ((configFileOptionsDifferent) || (syncListFileDifferent) || (businessSharedItemsFileDifferent) || (syncDirDifferent) || (skipFileDifferent) || (skipDirDifferent) || (driveIdDifferent) || (skipDotFilesDifferent) || (skipSymbolicLinksDifferent) || (syncBusinessSharedItemsDifferent) ) { + // set the flag + resyncRequired = true; + } + return resyncRequired; + } + + // Cleanup hash files that require to be cleaned up when a --resync is issued + void cleanupHashFilesDueToResync() { + if (!getValueBool("dry_run")) { + // cleanup hash files + addLogEntry("Cleaning up configuration hash files", ["debug"]); + safeRemove(configHashFile); + safeRemove(syncListHashFile); + safeRemove(businessSharedItemsHashFile); + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... + addLogEntry("DRY RUN: Not removing hash files as --dry-run has been used"); + } + } + + // For each of the config files, update the hash data in the hash files + void updateHashContentsForConfigFiles() { + // Are we in a --dry-run scenario? + if (!getValueBool("dry_run")) { + // Not a dry-run scenario, update the applicable files + // Update applicable 'config' files + if (exists(applicableConfigFilePath)) { + // Update the hash of the applicable config file + addLogEntry("Updating applicable config file hash", ["debug"]); + std.file.write(configHashFile, computeQuickXorHash(applicableConfigFilePath)); + // Hash file should only be readable by the user who created it - 0600 permissions needed + configHashFile.setAttributes(convertedPermissionValue); + } + // Update 'sync_list' files + if (exists(syncListFilePath)) { + // update sync_list hash + addLogEntry("Updating sync_list hash", ["debug"]); + std.file.write(syncListHashFile, computeQuickXorHash(syncListFilePath)); + // Hash file should only be readable by the user who created it - 0600 permissions needed + syncListHashFile.setAttributes(convertedPermissionValue); + } + + + // Update 'update business_shared_items' files + if (exists(businessSharedItemsFilePath)) { + // update business_shared_folders hash + addLogEntry("Updating business_shared_items hash", ["debug"]); + std.file.write(businessSharedItemsHashFile, computeQuickXorHash(businessSharedItemsFilePath)); + // Hash file should only be readable by the user who created it - 0600 permissions needed + businessSharedItemsHashFile.setAttributes(convertedPermissionValue); + } + + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... 
+ addLogEntry("DRY RUN: Not updating hash files as --dry-run has been used"); + } + } + + // Create any required hash files for files that help us determine if the configuration has changed since last run + void createRequiredInitialConfigurationHashFiles() { + // Does a 'config' file exist with a valid hash file + if (exists(applicableConfigFilePath)) { + if (!exists(configHashFile)) { + // no existing hash file exists + std.file.write(configHashFile, "initial-hash"); + // Hash file should only be readable by the user who created it - 0600 permissions needed + configHashFile.setAttributes(convertedPermissionValue); + } + // Generate the runtime hash for the 'config' file + currentConfigHash = computeQuickXorHash(applicableConfigFilePath); + } + + // Does a 'sync_list' file exist with a valid hash file + if (exists(syncListFilePath)) { + if (!exists(syncListHashFile)) { + // no existing hash file exists + std.file.write(syncListHashFile, "initial-hash"); + // Hash file should only be readable by the user who created it - 0600 permissions needed + syncListHashFile.setAttributes(convertedPermissionValue); + } + // Generate the runtime hash for the 'sync_list' file + currentSyncListHash = computeQuickXorHash(syncListFilePath); + } + + // Does a 'business_shared_items' file exist with a valid hash file + if (exists(businessSharedItemsFilePath)) { + if (!exists(businessSharedItemsHashFile)) { + // no existing hash file exists + std.file.write(businessSharedItemsHashFile, "initial-hash"); + // Hash file should only be readable by the user who created it - 0600 permissions needed + businessSharedItemsHashFile.setAttributes(convertedPermissionValue); + } + // Generate the runtime hash for the 'sync_list' file + currentBusinessSharedItemsHash = computeQuickXorHash(businessSharedItemsFilePath); + } + } + + // Read in the text values of the previous configurations + int readExistingConfigurationHashFiles() { + if (exists(configHashFile)) { + try { + previousConfigHash = readText(configHashFile); + } catch (std.file.FileException e) { + // Unable to access required hash file + addLogEntry("ERROR: Unable to access " ~ e.msg); + // Use exit scopes to shutdown API + return EXIT_FAILURE; + } + } + + if (exists(syncListHashFile)) { + try { + previousSyncListHash = readText(syncListHashFile); + } catch (std.file.FileException e) { + // Unable to access required hash file + addLogEntry("ERROR: Unable to access " ~ e.msg); + // Use exit scopes to shutdown API + return EXIT_FAILURE; + } + } + if (exists(businessSharedItemsHashFile)) { + try { + previousBusinessSharedItemsHash = readText(businessSharedItemsHashFile); + } catch (std.file.FileException e) { + // Unable to access required hash file + addLogEntry("ERROR: Unable to access " ~ e.msg); + // Use exit scopes to shutdown API + return EXIT_FAILURE; + } + } + return 0; + } + + // Check for basic option conflicts - flags that should not be used together and/or flag combinations that conflict with each other + bool checkForBasicOptionConflicts() { + + bool operationalConflictDetected = false; + + // What are the permission that have been set for the application? 
+ // These are relevant for: + // - The ~/OneDrive parent folder or 'sync_dir' configured item + // - Any new folder created under ~/OneDrive or 'sync_dir' + // - Any new file created under ~/OneDrive or 'sync_dir' + // valid permissions are 000 -> 777 - anything else is invalid + if ((getValueLong("sync_dir_permissions") < 0) || (getValueLong("sync_file_permissions") < 0) || (getValueLong("sync_dir_permissions") > 777) || (getValueLong("sync_file_permissions") > 777)) { + addLogEntry("ERROR: Invalid 'User|Group|Other' permissions set within config file. Please check your configuration"); + operationalConflictDetected = true; + } else { + // Debug log output what permissions are being set to + addLogEntry("Configuring default new folder permissions as: " ~ to!string(getValueLong("sync_dir_permissions")), ["debug"]); + configureRequiredDirectoryPermisions(); + addLogEntry("Configuring default new file permissions as: " ~ to!string(getValueLong("sync_file_permissions")), ["debug"]); configureRequiredFilePermisions(); } - return configuredFilePermissionMode; + + // --upload-only and --download-only cannot be used together + if ((getValueBool("upload_only")) && (getValueBool("download_only"))) { + addLogEntry("ERROR: --upload-only and --download-only cannot be used together. Use one, not both at the same time"); + operationalConflictDetected = true; + } + + // --sync and --monitor cannot be used together + if ((getValueBool("synchronize")) && (getValueBool("monitor"))) { + addLogEntry("ERROR: --sync and --monitor cannot be used together. Only use one of these options, not both at the same time"); + operationalConflictDetected = true; + } + + // --no-remote-delete can ONLY be enabled when --upload-only is used + if ((getValueBool("no_remote_delete")) && (!getValueBool("upload_only"))) { + addLogEntry("ERROR: --no-remote-delete can only be used with --upload-only"); + operationalConflictDetected = true; + } + + // --remove-source-files can ONLY be enabled when --upload-only is used + if ((getValueBool("remove_source_files")) && (!getValueBool("upload_only"))) { + addLogEntry("ERROR: --remove-source-files can only be used with --upload-only"); + operationalConflictDetected = true; + } + + // --cleanup-local-files can ONLY be enabled when --download-only is used + if ((getValueBool("cleanup_local_files")) && (!getValueBool("download_only"))) { + addLogEntry("ERROR: --cleanup-local-files can only be used with --download-only"); + operationalConflictDetected = true; + } + + // --list-shared-folders cannot be used with --resync and/or --resync-auth + if ((getValueBool("list_business_shared_items")) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + addLogEntry("ERROR: --list-shared-folders cannot be used with --resync or --resync-auth"); + operationalConflictDetected = true; + } + + // --display-sync-status cannot be used with --resync and/or --resync-auth + if ((getValueBool("display_sync_status")) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + addLogEntry("ERROR: --display-sync-status cannot be used with --resync or --resync-auth"); + operationalConflictDetected = true; + } + + // --modified-by cannot be used with --resync and/or --resync-auth + if ((!getValueString("modified_by").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + addLogEntry("ERROR: --modified-by cannot be used with --resync or --resync-auth"); + operationalConflictDetected = true; + } + + // --get-file-link cannot be used with --resync and/or --resync-auth + if 
((!getValueString("get_file_link").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + addLogEntry("ERROR: --get-file-link cannot be used with --resync or --resync-auth"); + operationalConflictDetected = true; + } + + // --create-share-link cannot be used with --resync and/or --resync-auth + if ((!getValueString("create_share_link").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + addLogEntry("ERROR: --create-share-link cannot be used with --resync or --resync-auth"); + + operationalConflictDetected = true; + } + + // --get-sharepoint-drive-id cannot be used with --resync and/or --resync-auth + if ((!getValueString("sharepoint_library_name").empty) && ((getValueBool("resync")) || (getValueBool("resync_auth")))) { + addLogEntry("ERROR: --get-sharepoint-drive-id cannot be used with --resync or --resync-auth"); + operationalConflictDetected = true; + } + + // --monitor and --display-sync-status cannot be used together + if ((getValueBool("monitor")) && (getValueBool("display_sync_status"))) { + addLogEntry("ERROR: --monitor and --display-sync-status cannot be used together"); + operationalConflictDetected = true; + } + + // --sync and and --display-sync-status cannot be used together + if ((getValueBool("synchronize")) && (getValueBool("display_sync_status"))) { + addLogEntry("ERROR: --sync and and --display-sync-status cannot be used together"); + operationalConflictDetected = true; + } + + // --monitor and --display-quota cannot be used together + if ((getValueBool("monitor")) && (getValueBool("display_quota"))) { + addLogEntry("ERROR: --monitor and --display-quota cannot be used together"); + operationalConflictDetected = true; + } + + // --sync and and --display-quota cannot be used together + if ((getValueBool("synchronize")) && (getValueBool("display_quota"))) { + addLogEntry("ERROR: --sync and and --display-quota cannot be used together"); + operationalConflictDetected = true; + } + + // --force-sync can only be used when using --sync --single-directory + if (getValueBool("force_sync")) { + + bool conflict = false; + // Should not be used with --monitor + if (getValueBool("monitor")) conflict = true; + // single_directory must not be empty + if (getValueString("single_directory").empty) conflict = true; + if (conflict) { + addLogEntry("ERROR: --force-sync can only be used with --sync --single-directory"); + operationalConflictDetected = true; + } + } + + // When using 'azure_ad_endpoint', 'azure_tenant_id' cannot be empty + if ((!getValueString("azure_ad_endpoint").empty) && (getValueString("azure_tenant_id").empty)) { + addLogEntry("ERROR: config option 'azure_tenant_id' cannot be empty when 'azure_ad_endpoint' is configured"); + operationalConflictDetected = true; + } + + // When using --enable-logging the 'log_dir' cannot be empty + if ((getValueBool("enable_logging")) && (getValueString("log_dir").empty)) { + addLogEntry("ERROR: config option 'log_dir' cannot be empty when 'enable_logging' is configured"); + operationalConflictDetected = true; + } + + // When using --syncdir, the value cannot be empty. 
+ if (strip(getValueString("sync_dir")).empty) { + addLogEntry("ERROR: --syncdir value cannot be empty"); + operationalConflictDetected = true; + } + + // --monitor and --create-directory cannot be used together + if ((getValueBool("monitor")) && (!getValueString("create_directory").empty)) { + addLogEntry("ERROR: --monitor and --create-directory cannot be used together"); + operationalConflictDetected = true; + } + + // --sync and --create-directory cannot be used together + if ((getValueBool("synchronize")) && (!getValueString("create_directory").empty)) { + addLogEntry("ERROR: --sync and --create-directory cannot be used together"); + operationalConflictDetected = true; + } + + // --monitor and --remove-directory cannot be used together + if ((getValueBool("monitor")) && (!getValueString("remove_directory").empty)) { + addLogEntry("ERROR: --monitor and --remove-directory cannot be used together"); + operationalConflictDetected = true; + } + + // --sync and --remove-directory cannot be used together + if ((getValueBool("synchronize")) && (!getValueString("remove_directory").empty)) { + addLogEntry("ERROR: --sync and --remove-directory cannot be used together"); + operationalConflictDetected = true; + } + + // --monitor and --source-directory cannot be used together + if ((getValueBool("monitor")) && (!getValueString("source_directory").empty)) { + addLogEntry("ERROR: --monitor and --source-directory cannot be used together"); + operationalConflictDetected = true; + } + + // --sync and --source-directory cannot be used together + if ((getValueBool("synchronize")) && (!getValueString("source_directory").empty)) { + addLogEntry("ERROR: --sync and --source-directory cannot be used together"); + operationalConflictDetected = true; + } + + // --monitor and --destination-directory cannot be used together + if ((getValueBool("monitor")) && (!getValueString("destination_directory").empty)) { + addLogEntry("ERROR: --monitor and --destination-directory cannot be used together"); + operationalConflictDetected = true; + } + + // --sync and --destination-directory cannot be used together + if ((getValueBool("synchronize")) && (!getValueString("destination_directory").empty)) { + addLogEntry("ERROR: --sync and --destination-directory cannot be used together"); + operationalConflictDetected = true; + } + + // --download-only and --local-first cannot be used together + if ((getValueBool("download_only")) && (getValueBool("local_first"))) { + addLogEntry("ERROR: --download-only cannot be used with --local-first"); + operationalConflictDetected = true; + } + + // Return bool value indicating if we have an operational conflict + return operationalConflictDetected; } + // Reset skip_file and skip_dir to application defaults when --force-sync is used void resetSkipToDefaults() { - // reset skip_file and skip_dir to application defaults // skip_file - log.vdebug("original skip_file: ", getValueString("skip_file")); - log.vdebug("resetting skip_file"); + addLogEntry("original skip_file: " ~ getValueString("skip_file"), ["debug"]); + addLogEntry("resetting skip_file to application defaults", ["debug"]); setValueString("skip_file", defaultSkipFile); - log.vdebug("reset skip_file: ", getValueString("skip_file")); + addLogEntry("reset skip_file: " ~ getValueString("skip_file"), ["debug"]); + // skip_dir - log.vdebug("original skip_dir: ", getValueString("skip_dir")); - log.vdebug("resetting skip_dir"); + addLogEntry("original skip_dir: " ~ getValueString("skip_dir"), ["debug"]); + addLogEntry("resetting skip_dir to 
application defaults", ["debug"]); setValueString("skip_dir", defaultSkipDir); - log.vdebug("reset skip_dir: ", getValueString("skip_dir")); + addLogEntry("reset skip_dir: " ~ getValueString("skip_dir"), ["debug"]); + } + + // Initialise the correct 'sync_dir' expanding any '~' if present + string initialiseRuntimeSyncDirectory() { + + string runtimeSyncDirectory; + + addLogEntry("sync_dir: Setting runtimeSyncDirectory from config value 'sync_dir'", ["debug"]); + + if (!shellEnvironmentSet){ + addLogEntry("sync_dir: No SHELL or USER environment variable configuration detected", ["debug"]); + + // No shell or user set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker + // Does the 'currently configured' sync_dir include a ~ + if (canFind(getValueString("sync_dir"), "~")) { + // A ~ was found in sync_dir + addLogEntry("sync_dir: A '~' was found in 'sync_dir', using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]); + runtimeSyncDirectory = buildNormalizedPath(buildPath(defaultHomePath, strip(getValueString("sync_dir"), "~"))); + } else { + // No ~ found in sync_dir, use as is + addLogEntry("sync_dir: Using configured 'sync_dir' path as-is as no SHELL or USER environment variable configuration detected", ["debug"]); + runtimeSyncDirectory = getValueString("sync_dir"); + } + } else { + // A shell and user environment variable is set, expand any ~ as this will be expanded correctly if present + if (canFind(getValueString("sync_dir"), "~")) { + addLogEntry("sync_dir: A '~' was found in the configured 'sync_dir', automatically expanding as SHELL and USER environment variable is set", ["debug"]); + runtimeSyncDirectory = expandTilde(getValueString("sync_dir")); + } else { + // No ~ found in sync_dir, does the path begin with a '/' ? + addLogEntry("sync_dir: Using configured 'sync_dir' path as-is as however SHELL or USER environment variable configuration detected - should be placed in USER home directory", ["debug"]); + if (!startsWith(getValueString("sync_dir"), "/")) { + addLogEntry("Configured 'sync_dir' does not start with a '/' or '~/' - adjusting configured 'sync_dir' to use User Home Directory as base for 'sync_dir' path", ["debug"]); + string updatedPathWithHome = "~/" ~ getValueString("sync_dir"); + runtimeSyncDirectory = expandTilde(updatedPathWithHome); + } else { + addLogEntry("use 'sync_dir' as is - no touch", ["debug"]); + runtimeSyncDirectory = getValueString("sync_dir"); + } + } + } + + // What will runtimeSyncDirectory be actually set to? 
+ addLogEntry("sync_dir: runtimeSyncDirectory set to: " ~ runtimeSyncDirectory, ["debug"]); + + return runtimeSyncDirectory; + } + + // Initialise the correct 'log_dir' when application logging to a separate file is enabled with 'enable_logging' and expanding any '~' if present + string calculateLogDirectory() { + + string configuredLogDirPath; + + addLogEntry("log_dir: Setting runtime application log from config value 'log_dir'", ["debug"]); + + if (getValueString("log_dir") != defaultLogFileDir) { + // User modified 'log_dir' to be used with 'enable_logging' + // if 'log_dir' contains a '~' this needs to be expanded correctly + if (canFind(getValueString("log_dir"), "~")) { + // ~ needs to be expanded correctly + if (!shellEnvironmentSet) { + // No shell or user environment variable set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker + addLogEntry("log_dir: A '~' was found in log_dir, using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set", ["debug"]); + configuredLogDirPath = buildNormalizedPath(buildPath(defaultHomePath, strip(getValueString("log_dir"), "~"))); + } else { + // A shell and user environment variable is set, expand any ~ as this will be expanded correctly if present + addLogEntry("log_dir: A '~' was found in the configured 'log_dir', automatically expanding as SHELL and USER environment variable is set", ["debug"]); + configuredLogDirPath = expandTilde(getValueString("log_dir")); + } + } else { + // '~' not found in log_dir entry, use as is + configuredLogDirPath = getValueString("log_dir"); + } + } else { + // Default 'log_dir' to be used with 'enable_logging' + configuredLogDirPath = defaultLogFileDir; + } + + // Attempt to create 'configuredLogDirPath' otherwise we need to fall back to the users home directory + if (!exists(configuredLogDirPath)) { + // 'configuredLogDirPath' path does not exist - try and create it + try { + mkdirRecurse(configuredLogDirPath); + } catch (std.file.FileException e) { + // We got an error when attempting to create the directory .. 
+ addLogEntry(); + addLogEntry("ERROR: Unable to create " ~ configuredLogDirPath); + addLogEntry("ERROR: Please manually create '" ~ configuredLogDirPath ~ "' and set appropriate permissions to allow write access for your user to this location."); + addLogEntry("ERROR: The requested client activity log will instead be located in your users home directory"); + addLogEntry(); + + // Reconfigure 'configuredLogDirPath' to use environment.get("HOME") value, which we have already calculated + configuredLogDirPath = defaultHomePath; + } + } + + // Return the initialised application log path + return configuredLogDirPath; + } + + void setConfigLoggingLevels(bool verboseLoggingInput, bool debugLoggingInput, long verbosityCountInput) { + // set the appConfig logging values + verboseLogging = verboseLoggingInput; + debugLogging = debugLoggingInput; + verbosityCount = verbosityCountInput; } } -void outputLongHelp(Option[] opt) -{ - auto argsNeedingOptions = [ - "--auth-files", - "--auth-response", - "--confdir", - "--create-directory", - "--create-share-link", - "--destination-directory", - "--get-file-link", - "--get-O365-drive-id", - "--log-dir", - "--min-notify-changes", - "--modified-by", - "--monitor-interval", - "--monitor-log-frequency", - "--monitor-fullscan-frequency", - "--operation-timeout", - "--remove-directory", - "--single-directory", - "--skip-dir", - "--skip-file", - "--skip-size", - "--source-directory", - "--space-reservation", - "--syncdir", - "--user-agent" ]; - writeln(`OneDrive - a client for OneDrive Cloud Services +// Output the full application help when --help is passed in +void outputLongHelp(Option[] opt) { + auto argsNeedingOptions = [ + "--auth-files", + "--auth-response", + "--confdir", + "--create-directory", + "--classify-as-big-delete", + "--create-share-link", + "--destination-directory", + "--get-file-link", + "--get-O365-drive-id", + "--log-dir", + "--min-notify-changes", + "--modified-by", + "--monitor-interval", + "--monitor-log-frequency", + "--monitor-fullscan-frequency", + "--remove-directory", + "--single-directory", + "--skip-dir", + "--skip-file", + "--skip-size", + "--source-directory", + "--space-reservation", + "--syncdir", + "--user-agent" ]; + writeln(`onedrive - A client for the Microsoft OneDrive Cloud Service -Usage: - onedrive [options] --synchronize + Usage: + onedrive [options] --sync Do a one time synchronization - onedrive [options] --monitor + onedrive [options] --monitor Monitor filesystem and sync regularly - onedrive [options] --display-config + onedrive [options] --display-config Display the currently used configuration - onedrive [options] --display-sync-status + onedrive [options] --display-sync-status Query OneDrive service and report on pending changes - onedrive -h | --help + onedrive -h | --help Show this help screen - onedrive --version + onedrive --version Show version -Options: -`); - foreach (it; opt.sort!("a.optLong < b.optLong")) { - writefln(" %s%s%s%s\n %s", - it.optLong, - it.optShort == "" ? "" : " " ~ it.optShort, - argsNeedingOptions.canFind(it.optLong) ? " ARG" : "", - it.required ? " (required)" : "", it.help); - } -} - -unittest -{ - auto cfg = new Config(""); - cfg.load("config"); - assert(cfg.getValueString("sync_dir") == "~/OneDrive"); -} + Options: + `); + foreach (it; opt.sort!("a.optLong < b.optLong")) { + writefln(" %s%s%s%s\n %s", + it.optLong, + it.optShort == "" ? "" : " " ~ it.optShort, + argsNeedingOptions.canFind(it.optLong) ? " ARG" : "", + it.required ? 
" (required)" : "", it.help); + } +} \ No newline at end of file diff --git a/src/curlEngine.d b/src/curlEngine.d new file mode 100644 index 000000000..1a0db204e --- /dev/null +++ b/src/curlEngine.d @@ -0,0 +1,110 @@ +// What is this module called? +module curlEngine; + +// What does this module require to function? +import std.net.curl; +import etc.c.curl: CurlOption; +import std.datetime; +import std.conv; +import std.stdio; + +// What other modules that we have created do we need to import? +import log; + +class CurlEngine { + HTTP http; + bool keepAlive; + ulong dnsTimeout; + + this() { + http = HTTP(); + } + + void initialise(ulong dnsTimeout, ulong connectTimeout, ulong dataTimeout, ulong operationTimeout, int maxRedirects, bool httpsDebug, string userAgent, bool httpProtocol, ulong userRateLimit, ulong protocolVersion, bool keepAlive=false) { + // Setting this to false ensures that when we close the curl instance, any open sockets are closed - which we need to do when running + // multiple threads and API instances at the same time otherwise we run out of local files | sockets pretty quickly + this.keepAlive = keepAlive; + this.dnsTimeout = dnsTimeout; + + // Curl Timeout Handling + + // libcurl dns_cache_timeout timeout + // https://curl.se/libcurl/c/CURLOPT_DNS_CACHE_TIMEOUT.html + // https://dlang.org/library/std/net/curl/http.dns_timeout.html + http.dnsTimeout = (dur!"seconds"(dnsTimeout)); + + // Timeout for HTTPS connections + // https://curl.se/libcurl/c/CURLOPT_CONNECTTIMEOUT.html + // https://dlang.org/library/std/net/curl/http.connect_timeout.html + http.connectTimeout = (dur!"seconds"(connectTimeout)); + + // Timeout for activity on connection + // This is a DMD | DLANG specific item, not a libcurl item + // https://dlang.org/library/std/net/curl/http.data_timeout.html + // https://raw.githubusercontent.com/dlang/phobos/master/std/net/curl.d - private enum _defaultDataTimeout = dur!"minutes"(2); + http.dataTimeout = (dur!"seconds"(dataTimeout)); + + // Maximum time any operation is allowed to take + // This includes dns resolution, connecting, data transfer, etc. + // https://curl.se/libcurl/c/CURLOPT_TIMEOUT_MS.html + // https://dlang.org/library/std/net/curl/http.operation_timeout.html + http.operationTimeout = (dur!"seconds"(operationTimeout)); + + // Specify how many redirects should be allowed + http.maxRedirects(maxRedirects); + // Debug HTTPS + http.verbose = httpsDebug; + // Use the configured 'user_agent' value + http.setUserAgent = userAgent; + // What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6 + http.handle.set(CurlOption.ipresolve,protocolVersion); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only + + // What version of HTTP protocol do we use? 
+ // Curl >= 7.62.0 defaults to http2 for a significant number of operations + if (httpProtocol) { + // Downgrade to HTTP 1.1 - yes version = 2 is HTTP 1.1 + http.handle.set(CurlOption.http_version,2); + } + + // Configure upload / download rate limits if configured + // 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts + // A 0 value means rate is unlimited, and is the curl default + if (userRateLimit > 0) { + // set rate limit + http.handle.set(CurlOption.max_send_speed_large,userRateLimit); + http.handle.set(CurlOption.max_recv_speed_large,userRateLimit); + } + + // Explicitly set these libcurl options + // https://curl.se/libcurl/c/CURLOPT_NOSIGNAL.html + // Ensure that nosignal is set to 0 - Setting CURLOPT_NOSIGNAL to 0 makes libcurl ask the system to ignore SIGPIPE signals + http.handle.set(CurlOption.nosignal,0); + + // https://curl.se/libcurl/c/CURLOPT_TCP_NODELAY.html + // Ensure that TCP_NODELAY is set to 0 to ensure that TCP NAGLE is enabled + http.handle.set(CurlOption.tcp_nodelay,0); + + if (httpsDebug) { + // Output what options we are using so that in the debug log this can be tracked + addLogEntry("http.dnsTimeout = " ~ to!string(dnsTimeout), ["debug"]); + addLogEntry("http.connectTimeout = " ~ to!string(connectTimeout), ["debug"]); + addLogEntry("http.dataTimeout = " ~ to!string(dataTimeout), ["debug"]); + addLogEntry("http.operationTimeout = " ~ to!string(operationTimeout), ["debug"]); + addLogEntry("http.maxRedirects = " ~ to!string(maxRedirects), ["debug"]); + addLogEntry("http.CurlOption.ipresolve = " ~ to!string(protocolVersion), ["debug"]); + addLogEntry("http.header.Connection.keepAlive = " ~ to!string(keepAlive), ["debug"]); + } + } + + void connect(HTTP.Method method, const(char)[] url) { + if (!keepAlive) + http.addRequestHeader("Connection", "close"); + http.method = method; + http.url = url; + } + + void setDisableSSLVerifyPeer() { + addLogEntry("CAUTION: Switching off CurlOption.ssl_verifypeer ... this makes the application insecure.", ["debug"]); + http.handle.set(CurlOption.ssl_verifypeer, 0); + } +} \ No newline at end of file diff --git a/src/itemdb.d b/src/itemdb.d index 28fc47121..ab68b55b2 100644 --- a/src/itemdb.d +++ b/src/itemdb.d @@ -1,3 +1,7 @@ +// What is this module called? +module itemdb; + +// What does this module require to function? import std.datetime; import std.exception; import std.path; @@ -5,19 +9,26 @@ import std.string; import std.stdio; import std.algorithm.searching; import core.stdc.stdlib; +import std.json; +import std.conv; + +// What other modules that we have created do we need to import? import sqlite; -static import log; +import util; +import log; enum ItemType { file, dir, - remote + remote, + unknown } struct Item { string driveId; string id; string name; + string remoteName; ItemType type; string eTag; string cTag; @@ -28,23 +39,144 @@ struct Item { string remoteDriveId; string remoteId; string syncStatus; + string size; +} + +// Construct an Item struct from a JSON driveItem +Item makeDatabaseItem(JSONValue driveItem) { + + Item item = { + id: driveItem["id"].str, + name: "name" in driveItem ? driveItem["name"].str : null, // name may be missing for deleted files in OneDrive Business + eTag: "eTag" in driveItem ? driveItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Business + cTag: "cTag" in driveItem ? driveItem["cTag"].str : null, // cTag is missing in old files (and all folders in OneDrive Business) + remoteName: "actualOnlineName" in driveItem ? 
driveItem["actualOnlineName"].str : null, // actualOnlineName is only used with OneDrive Business Shared Folders + }; + + // OneDrive API Change: https://github.com/OneDrive/onedrive-api-docs/issues/834 + // OneDrive no longer returns lastModifiedDateTime if the item is deleted by OneDrive + if(isItemDeleted(driveItem)) { + // Set mtime to SysTime(0) + item.mtime = SysTime(0); + } else { + // Item is not in a deleted state + // Resolve 'Key not found: fileSystemInfo' when then item is a remote item + // https://github.com/abraunegg/onedrive/issues/11 + if (isItemRemote(driveItem)) { + // remoteItem is a OneDrive object that exists on a 'different' OneDrive drive id, when compared to account default + // Normally, the 'remoteItem' field will contain 'fileSystemInfo' however, if the user uses the 'Add Shortcut ..' option in OneDrive WebUI + // to create a 'link', this object, whilst remote, does not have 'fileSystemInfo' in the expected place, thus leading to a application crash + // See: https://github.com/abraunegg/onedrive/issues/1533 + if ("fileSystemInfo" in driveItem["remoteItem"]) { + // 'fileSystemInfo' is in 'remoteItem' which will be the majority of cases + item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str); + } else { + // is a remote item, but 'fileSystemInfo' is missing from 'remoteItem' + if ("fileSystemInfo" in driveItem) { + item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str); + } + } + } else { + // Does fileSystemInfo exist at all ? + if ("fileSystemInfo" in driveItem) { + item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str); + } + } + } + + // Set this item object type + bool typeSet = false; + if (isItemFile(driveItem)) { + // 'file' object exists in the JSON + addLogEntry("Flagging object as a file", ["debug"]); + typeSet = true; + item.type = ItemType.file; + } + + if (isItemFolder(driveItem)) { + // 'folder' object exists in the JSON + addLogEntry("Flagging object as a directory", ["debug"]); + typeSet = true; + item.type = ItemType.dir; + } + + if (isItemRemote(driveItem)) { + // 'remote' object exists in the JSON + addLogEntry("Flagging object as a remote", ["debug"]); + typeSet = true; + item.type = ItemType.remote; + } + + // root and remote items do not have parentReference + if (!isItemRoot(driveItem) && ("parentReference" in driveItem) != null) { + item.driveId = driveItem["parentReference"]["driveId"].str; + if (hasParentReferenceId(driveItem)) { + item.parentId = driveItem["parentReference"]["id"].str; + } + } + + // extract the file hash and file size + if (isItemFile(driveItem) && ("hashes" in driveItem["file"])) { + // Get file size + if (hasFileSize(driveItem)) { + item.size = to!string(driveItem["size"].integer); + // Get quickXorHash as default + if ("quickXorHash" in driveItem["file"]["hashes"]) { + item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str; + } else { + addLogEntry("quickXorHash is missing from " ~ driveItem["id"].str, ["debug"]); + } + + // If quickXorHash is empty .. + if (item.quickXorHash.empty) { + // Is there a sha256Hash? 
+ if ("sha256Hash" in driveItem["file"]["hashes"]) { + item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str; + } else { + addLogEntry("sha256Hash is missing from " ~ driveItem["id"].str, ["debug"]); + } + } + } else { + // So that we have at least a zero value here as the API provided no 'size' data for this file item + item.size = "0"; + } + } + + // Is the object a remote drive item - living on another driveId ? + if (isItemRemote(driveItem)) { + item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str; + item.remoteId = driveItem["remoteItem"]["id"].str; + } + + // We have 3 different operational modes where 'item.syncStatus' is used to flag if an item is synced or not: + // - National Cloud Deployments do not support /delta as a query + // - When using --single-directory + // - When using --download-only --cleanup-local-files + // + // Thus we need to track in the database that this item is in sync + // As we are making an item, set the syncStatus to Y + // ONLY when either of the three modes above are being used, all the existing DB entries will get set to N + // so when processing /children, it can be identified what the 'deleted' difference is + item.syncStatus = "Y"; + + // Return the created item + return item; } -final class ItemDatabase -{ +final class ItemDatabase { // increment this for every change in the db schema - immutable int itemDatabaseVersion = 11; + immutable int itemDatabaseVersion = 12; Database db; string insertItemStmt; string updateItemStmt; string selectItemByIdStmt; + string selectItemByRemoteIdStmt; string selectItemByParentIdStmt; string deleteItemByIdStmt; bool databaseInitialised = false; - this(const(char)[] filename) - { + this(const(char)[] filename) { db = Database(filename); int dbVersion; try { @@ -52,14 +184,14 @@ final class ItemDatabase } catch (SqliteException e) { // An error was generated - what was the error? if (e.msg == "database is locked") { - writeln(); - log.error("ERROR: onedrive application is already running - check system process list for active application instances"); - log.vlog(" - Use 'sudo ps aufxw | grep onedrive' to potentially determine acive running process"); - writeln(); + addLogEntry(); + addLogEntry("ERROR: onedrive application is already running - check system process list for active application instances"); + addLogEntry(" - Use 'sudo ps aufxw | grep onedrive' to potentially determine acive running process", ["verbose"]); + addLogEntry(); } else { - writeln(); - log.error("ERROR: An internal database error occurred: " ~ e.msg); - writeln(); + addLogEntry(); + addLogEntry("ERROR: An internal database error occurred: " ~ e.msg); + addLogEntry(); } return; } @@ -67,10 +199,15 @@ final class ItemDatabase if (dbVersion == 0) { createTable(); } else if (db.getVersion() != itemDatabaseVersion) { - log.log("The item database is incompatible, re-creating database table structures"); + addLogEntry("The item database is incompatible, re-creating database table structures"); db.exec("DROP TABLE item"); createTable(); } + + // What is the threadsafe value + auto threadsafeValue = db.getThreadsafeValue(); + addLogEntry("Threadsafe database value: " ~ to!string(threadsafeValue), ["debug"]); + // Set the enforcement of foreign key constraints. 
// https://www.sqlite.org/pragma.html#pragma_foreign_keys // PRAGMA foreign_keys = boolean; @@ -99,12 +236,12 @@ final class ItemDatabase db.exec("PRAGMA locking_mode = EXCLUSIVE"); insertItemStmt = " - INSERT OR REPLACE INTO item (driveId, id, name, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13) + INSERT OR REPLACE INTO item (driveId, id, name, remoteName, type, eTag, cTag, mtime, parentId, quickXorHash, sha256Hash, remoteDriveId, remoteId, syncStatus, size) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15) "; updateItemStmt = " UPDATE item - SET name = ?3, type = ?4, eTag = ?5, cTag = ?6, mtime = ?7, parentId = ?8, quickXorHash = ?9, sha256Hash = ?10, remoteDriveId = ?11, remoteId = ?12, syncStatus = ?13 + SET name = ?3, remoteName = ?4, type = ?5, eTag = ?6, cTag = ?7, mtime = ?8, parentId = ?9, quickXorHash = ?10, sha256Hash = ?11, remoteDriveId = ?12, remoteId = ?13, syncStatus = ?14, size = ?15 WHERE driveId = ?1 AND id = ?2 "; selectItemByIdStmt = " @@ -112,6 +249,11 @@ final class ItemDatabase FROM item WHERE driveId = ?1 AND id = ?2 "; + selectItemByRemoteIdStmt = " + SELECT * + FROM item + WHERE remoteDriveId = ?1 AND remoteId = ?2 + "; selectItemByParentIdStmt = "SELECT * FROM item WHERE driveId = ? AND parentId = ?"; deleteItemByIdStmt = "DELETE FROM item WHERE driveId = ? AND id = ?"; @@ -119,17 +261,16 @@ final class ItemDatabase databaseInitialised = true; } - bool isDatabaseInitialised() - { + bool isDatabaseInitialised() { return databaseInitialised; } - void createTable() - { + void createTable() { db.exec("CREATE TABLE item ( driveId TEXT NOT NULL, id TEXT NOT NULL, name TEXT NOT NULL, + remoteName TEXT, type TEXT NOT NULL, eTag TEXT, cTag TEXT, @@ -141,6 +282,7 @@ final class ItemDatabase remoteId TEXT, deltaLink TEXT, syncStatus TEXT, + size TEXT, PRIMARY KEY (driveId, id), FOREIGN KEY (driveId, parentId) REFERENCES item (driveId, id) @@ -154,32 +296,27 @@ final class ItemDatabase db.setVersion(itemDatabaseVersion); } - void insert(const ref Item item) - { + void insert(const ref Item item) { auto p = db.prepare(insertItemStmt); bindItem(item, p); p.exec(); } - void update(const ref Item item) - { + void update(const ref Item item) { auto p = db.prepare(updateItemStmt); bindItem(item, p); p.exec(); } - void dump_open_statements() - { + void dump_open_statements() { db.dump_open_statements(); } - int db_checkpoint() - { + int db_checkpoint() { return db.db_checkpoint(); } - void upsert(const ref Item item) - { + void upsert(const ref Item item) { auto s = db.prepare("SELECT COUNT(*) FROM item WHERE driveId = ? 
AND id = ?"); s.bind(1, item.driveId); s.bind(2, item.id); @@ -191,8 +328,7 @@ final class ItemDatabase stmt.exec(); } - Item[] selectChildren(const(char)[] driveId, const(char)[] id) - { + Item[] selectChildren(const(char)[] driveId, const(char)[] id) { auto p = db.prepare(selectItemByParentIdStmt); p.bind(1, driveId); p.bind(2, id); @@ -205,8 +341,7 @@ final class ItemDatabase return items; } - bool selectById(const(char)[] driveId, const(char)[] id, out Item item) - { + bool selectById(const(char)[] driveId, const(char)[] id, out Item item) { auto p = db.prepare(selectItemByIdStmt); p.bind(1, driveId); p.bind(2, id); @@ -218,9 +353,20 @@ final class ItemDatabase return false; } + bool selectByRemoteId(const(char)[] remoteDriveId, const(char)[] remoteId, out Item item) { + auto p = db.prepare(selectItemByRemoteIdStmt); + p.bind(1, remoteDriveId); + p.bind(2, remoteId); + auto r = p.exec(); + if (!r.empty) { + item = buildItem(r); + return true; + } + return false; + } + // returns true if an item id is in the database - bool idInLocalDatabase(const(string) driveId, const(string)id) - { + bool idInLocalDatabase(const(string) driveId, const(string)id) { auto p = db.prepare(selectItemByIdStmt); p.bind(1, driveId); p.bind(2, id); @@ -233,18 +379,11 @@ final class ItemDatabase // returns the item with the given path // the path is relative to the sync directory ex: "./Music/Turbo Killer.mp3" - bool selectByPath(const(char)[] path, string rootDriveId, out Item item) - { + bool selectByPath(const(char)[] path, string rootDriveId, out Item item) { Item currItem = { driveId: rootDriveId }; // Issue https://github.com/abraunegg/onedrive/issues/578 - if (startsWith(path, "./") || path == ".") { - // Need to remove the . from the path prefix - path = "root/" ~ path.chompPrefix("."); - } else { - // Leave path as it is - path = "root/" ~ path; - } + path = "root/" ~ (startsWith(path, "./") || path == "." ? path.chompPrefix(".") : path); auto s = db.prepare("SELECT * FROM item WHERE name = ?1 AND driveId IS ?2 AND parentId IS ?3"); foreach (name; pathSplitter(path)) { @@ -254,12 +393,15 @@ final class ItemDatabase auto r = s.exec(); if (r.empty) return false; currItem = buildItem(r); - // if the item is of type remote substitute it with the child + + // If the item is of type remote substitute it with the child if (currItem.type == ItemType.remote) { + addLogEntry("Record is a Remote Object: " ~ to!string(currItem), ["debug"]); Item child; if (selectById(currItem.remoteDriveId, currItem.remoteId, child)) { assert(child.type != ItemType.remote, "The type of the child cannot be remote"); currItem = child; + addLogEntry("Selecting Record that is NOT Remote Object: " ~ to!string(currItem), ["debug"]); } } } @@ -267,19 +409,12 @@ final class ItemDatabase return true; } - // same as selectByPath() but it does not traverse remote folders - bool selectByPathWithoutRemote(const(char)[] path, string rootDriveId, out Item item) - { + // same as selectByPath() but it does not traverse remote folders, returns the remote element if that is what is required + bool selectByPathIncludingRemoteItems(const(char)[] path, string rootDriveId, out Item item) { Item currItem = { driveId: rootDriveId }; // Issue https://github.com/abraunegg/onedrive/issues/578 - if (startsWith(path, "./") || path == ".") { - // Need to remove the . from the path prefix - path = "root/" ~ path.chompPrefix("."); - } else { - // Leave path as it is - path = "root/" ~ path; - } + path = "root/" ~ (startsWith(path, "./") || path == "." ? 
path.chompPrefix(".") : path); auto s = db.prepare("SELECT * FROM item WHERE name IS ?1 AND driveId IS ?2 AND parentId IS ?3"); foreach (name; pathSplitter(path)) { @@ -290,62 +425,89 @@ final class ItemDatabase if (r.empty) return false; currItem = buildItem(r); } + + if (currItem.type == ItemType.remote) { + addLogEntry("Record selected is a Remote Object: " ~ to!string(currItem), ["debug"]); + } + item = currItem; return true; } - void deleteById(const(char)[] driveId, const(char)[] id) - { + void deleteById(const(char)[] driveId, const(char)[] id) { auto p = db.prepare(deleteItemByIdStmt); p.bind(1, driveId); p.bind(2, id); p.exec(); } - private void bindItem(const ref Item item, ref Statement stmt) - { + private void bindItem(const ref Item item, ref Statement stmt) { with (stmt) with (item) { bind(1, driveId); bind(2, id); bind(3, name); + bind(4, remoteName); string typeStr = null; final switch (type) with (ItemType) { case file: typeStr = "file"; break; case dir: typeStr = "dir"; break; case remote: typeStr = "remote"; break; + case unknown: typeStr = "unknown"; break; } - bind(4, typeStr); - bind(5, eTag); - bind(6, cTag); - bind(7, mtime.toISOExtString()); - bind(8, parentId); - bind(9, quickXorHash); - bind(10, sha256Hash); - bind(11, remoteDriveId); - bind(12, remoteId); - bind(13, syncStatus); + bind(5, typeStr); + bind(6, eTag); + bind(7, cTag); + bind(8, mtime.toISOExtString()); + bind(9, parentId); + bind(10, quickXorHash); + bind(11, sha256Hash); + bind(12, remoteDriveId); + bind(13, remoteId); + bind(14, syncStatus); + bind(15, size); } } - private Item buildItem(Statement.Result result) - { + private Item buildItem(Statement.Result result) { assert(!result.empty, "The result must not be empty"); - assert(result.front.length == 14, "The result must have 14 columns"); + assert(result.front.length == 16, "The result must have 16 columns"); Item item = { + + // column 0: driveId + // column 1: id + // column 2: name + // column 3: remoteName - only used when there is a difference in the local name & remote shared folder name + // column 4: type + // column 5: eTag + // column 6: cTag + // column 7: mtime + // column 8: parentId + // column 9: quickXorHash + // column 10: sha256Hash + // column 11: remoteDriveId + // column 12: remoteId + // column 13: deltaLink + // column 14: syncStatus + // column 15: size + driveId: result.front[0].dup, id: result.front[1].dup, name: result.front[2].dup, - eTag: result.front[4].dup, - cTag: result.front[5].dup, - mtime: SysTime.fromISOExtString(result.front[6]), - parentId: result.front[7].dup, - quickXorHash: result.front[8].dup, - sha256Hash: result.front[9].dup, - remoteDriveId: result.front[10].dup, - remoteId: result.front[11].dup, - syncStatus: result.front[12].dup + remoteName: result.front[3].dup, + // Column 4 is type - not set here + eTag: result.front[5].dup, + cTag: result.front[6].dup, + mtime: SysTime.fromISOExtString(result.front[7]), + parentId: result.front[8].dup, + quickXorHash: result.front[9].dup, + sha256Hash: result.front[10].dup, + remoteDriveId: result.front[11].dup, + remoteId: result.front[12].dup, + // Column 13 is deltaLink - not set here + syncStatus: result.front[14].dup, + size: result.front[15].dup }; - switch (result.front[3]) { + switch (result.front[4]) { case "file": item.type = ItemType.file; break; case "dir": item.type = ItemType.dir; break; case "remote": item.type = ItemType.remote; break; @@ -357,8 +519,7 @@ final class ItemDatabase // computes the path of the given item id // the path is 
relative to the sync directory ex: "Music/Turbo Killer.mp3" // the trailing slash is not added even if the item is a directory - string computePath(const(char)[] driveId, const(char)[] id) - { + string computePath(const(char)[] driveId, const(char)[] id) { assert(driveId && id); string path; Item item; @@ -406,9 +567,9 @@ final class ItemDatabase } } else { // broken tree - log.vdebug("The following generated a broken tree query:"); - log.vdebug("Drive ID: ", driveId); - log.vdebug("Item ID: ", id); + addLogEntry("The following generated a broken tree query:", ["debug"]); + addLogEntry("Drive ID: " ~ to!string(driveId), ["debug"]); + addLogEntry("Item ID: " ~ to!string(id), ["debug"]); assert(0); } } @@ -416,8 +577,7 @@ final class ItemDatabase return path; } - Item[] selectRemoteItems() - { + Item[] selectRemoteItems() { Item[] items; auto stmt = db.prepare("SELECT * FROM item WHERE remoteDriveId IS NOT NULL"); auto res = stmt.exec(); @@ -428,8 +588,11 @@ final class ItemDatabase return items; } - string getDeltaLink(const(char)[] driveId, const(char)[] id) - { + string getDeltaLink(const(char)[] driveId, const(char)[] id) { + // Log what we received + addLogEntry("DeltaLink Query (driveId): " ~ to!string(driveId), ["debug"]); + addLogEntry("DeltaLink Query (id): " ~ to!string(id), ["debug"]); + assert(driveId && id); auto stmt = db.prepare("SELECT deltaLink FROM item WHERE driveId = ?1 AND id = ?2"); stmt.bind(1, driveId); @@ -439,8 +602,7 @@ final class ItemDatabase return res.front[0].dup; } - void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) - { + void setDeltaLink(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) { assert(driveId && id); assert(deltaLink); auto stmt = db.prepare("UPDATE item SET deltaLink = ?3 WHERE driveId = ?1 AND id = ?2"); @@ -455,8 +617,7 @@ final class ItemDatabase // As we query /children to get all children from OneDrive, update anything in the database // to be flagged as not-in-sync, thus, we can use that flag to determing what was previously // in-sync, but now deleted on OneDrive - void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id) - { + void downgradeSyncStatusFlag(const(char)[] driveId, const(char)[] id) { assert(driveId); auto stmt = db.prepare("UPDATE item SET syncStatus = 'N' WHERE driveId = ?1 AND id = ?2"); stmt.bind(1, driveId); @@ -466,8 +627,7 @@ final class ItemDatabase // National Cloud Deployments (US and DE) do not support /delta as a query // Select items that have a out-of-sync flag set - Item[] selectOutOfSyncItems(const(char)[] driveId) - { + Item[] selectOutOfSyncItems(const(char)[] driveId) { assert(driveId); Item[] items; auto stmt = db.prepare("SELECT * FROM item WHERE syncStatus = 'N' AND driveId = ?1"); @@ -482,8 +642,7 @@ final class ItemDatabase // OneDrive Business Folders are stored in the database potentially without a root | parentRoot link // Select items associated with the provided driveId - Item[] selectByDriveId(const(char)[] driveId) - { + Item[] selectByDriveId(const(char)[] driveId) { assert(driveId); Item[] items; auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1 AND parentId IS NULL"); @@ -496,22 +655,37 @@ final class ItemDatabase return items; } + // Select all items associated with the provided driveId + Item[] selectAllItemsByDriveId(const(char)[] driveId) { + assert(driveId); + Item[] items; + auto stmt = db.prepare("SELECT * FROM item WHERE driveId = ?1"); + stmt.bind(1, driveId); + auto res = stmt.exec(); + while (!res.empty) { + 
items ~= buildItem(res); + res.step(); + } + return items; + } + // Perform a vacuum on the database, commit WAL / SHM to file - void performVacuum() - { + void performVacuum() { + addLogEntry("Attempting to perform a database vacuum to merge any temporary data", ["debug"]); + try { auto stmt = db.prepare("VACUUM;"); stmt.exec(); + addLogEntry("Database vacuum is complete", ["debug"]); } catch (SqliteException e) { - writeln(); - log.error("ERROR: Unable to perform a database vacuum: " ~ e.msg); - writeln(); + addLogEntry(); + addLogEntry("ERROR: Unable to perform a database vacuum: " ~ e.msg); + addLogEntry(); } } // Select distinct driveId items from database - string[] selectDistinctDriveIds() - { + string[] selectDistinctDriveIds() { string[] driveIdArray; auto stmt = db.prepare("SELECT DISTINCT driveId FROM item;"); auto res = stmt.exec(); @@ -522,4 +696,4 @@ final class ItemDatabase } return driveIdArray; } -} +} \ No newline at end of file diff --git a/src/log.d b/src/log.d index b7aa0da68..2bf6d1546 100644 --- a/src/log.d +++ b/src/log.d @@ -1,239 +1,156 @@ +// What is this module called? +module log; + +// What does this module require to function? import std.stdio; import std.file; import std.datetime; -import std.process; -import std.conv; -import core.memory; -import core.sys.posix.pwd, core.sys.posix.unistd, core.stdc.string : strlen; -import std.algorithm : splitter; +import std.concurrency; +import std.typecons; +import core.sync.mutex; +import core.thread; +import std.format; +import std.string; + version(Notifications) { import dnotify; } -// enable verbose logging -long verbose; -bool writeLogFile = false; -bool logFileWriteFailFlag = false; - -private bool doNotifications; - -// shared string variable for username -string username; -string logFilePath; - -void init(string logDir) -{ - writeLogFile = true; - username = getUserName(); - logFilePath = logDir; - - if (!exists(logFilePath)){ - // logfile path does not exist - try { - mkdirRecurse(logFilePath); - } - catch (std.file.FileException e) { - // we got an error .. 
- writeln("\nUnable to access ", logFilePath); - writeln("Please manually create '",logFilePath, "' and set appropriate permissions to allow write access"); - writeln("The requested client activity log will instead be located in your users home directory"); - } - } -} - -void setNotifications(bool value) -{ - version(Notifications) { - // if we try to enable notifications, check for server availability - // and disable in case dbus server is not reachable - if (value) { - auto serverAvailable = dnotify.check_availability(); - if (!serverAvailable) { - log("Notification (dbus) server not available, disabling"); - value = false; - } - } - } - doNotifications = value; -} - -void log(T...)(T args) -{ - writeln(args); - if(writeLogFile){ - // Write to log file - logfileWriteLine(args); - } -} - -void logAndNotify(T...)(T args) -{ - notify(args); - log(args); -} - -void fileOnly(T...)(T args) -{ - if(writeLogFile){ - // Write to log file - logfileWriteLine(args); - } -} - -void vlog(T...)(T args) -{ - if (verbose >= 1) { - writeln(args); - if(writeLogFile){ - // Write to log file - logfileWriteLine(args); - } - } -} - -void vdebug(T...)(T args) -{ - if (verbose >= 2) { - writeln("[DEBUG] ", args); - if(writeLogFile){ - // Write to log file - logfileWriteLine("[DEBUG] ", args); - } - } -} - -void vdebugNewLine(T...)(T args) -{ - if (verbose >= 2) { - writeln("\n[DEBUG] ", args); - if(writeLogFile){ - // Write to log file - logfileWriteLine("\n[DEBUG] ", args); - } - } -} - -void error(T...)(T args) -{ - stderr.writeln(args); - if(writeLogFile){ - // Write to log file - logfileWriteLine(args); - } -} - -void errorAndNotify(T...)(T args) -{ - notify(args); - error(args); -} - -void notify(T...)(T args) -{ - version(Notifications) { - if (doNotifications) { - string result; - foreach (index, arg; args) { - result ~= to!string(arg); - if (index != args.length - 1) - result ~= " "; - } - auto n = new Notification("OneDrive", result, "IGNORED"); - try { +// Shared module object +shared LogBuffer logBuffer; + +class LogBuffer { + private: + string[3][] buffer; + Mutex bufferLock; + string logFilePath; + bool writeToFile; + bool verboseLogging; + bool debugLogging; + Thread flushThread; + bool isRunning; + bool sendGUINotification; + + public: + this(bool verboseLogging, bool debugLogging) { + // Initialise the mutex + bufferLock = new Mutex(); + // Initialise other items + this.logFilePath = logFilePath; + this.writeToFile = writeToFile; + this.verboseLogging = verboseLogging; + this.debugLogging = debugLogging; + this.isRunning = true; + this.sendGUINotification = true; + this.flushThread = new Thread(&flushBuffer); + flushThread.isDaemon(true); + flushThread.start(); + } + + ~this() { + isRunning = false; + flushThread.join(); + flush(); + } + + shared void logThisMessage(string message, string[] levels = ["info"]) { + // Generate the timestamp for this log entry + auto timeStamp = leftJustify(Clock.currTime().toString(), 28, '0'); + + synchronized(bufferLock) { + foreach (level; levels) { + // Normal application output + if (!debugLogging) { + if ((level == "info") || ((verboseLogging) && (level == "verbose")) || (level == "logFileOnly") || (level == "consoleOnly") || (level == "consoleOnlyNoNewLine")) { + // Add this message to the buffer, with this format + buffer ~= [timeStamp, level, format("%s", message)]; + } + } else { + // Debug Logging (--verbose --verbose | -v -v | -vv) output + // Add this message, regardless of 'level' to the buffer, with this format + buffer ~= [timeStamp, level, 
format("DEBUG: %s", message)]; + // If there are multiple 'levels' configured, ignore this and break as we are doing debug logging + break; + } + + // Submit the message to the dbus / notification daemon for display within the GUI being used + // Will not send GUI notifications when running in debug mode + if ((!debugLogging) && (level == "notify")) { + version(Notifications) { + if (sendGUINotification) { + notify(message); + } + } + } + } + } + } + + shared void notify(string message) { + // Use dnotify's functionality for GUI notifications, if GUI notifications is enabled + version(Notifications) { + auto n = new Notification("Log Notification", message, "IGNORED"); n.show(); - // Sent message to notification daemon - if (verbose >= 2) { - writeln("[DEBUG] Sent notification to notification service. If notification is not displayed, check dbus or notification-daemon for errors"); - } - - } catch (Throwable e) { - vlog("Got exception from showing notification: ", e); } - } - } -} - -private void logfileWriteLine(T...)(T args) -{ - static import std.exception; - // Write to log file - string logFileName = .logFilePath ~ .username ~ ".onedrive.log"; - auto currentTime = Clock.currTime(); - auto timeString = currentTime.toString(); - File logFile; - - // Resolve: std.exception.ErrnoException@std/stdio.d(423): Cannot open file `/var/log/onedrive/xxxxx.onedrive.log' in mode `a' (Permission denied) - try { - logFile = File(logFileName, "a"); - } - catch (std.exception.ErrnoException e) { - // We cannot open the log file in logFilePath location for writing - // The user is not part of the standard 'users' group (GID 100) - // Change logfile to ~/onedrive.log putting the log file in the users home directory - - if (!logFileWriteFailFlag) { - // write out error message that we cant log to the requested file - writeln("\nUnable to write activity log to ", logFileName); - writeln("Please set appropriate permissions to allow write access to the logging directory for your user account"); - writeln("The requested client activity log will instead be located in your users home directory\n"); + } + + private void flushBuffer() { + while (isRunning) { + Thread.sleep(dur!("msecs")(200)); + flush(); + } + } - // set the flag so we dont keep printing this error message - logFileWriteFailFlag = true; - } - - string homePath = environment.get("HOME"); - string logFileNameAlternate = homePath ~ "/onedrive.log"; - logFile = File(logFileNameAlternate, "a"); - } - // Write to the log file - logFile.writeln(timeString, "\t", args); - logFile.close(); + private void flush() { + string[3][] messages; + synchronized(bufferLock) { + messages = buffer; + buffer.length = 0; + } + + foreach (msg; messages) { + // timestamp, logLevel, message + // Always write the log line to the console, if level != logFileOnly + if (msg[1] != "logFileOnly") { + // Console output .. what sort of output + if (msg[1] == "consoleOnlyNoNewLine") { + // This is used write out a message to the console only, without a new line + // This is used in non-verbose mode to indicate something is happening when downloading JSON data from OneDrive or when we need user input from --resync + write(msg[2]); + } else { + // write this to the console with a new line + writeln(msg[2]); + } + } + + // Was this just console only output? 
+ if ((msg[1] != "consoleOnlyNoNewLine") && (msg[1] != "consoleOnly")) { + // Write to the logfile only if configured to do so - console only items should not be written out + if (writeToFile) { + string logFileLine = format("[%s] %s", msg[0], msg[2]); + std.file.append(logFilePath, logFileLine ~ "\n"); + } + } + } + } } -private string getUserName() -{ - auto pw = getpwuid(getuid); - - // get required details - auto runtime_pw_name = pw.pw_name[0 .. strlen(pw.pw_name)].splitter(','); - auto runtime_pw_uid = pw.pw_uid; - auto runtime_pw_gid = pw.pw_gid; - - // user identifiers from process - vdebug("Process ID: ", pw); - vdebug("User UID: ", runtime_pw_uid); - vdebug("User GID: ", runtime_pw_gid); - - // What should be returned as username? - if (!runtime_pw_name.empty && runtime_pw_name.front.length){ - // user resolved - vdebug("User Name: ", runtime_pw_name.front.idup); - return runtime_pw_name.front.idup; - } else { - // Unknown user? - vdebug("User Name: unknown"); - return "unknown"; - } +// Function to initialize the logging system +void initialiseLogging(bool verboseLogging = false, bool debugLogging = false) { + logBuffer = cast(shared) new LogBuffer(verboseLogging, debugLogging); } -void displayMemoryUsagePreGC() -{ -// Display memory usage -writeln("\nMemory Usage pre GC (bytes)"); -writeln("--------------------"); -writeln("memory usedSize = ", GC.stats.usedSize); -writeln("memory freeSize = ", GC.stats.freeSize); -// uncomment this if required, if not using LDC 1.16 as this does not exist in that version -//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n"); +// Function to add a log entry with multiple levels +void addLogEntry(string message = "", string[] levels = ["info"]) { + logBuffer.logThisMessage(message, levels); } -void displayMemoryUsagePostGC() -{ -// Display memory usage -writeln("\nMemory Usage post GC (bytes)"); -writeln("--------------------"); -writeln("memory usedSize = ", GC.stats.usedSize); -writeln("memory freeSize = ", GC.stats.freeSize); -// uncomment this if required, if not using LDC 1.16 as this does not exist in that version -//writeln("memory allocatedInCurrentThread = ", GC.stats.allocatedInCurrentThread, "\n"); +// Function to set logFilePath and enable logging to a file +void enableLogFileOutput(string configuredLogFilePath) { + logBuffer.logFilePath = configuredLogFilePath; + logBuffer.writeToFile = true; } + +void disableGUINotifications(bool userConfigDisableNotifications) { + logBuffer.sendGUINotification = userConfigDisableNotifications; +} \ No newline at end of file diff --git a/src/main.d b/src/main.d index 688cd1d57..120aaccfa 100644 --- a/src/main.d +++ b/src/main.d @@ -1,2094 +1,1381 @@ +// What is this module called? +module main; + +// What does this module require to function? 
import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; -import core.memory, core.time, core.thread; -import std.getopt, std.file, std.path, std.process, std.stdio, std.conv, std.algorithm.searching, std.string, std.regex; -import config, itemdb, monitor, onedrive, selective, sync, util; -import std.net.curl: CurlException; import core.stdc.signal; -import std.traits, std.format; -import std.concurrency: receiveTimeout; +import core.memory; +import core.time; +import core.thread; +import std.stdio; +import std.getopt; +import std.string; +import std.file; +import std.process; +import std.algorithm; +import std.path; +import std.concurrency; +import std.parallelism; +import std.conv; +import std.traits; +import std.net.curl: CurlException; import std.datetime; -static import log; - -OneDriveApi oneDrive; -ItemDatabase itemDb; -bool onedriveInitialised = false; -const int EXIT_UNAUTHORIZED = 3; +// What other modules that we have created do we need to import? +import config; +import log; +import curlEngine; +import util; +import onedrive; +import syncEngine; +import itemdb; +import clientSideFiltering; +import monitor; + +// What other constant variables do we require? const int EXIT_RESYNC_REQUIRED = 126; -enum MONITOR_LOG_SILENT = 2; -enum MONITOR_LOG_QUIET = 1; -enum LOG_NORMAL = 0; - -int main(string[] args) -{ - // Disable buffering on stdout +// Class objects +ApplicationConfig appConfig; +OneDriveApi oneDriveApiInstance; +SyncEngine syncEngineInstance; +ItemDatabase itemDB; +ClientSideFiltering selectiveSync; +Monitor filesystemMonitor; + +int main(string[] cliArgs) { + // Application Start Time - used during monitor loop to detail how long it has been running for + auto applicationStartTime = Clock.currTime(); + // Disable buffering on stdout - this is needed so that when we are using plain write() it will go to the terminal without flushing stdout.setvbuf(0, _IONBF); - - // main function variables - string confdirOption; - string configFilePath; - string syncListFilePath; - string databaseFilePath; - string businessSharedFolderFilePath; - string currentConfigHash; - string currentSyncListHash; - string previousConfigHash; - string previousSyncListHash; - string configHashFile; - string syncListHashFile; - string configBackupFile; - string syncDir; - string logOutputMessage; - string currentBusinessSharedFoldersHash; - string previousBusinessSharedFoldersHash; - string businessSharedFoldersHashFile; - string databaseFilePathDryRunGlobal; - bool configOptionsDifferent = false; - bool businessSharedFoldersDifferent = false; - bool syncListConfigured = false; - bool syncListDifferent = false; - bool syncDirDifferent = false; - bool skipFileDifferent = false; - bool skipDirDifferent = false; + + // Required main function variables + string genericHelpMessage = "Please use 'onedrive --help' for further assistance in regards to running this application."; + // If the user passes in --confdir we need to store this as a variable + string confdirOption = ""; + // running as what user? + string runtimeUserName = ""; + // Are we online? bool online = false; - bool performSyncOK = false; + // Does the operating environment have shell environment variables set + bool shellEnvSet = false; + // What is the runtime syncronisation directory that will be used + // Typically this will be '~/OneDrive' .. however tilde expansion is unreliable + string runtimeSyncDirectory = ""; + // Configure the runtime database file path. 
Typically this will be the default, but in a --dry-run scenario, we use a separate database file + string runtimeDatabaseFile = ""; + // Verbosity Logging Count - this defines if verbose or debug logging is being used + long verbosityCount = 0; + // Application Logging Level + bool verboseLogging = false; + bool debugLogging = false; + // Monitor loop failures + bool monitorFailures = false; + + // DEVELOPER OPTIONS OUTPUT VARIABLES bool displayMemoryUsage = false; bool displaySyncOptions = false; - bool cleanupLocalFilesGlobal = false; - bool synchronizeConfigured = false; - bool invalidSyncExit = false; - // start and finish messages - string startMessage = "Starting a sync with OneDrive"; - string finishMessage = "Sync with OneDrive is complete"; - string helpMessage = "Please use 'onedrive --help' for further assistance in regards to running this application."; + // Application Version + //immutable string applicationVersion = "onedrive " ~ strip(import("version")); + immutable string applicationVersion = "v2.5.0-alpha-5" ~ " GitHub version: " ~ strip(import("version")); - // hash file permission values - string hashPermissionValue = "600"; - auto convertedPermissionValue = parse!long(hashPermissionValue, 8); - - // Define scopes + // Define 'exit' and 'failure' scopes scope(exit) { - // detail what scope was called - log.vdebug("Exit scope called"); - if (synchronizeConfigured) { - log.log(finishMessage); - } - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePreGC(); - } - // if initialised, shut down the HTTP instance - if (onedriveInitialised) { - oneDrive.shutdown(); - } - // was itemDb initialised? - if (itemDb !is null) { - // Make sure the .wal file is incorporated into the main db before we exit - if(!invalidSyncExit) { - itemDb.performVacuum(); - } - destroy(itemDb); - } - // cleanup any dry-run data - cleanupDryRunDatabase(databaseFilePathDryRunGlobal); - // free API instance - if (oneDrive !is null) { - destroy(oneDrive); - } - // Perform Garbage Cleanup - GC.collect(); - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePostGC(); - } + // Detail what scope was called + addLogEntry("Exit scope was called", ["debug"]); + // Perform exit tasks + performStandardExitProcess("exitScope"); } - + scope(failure) { - // detail what scope was called - log.vdebug("Failure scope called"); - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePreGC(); - } - // if initialised, shut down the HTTP instance - if (onedriveInitialised) { - oneDrive.shutdown(); - } - // was itemDb initialised? 
- if (itemDb !is null) { - // Make sure the .wal file is incorporated into the main db before we exit - if(!invalidSyncExit) { - itemDb.performVacuum(); - } - destroy(itemDb); - } - // cleanup any dry-run data - cleanupDryRunDatabase(databaseFilePathDryRunGlobal); - // free API instance - if (oneDrive !is null) { - destroy(oneDrive); - } - // Perform Garbage Cleanup - GC.collect(); - // Display memory details - if (displayMemoryUsage) { - log.displayMemoryUsagePostGC(); - } + // Detail what scope was called + addLogEntry("Failure scope was called", ["debug"]); + // Perform exit tasks + performStandardExitProcess("failureScope"); } - - // read in application options as passed in + + // Read in application options as passed in try { bool printVersion = false; - auto opt = getopt( - args, + auto cliOptions = getopt( + cliArgs, std.getopt.config.passThrough, std.getopt.config.bundling, std.getopt.config.caseSensitive, "confdir", "Set the directory used to store the configuration files", &confdirOption, - "verbose|v+", "Print more details, useful for debugging (repeat for extra debugging)", &log.verbose, + "verbose|v+", "Print more details, useful for debugging (repeat for extra debugging)", &verbosityCount, "version", "Print the version and exit", &printVersion ); - // print help and exit - if (opt.helpWanted) { - args ~= "--help"; + // Print help and exit + if (cliOptions.helpWanted) { + cliArgs ~= "--help"; } - // print the version and exit + // Print the version and exit if (printVersion) { - writeln("onedrive ", strip(import("version"))); - return EXIT_SUCCESS; + writeln(applicationVersion); + exit(EXIT_SUCCESS); } } catch (GetOptException e) { - // option errors - log.error(e.msg); - log.error("Try 'onedrive --help' for more information"); + // Option errors + writeln(e.msg); + writeln(genericHelpMessage); return EXIT_FAILURE; } catch (Exception e) { - // generic error - log.error(e.msg); - log.error("Try 'onedrive --help' for more information"); + // Generic error + writeln(e.msg); + writeln(genericHelpMessage); return EXIT_FAILURE; } - - // confdirOption must be a directory, not a file - // - By default ~/.config/onedrive will be used - // - If the user is using --confdir , the confdirOption needs to be evaluated when trying to load any file - // load configuration file if available - auto cfg = new config.Config(confdirOption); - if (!cfg.initialize()) { - // There was an error loading the configuration + + // Determine the application logging verbosity + if (verbosityCount == 1) { verboseLogging = true;} + if (verbosityCount >= 2) { debugLogging = true;} + + // Initialize the application logging class, as we know the application verbosity level + // If we need to enable logging to a file, we can only do this once we know the application configuration which is done slightly later on + initialiseLogging(verboseLogging, debugLogging); + + /** + // most used + addLogEntry("Basic 'info' message", ["info"]); .... 
or just use addLogEntry("Basic 'info' message"); + addLogEntry("Basic 'verbose' message", ["verbose"]); + addLogEntry("Basic 'debug' message", ["debug"]); + // GUI notify only + addLogEntry("Basic 'notify' ONLY message and displayed in GUI if notifications are enabled", ["notify"]); + // info and notify + addLogEntry("Basic 'info and notify' message and displayed in GUI if notifications are enabled", ["info", "notify"]); + // log file only + addLogEntry("Information sent to the log file only, and only if logging to a file is enabled", ["logFileOnly"]); + // Console only (session based upload|download) + addLogEntry("Basic 'Console only with new line' message", ["consoleOnly"]); + // Console only with no new line + addLogEntry("Basic 'Console only with no new line' message", ["consoleOnlyNoNewLine"]); + **/ + + // Log application start time + addLogEntry("Application started", ["debug"]); + + // Who are we running as? This will print the ProcessID, UID, GID and username the application is running as + runtimeUserName = getUserName(); + + // Print in debug the application version as soon as possible + addLogEntry("Application Version: " ~ applicationVersion, ["debug"]); + + // How was this application started - what options were passed in + addLogEntry("Passed in 'cliArgs': " ~ to!string(cliArgs), ["debug"]); + addLogEntry("Note: --confdir and --verbose are not listed in 'cliArgs' array", ["debug"]); + addLogEntry("Passed in --confdir if present: " ~ confdirOption, ["debug"]); + addLogEntry("Passed in --verbose count if present: " ~ to!string(verbosityCount), ["debug"]); + + // Create a new AppConfig object with default values, + appConfig = new ApplicationConfig(); + // Update the default application configuration with the logging level so these can be used as a config option throughout the application + appConfig.setConfigLoggingLevels(verboseLogging, debugLogging, verbosityCount); + + // Initialise the application configuration, utilising --confdir if it was passed in + // Otherwise application defaults will be used to configure the application + if (!appConfig.initialise(confdirOption)) { + // There was an error loading the user specified application configuration // Error message already printed return EXIT_FAILURE; } - // How was this application started - what options were passed in - log.vdebug("passed in options: ", args); - log.vdebug("note --confdir and --verbose not listed in args"); - - // set memory display - displayMemoryUsage = cfg.getValueBool("display_memory"); - - // set display sync options - displaySyncOptions = cfg.getValueBool("display_sync_options"); - - // update configuration from command line args - cfg.update_from_args(args); + // Update the current runtime application configuration (default or 'config' fileread-in options) from any passed in command line arguments + appConfig.updateFromArgs(cliArgs); - // --resync should be a 'last resort item' .. the user needs to 'accept' to proceed - if ((cfg.getValueBool("resync")) && (!cfg.getValueBool("display_config"))) { - // what is the risk acceptance? 
- bool resyncRiskAcceptance = false;
+ // As early as possible, now re-configure the logging class, given that we have read in any applicable 'config' file and updated the application running config from CLI input:
+ // - Enable logging to a file if this is required
+ // - Disable GUI notifications if this has been configured
- if (!cfg.getValueBool("resync_auth")) {
- // need to prompt user
- char response;
- // warning message
- writeln("\nThe use of --resync will remove your local 'onedrive' client state, thus no record will exist regarding your current 'sync status'");
- writeln("This has the potential to overwrite local versions of files with potentially older versions downloaded from OneDrive which can lead to data loss");
- writeln("If in-doubt, backup your local data first before proceeding with --resync");
- write("\nAre you sure you wish to proceed with --resync? [Y/N] ");
-
- try {
- // Attempt to read user response
- readf(" %c\n", &response);
- } catch (std.format.FormatException e) {
- // Caught an error
- return EXIT_FAILURE;
- }
-
- // Evaluate user repsonse
- if ((to!string(response) == "y") || (to!string(response) == "Y")) {
- // User has accepted --resync risk to proceed
- resyncRiskAcceptance = true;
- // Are you sure you wish .. does not use writeln();
- write("\n");
- }
+ // Configure application logging to a log file only if this has been enabled
+ // This is the earliest point that this can be done, as the client configuration has been read in, and any CLI arguments have been processed.
+ // Either of those ('config' file, CLI arguments) could be enabling logging, thus this is the earliest point at which this can be validated and enabled.
+ // The buffered logging also ensures that all 'output' to this point is also captured and written out to the log file
+ if (appConfig.getValueBool("enable_logging")) {
+ // Calculate the application logging directory
+ string calculatedLogDirPath = appConfig.calculateLogDirectory();
+ string calculatedLogFilePath;
+ // Initialise using the configured logging directory
+ addLogEntry("Using the following path to store the runtime application log: " ~ calculatedLogDirPath, ["verbose"]);
+ // Calculate the logfile name
+ if (calculatedLogDirPath != appConfig.defaultHomePath) {
+ // Log file is not going to the home directory
+ string logfileName = runtimeUserName ~ ".onedrive.log";
+ calculatedLogFilePath = buildNormalizedPath(buildPath(calculatedLogDirPath, logfileName));
 } else {
- // resync_auth is true
- resyncRiskAcceptance = true;
- }
-
- // Action based on response
- if (!resyncRiskAcceptance){
- // --resync risk not accepted
- return EXIT_FAILURE;
+ // Log file is going to the users home directory
+ calculatedLogFilePath = buildNormalizedPath(buildPath(calculatedLogDirPath, "onedrive.log"));
 }
+ // Update the logging class to use 'calculatedLogFilePath' for the application log file now that this has been determined
+ enableLogFileOutput(calculatedLogFilePath);
 }
-
- // Initialise normalised file paths
- configFilePath = buildNormalizedPath(cfg.configDirName ~ "/config");
- syncListFilePath = buildNormalizedPath(cfg.configDirName ~ "/sync_list");
- databaseFilePath = buildNormalizedPath(cfg.configDirName ~ "/items.db");
- businessSharedFolderFilePath = buildNormalizedPath(cfg.configDirName ~ "/business_shared_folders");
-
- // Has any of our configuration that would require a --resync been changed?
- // 1. sync_list file modification
- // 2. 
config file modification - but only if sync_dir, skip_dir, skip_file or drive_id was modified - // 3. CLI input overriding configured config file option - configHashFile = buildNormalizedPath(cfg.configDirName ~ "/.config.hash"); - syncListHashFile = buildNormalizedPath(cfg.configDirName ~ "/.sync_list.hash"); - configBackupFile = buildNormalizedPath(cfg.configDirName ~ "/.config.backup"); - businessSharedFoldersHashFile = buildNormalizedPath(cfg.configDirName ~ "/.business_shared_folders.hash"); - - // Does a 'config' file exist with a valid hash file - if (exists(configFilePath)) { - if (!exists(configHashFile)) { - // hash of config file needs to be created, but only if we are not in a --resync scenario - if (!cfg.getValueBool("resync")) { - std.file.write(configHashFile, "initial-hash"); - // Hash file should only be readable by the user who created it - 0600 permissions needed - configHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } - } else { - // no 'config' file exists, application defaults being used, no hash file required - if (exists(configHashFile)) { - // remove the hash, but only if --resync was issued as now the application will use 'defaults' which 'may' be different - if (cfg.getValueBool("resync")) { - // resync issued, remove hash files - safeRemove(configHashFile); - safeRemove(configBackupFile); - } - } - } - - // Does a 'sync_list' file exist with a valid hash file - if (exists(syncListFilePath)) { - if (!exists(syncListHashFile)) { - // hash of config file needs to be created, but only if we are not in a --resync scenario - if (!cfg.getValueBool("resync")) { - std.file.write(syncListHashFile, "initial-hash"); - // Hash file should only be readable by the user who created it - 0600 permissions needed - syncListHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } - } else { - // no 'sync_list' file exists, no hash file required - if (exists(syncListHashFile)) { - // remove the hash, but only if --resync was issued as now the application will use 'defaults' which 'may' be different - if (cfg.getValueBool("resync")) { - // resync issued, remove hash files - safeRemove(syncListHashFile); - } - } - } - - // Does a 'business_shared_folders' file exist with a valid hash file - if (exists(businessSharedFolderFilePath)) { - if (!exists(businessSharedFoldersHashFile)) { - // hash of config file needs to be created, but only if we are not in a --resync scenario - if (!cfg.getValueBool("resync")) { - std.file.write(businessSharedFoldersHashFile, "initial-hash"); - // Hash file should only be readable by the user who created it - 0600 permissions needed - businessSharedFoldersHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } - } else { - // no 'business_shared_folders' file exists, no hash file required - if (exists(businessSharedFoldersHashFile)) { - // remove the hash, but only if --resync was issued as now the application will use 'defaults' which 'may' be different - if (cfg.getValueBool("resync")) { - // resync issued, remove hash files - safeRemove(businessSharedFoldersHashFile); - } - } - } - - // Generate current hashes for the relevant configuration files if they exist - if (exists(configFilePath)) currentConfigHash = computeQuickXorHash(configFilePath); - if (exists(syncListFilePath)) currentSyncListHash = computeQuickXorHash(syncListFilePath); - if (exists(businessSharedFolderFilePath)) currentBusinessSharedFoldersHash = computeQuickXorHash(businessSharedFolderFilePath); - // read the existing hashes for each of the relevant 
configuration files if they exist
- if (exists(configHashFile)) {
- try {
- previousConfigHash = readText(configHashFile);
- } catch (std.file.FileException e) {
- // Unable to access required file
- log.error("ERROR: Unable to access ", e.msg);
- // Use exit scopes to shutdown API
- return EXIT_FAILURE;
- }
- }
- if (exists(syncListHashFile)) {
- try {
- previousSyncListHash = readText(syncListHashFile);
- } catch (std.file.FileException e) {
- // Unable to access required file
- log.error("ERROR: Unable to access ", e.msg);
- // Use exit scopes to shutdown API
- return EXIT_FAILURE;
- }
- }
- if (exists(businessSharedFoldersHashFile)) {
- try {
- previousBusinessSharedFoldersHash = readText(businessSharedFoldersHashFile);
- } catch (std.file.FileException e) {
- // Unable to access required file
- log.error("ERROR: Unable to access ", e.msg);
- // Use exit scopes to shutdown API
- return EXIT_FAILURE;
- }
+ // Disable GUI Notifications if configured to do so
+ // - This option works in reverse. If 'disable_notifications' is 'true', we need to send 'false'
+ if (appConfig.getValueBool("disable_notifications")) {
+ // disable_notifications is true, ensure GUI notifications are initialised with false so that NO GUI notification is sent
+ disableGUINotifications(false);
+ addLogEntry("Disabling GUI notifications as per user configuration");
 }
-
- // Was sync_list file updated?
- if (currentSyncListHash != previousSyncListHash) {
- // Debugging output to assist what changed
- log.vdebug("sync_list file has been updated, --resync needed");
- syncListDifferent = true;
- }
-
- // Was business_shared_folders updated?
- if (currentBusinessSharedFoldersHash != previousBusinessSharedFoldersHash) {
- // Debugging output to assist what changed
- log.vdebug("business_shared_folders file has been updated, --resync needed");
- businessSharedFoldersDifferent = true;
+
+ // Perform a deprecated options check now that the config file (if present) and CLI options have all been parsed, to advise the user that their option usage might change
+ appConfig.checkDepreciatedOptions(cliArgs);
+
+ // Configure Client Side Filtering (selective sync) by parsing and getting a usable regex for skip_file, skip_dir and sync_list config components
+ selectiveSync = new ClientSideFiltering(appConfig);
+ if (!selectiveSync.initialise()) {
+ // exit here as something triggered a selective sync configuration failure
+ return EXIT_FAILURE;
 }
-
- // Was config file updated between last execution ang this execution?
- if (currentConfigHash != previousConfigHash) { - // config file was updated, however we only want to trigger a --resync requirement if sync_dir, skip_dir, skip_file or drive_id was modified - if (!cfg.getValueBool("display_config")){ - // only print this message if we are not using --display-config - log.log("config file has been updated, checking if --resync needed"); - } - if (exists(configBackupFile)) { - // check backup config what has changed for these configuration options if anything - // # sync_dir = "~/OneDrive" - // # skip_file = "~*|.~*|*.tmp" - // # skip_dir = "" - // # drive_id = "" - string[string] stringValues; - stringValues["sync_dir"] = ""; - stringValues["skip_file"] = ""; - stringValues["skip_dir"] = ""; - stringValues["drive_id"] = ""; - auto configBackupFileHandle = File(configBackupFile, "r"); - string lineBuffer; - auto range = configBackupFileHandle.byLine(); - // read configBackupFile line by line - foreach (line; range) { - lineBuffer = stripLeft(line).to!string; - if (lineBuffer.length == 0 || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue; - auto c = lineBuffer.matchFirst(cfg.configRegex); - if (!c.empty) { - c.popFront(); // skip the whole match - string key = c.front.dup; - auto p = key in stringValues; - if (p) { - c.popFront(); - // compare this key - if ((key == "sync_dir") && (c.front.dup != cfg.getValueString("sync_dir"))) { - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - - if ((key == "skip_file") && (c.front.dup != cfg.getValueString("skip_file"))){ - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - if ((key == "skip_dir") && (c.front.dup != cfg.getValueString("skip_dir"))){ - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - if ((key == "drive_id") && (c.front.dup != cfg.getValueString("drive_id"))){ - log.vdebug(key, " was modified since the last time the application was successfully run, --resync needed"); - configOptionsDifferent = true; - } - } - } - } - // close file if open - if (configBackupFileHandle.isOpen()){ - // close open file - configBackupFileHandle.close(); - } - } else { - // no backup to check - log.vdebug("WARNING: no backup config file was found, unable to validate if any changes made"); - } - - // If there was a backup, any modified values we need to worry about would been detected - if (!cfg.getValueBool("display_config")) { - // we are not testing the configuration - if (!configOptionsDifferent) { - // no options are different - if (!cfg.getValueBool("dry_run")) { - // we are not in a dry-run scenario - // update config hash - log.vdebug("updating config hash as it is out of date"); - std.file.write(configHashFile, computeQuickXorHash(configFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - configHashFile.setAttributes(to!int(convertedPermissionValue)); - // create backup copy of current config file - log.vdebug("making backup of config file as it is out of date"); - std.file.copy(configFilePath, configBackupFile); - // File Copy should only be readable by the user who created it - 0600 permissions needed - configBackupFile.setAttributes(to!int(convertedPermissionValue)); - } - } + + // Set runtimeDatabaseFile, this will get updated if we are using --dry-run + 
runtimeDatabaseFile = appConfig.databaseFilePath; + + // Read in 'sync_dir' from appConfig with '~' if present expanded + runtimeSyncDirectory = appConfig.initialiseRuntimeSyncDirectory(); + + // DEVELOPER OPTIONS OUTPUT + // Set to display memory details as early as possible + displayMemoryUsage = appConfig.getValueBool("display_memory"); + // set to display sync options + displaySyncOptions = appConfig.getValueBool("display_sync_options"); + + // Display the current application configuration (based on all defaults, 'config' file parsing and/or options passed in via the CLI) and exit if --display-config has been used + if ((appConfig.getValueBool("display_config")) || (appConfig.getValueBool("display_running_config"))) { + // Display the application configuration + appConfig.displayApplicationConfiguration(); + // Do we exit? We exit only if '--display-config' has been used + if (appConfig.getValueBool("display_config")) { + return EXIT_SUCCESS; } } - - // Is there a backup of the config file if the config file exists? - if ((exists(configFilePath)) && (!exists(configBackupFile))) { - // create backup copy of current config file - std.file.copy(configFilePath, configBackupFile); - // File Copy should only be readable by the user who created it - 0600 permissions needed - configBackupFile.setAttributes(to!int(convertedPermissionValue)); - } - - // config file set options can be changed via CLI input, specifically these will impact sync and --resync will be needed: - // --syncdir ARG - // --skip-file ARG - // --skip-dir ARG - if (exists(configFilePath)) { - // config file exists - // was the sync_dir updated by CLI? - if (cfg.configFileSyncDir != "") { - // sync_dir was set in config file - if (cfg.configFileSyncDir != cfg.getValueString("sync_dir")) { - // config file was set and CLI input changed this - log.vdebug("sync_dir: CLI override of config file option, --resync needed"); - syncDirDifferent = true; - } - } - - // was the skip_file updated by CLI? - if (cfg.configFileSkipFile != "") { - // skip_file was set in config file - if (cfg.configFileSkipFile != cfg.getValueString("skip_file")) { - // config file was set and CLI input changed this - log.vdebug("skip_file: CLI override of config file option, --resync needed"); - skipFileDifferent = true; - } - } - - // was the skip_dir updated by CLI? - if (cfg.configFileSkipDir != "") { - // skip_dir was set in config file - if (cfg.configFileSkipDir != cfg.getValueString("skip_dir")) { - // config file was set and CLI input changed this - log.vdebug("skip_dir: CLI override of config file option, --resync needed"); - skipDirDifferent = true; - } - } + + // Check for basic application option conflicts - flags that should not be used together and/or flag combinations that conflict with each other, values that should be present and are not + if (appConfig.checkForBasicOptionConflicts) { + // Any error will have been printed by the function itself, but we need a small delay here to allow the buffered logging to output any error + return EXIT_FAILURE; } - - // Has anything triggered a --resync requirement? - if (configOptionsDifferent || syncListDifferent || syncDirDifferent || skipFileDifferent || skipDirDifferent || businessSharedFoldersDifferent) { - // --resync needed, is the user performing any operation where a --resync is not required? 
- // flag to ignore --resync requirement - bool ignoreResyncRequirement = false; - // These flags do not need --resync as no sync operation is needed: --display-config, --list-shared-folders, --get-O365-drive-id, --get-file-link - if (cfg.getValueBool("display_config")) ignoreResyncRequirement = true; - if (cfg.getValueBool("list_business_shared_folders")) ignoreResyncRequirement = true; - if ((!cfg.getValueString("get_o365_drive_id").empty)) ignoreResyncRequirement = true; - if ((!cfg.getValueString("get_file_link").empty)) ignoreResyncRequirement = true; + + // Check for --dry-run operation + // If this has been requested, we need to ensure that all actions are performed against the dry-run database copy, and, + // no actual action takes place - such as deleting files if deleted online, moving files if moved online or local, downloading new & changed files, uploading new & changed files + if (appConfig.getValueBool("dry_run")) { + // this is a --dry-run operation + addLogEntry("DRY-RUN Configured. Output below shows what 'would' have occurred."); - // Do we need to ignore a --resync requirement? - if (!ignoreResyncRequirement) { - // We are not ignoring --requirement - if (!cfg.getValueBool("resync")) { - // --resync not issued, fail fast - log.error("An application configuration change has been detected where a --resync is required"); - return EXIT_RESYNC_REQUIRED; - } else { - // --resync issued, update hashes of config files if they exist - if (!cfg.getValueBool("dry_run")) { - // not doing a dry run, update hash files if config & sync_list exist - if (exists(configFilePath)) { - // update hash - log.vdebug("updating config hash as --resync issued"); - std.file.write(configHashFile, computeQuickXorHash(configFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - configHashFile.setAttributes(to!int(convertedPermissionValue)); - // create backup copy of current config file - log.vdebug("making backup of config file as --resync issued"); - std.file.copy(configFilePath, configBackupFile); - // File copy should only be readable by the user who created it - 0600 permissions needed - configBackupFile.setAttributes(to!int(convertedPermissionValue)); - } - if (exists(syncListFilePath)) { - // update sync_list hash - log.vdebug("updating sync_list hash as --resync issued"); - std.file.write(syncListHashFile, computeQuickXorHash(syncListFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - syncListHashFile.setAttributes(to!int(convertedPermissionValue)); - } - if (exists(businessSharedFolderFilePath)) { - // update business_shared_folders hash - log.vdebug("updating business_shared_folders hash as --resync issued"); - std.file.write(businessSharedFoldersHashFile, computeQuickXorHash(businessSharedFolderFilePath)); - // Hash file should only be readable by the user who created it - 0600 permissions needed - businessSharedFoldersHashFile.setAttributes(to!int(convertedPermissionValue)); - } - } - } - } - } - - // --dry-run operation notification and database setup - // Are we performing any of the following operations? 
- // --dry-run, --list-shared-folders, --get-O365-drive-id, --get-file-link - if ((cfg.getValueBool("dry_run")) || (cfg.getValueBool("list_business_shared_folders")) || (!cfg.getValueString("get_o365_drive_id").empty) || (!cfg.getValueString("get_file_link").empty)) { - // is this a --list-shared-folders, --get-O365-drive-id, --get-file-link operation - if (cfg.getValueBool("dry_run")) { - // this is a --dry-run operation - log.log("DRY-RUN Configured. Output below shows what 'would' have occurred."); - } else { - // is this a --list-shared-folders, --get-O365-drive-id, --get-file-link operation - log.log("Using dry-run database copy for OneDrive API query"); - } - // configure databaseFilePathDryRunGlobal - databaseFilePathDryRunGlobal = cfg.databaseFilePathDryRun; + // Cleanup any existing dry-run elements ... these should never be left hanging around + cleanupDryRunDatabaseFiles(appConfig.databaseFilePathDryRun); - string dryRunShmFile = databaseFilePathDryRunGlobal ~ "-shm"; - string dryRunWalFile = databaseFilePathDryRunGlobal ~ "-wal"; - // If the dry run database exists, clean this up - if (exists(databaseFilePathDryRunGlobal)) { - // remove the existing file - log.vdebug("Removing items-dryrun.sqlite3 as it still exists for some reason"); - safeRemove(databaseFilePathDryRunGlobal); - } - // silent cleanup of shm and wal files if they exist - if (exists(dryRunShmFile)) { - // remove items-dryrun.sqlite3-shm - safeRemove(dryRunShmFile); - } - if (exists(dryRunWalFile)) { - // remove items-dryrun.sqlite3-wal - safeRemove(dryRunWalFile); - } - // Make a copy of the original items.sqlite3 for use as the dry run copy if it exists - if (exists(cfg.databaseFilePath)) { - // in a --dry-run --resync scenario, we should not copy the existing database file - if (!cfg.getValueBool("resync")) { - // copy the existing DB file to the dry-run copy - log.vdebug("Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations"); - copy(cfg.databaseFilePath,databaseFilePathDryRunGlobal); - } else { - // no database copy due to --resync - log.vdebug("No database copy created for --dry-run due to --resync also being used"); - } - } - } - - // sync_dir environment handling to handle ~ expansion properly - bool shellEnvSet = false; - if ((environment.get("SHELL") == "") && (environment.get("USER") == "")){ - log.vdebug("sync_dir: No SHELL or USER environment variable configuration detected"); - // No shell or user set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker - // Does the 'currently configured' sync_dir include a ~ - if (canFind(cfg.getValueString("sync_dir"), "~")) { - // A ~ was found in sync_dir - log.vdebug("sync_dir: A '~' was found in sync_dir, using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set"); - syncDir = cfg.homePath ~ strip(cfg.getValueString("sync_dir"), "~"); - } else { - // No ~ found in sync_dir, use as is - log.vdebug("sync_dir: Getting syncDir from config value sync_dir"); - syncDir = cfg.getValueString("sync_dir"); - } - } else { - // A shell and user is set, expand any ~ as this will be expanded correctly if present - shellEnvSet = true; - log.vdebug("sync_dir: Getting syncDir from config value sync_dir"); - if (canFind(cfg.getValueString("sync_dir"), "~")) { - log.vdebug("sync_dir: A '~' was found in configured sync_dir, automatically expanding as SHELL and USER environment variable is set"); - syncDir = expandTilde(cfg.getValueString("sync_dir")); - } else { - 
syncDir = cfg.getValueString("sync_dir"); - } - } - - // vdebug syncDir as set and calculated - log.vdebug("syncDir: ", syncDir); - - // Configure the logging directory if different from application default - // log_dir environment handling to handle ~ expansion properly - string logDir = cfg.getValueString("log_dir"); - if (logDir != cfg.defaultLogFileDir) { - // user modified log_dir entry - // if 'log_dir' contains a '~' this needs to be expanded correctly - if (canFind(cfg.getValueString("log_dir"), "~")) { - // ~ needs to be expanded correctly - if (!shellEnvSet) { - // No shell or user set, so expandTilde() will fail - usually headless system running under init.d / systemd or potentially Docker - log.vdebug("log_dir: A '~' was found in log_dir, using the calculated 'homePath' to replace '~' as no SHELL or USER environment variable set"); - logDir = cfg.homePath ~ strip(cfg.getValueString("log_dir"), "~"); + if (exists(appConfig.databaseFilePath)) { + // In a --dry-run --resync scenario, we should not copy the existing database file + if (!appConfig.getValueBool("resync")) { + // Copy the existing DB file to the dry-run copy + addLogEntry("DRY-RUN: Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations"); + copy(appConfig.databaseFilePath,appConfig.databaseFilePathDryRun); } else { - // A shell and user is set, expand any ~ as this will be expanded correctly if present - log.vdebug("log_dir: A '~' was found in log_dir, using SHELL or USER environment variable to expand '~'"); - logDir = expandTilde(cfg.getValueString("log_dir")); + // No database copy due to --resync + addLogEntry("DRY-RUN: No database copy created for --dry-run due to --resync also being used"); } - } else { - // '~' not found in log_dir entry, use as is - logDir = cfg.getValueString("log_dir"); - } - // update log_dir with normalised path, with '~' expanded correctly - cfg.setValueString("log_dir", logDir); - } - - // Configure logging only if enabled - if (cfg.getValueBool("enable_logging")){ - // Initialise using the configured logging directory - log.vlog("Using logfile dir: ", logDir); - log.init(logDir); - } - - // Configure whether notifications are used - log.setNotifications(cfg.getValueBool("monitor") && !cfg.getValueBool("disable_notifications")); - - // Application upgrades - skilion version etc - if (exists(databaseFilePath)) { - if (!cfg.getValueBool("dry_run")) { - safeRemove(databaseFilePath); } - log.logAndNotify("Database schema changed, resync needed"); - cfg.setValueBool("resync", true); + // update runtimeDatabaseFile now that we are using the dry run path + runtimeDatabaseFile = appConfig.databaseFilePathDryRun; } - + // Handle --logout as separate item, do not 'resync' on a --logout - if (cfg.getValueBool("logout")) { - log.vdebug("--logout requested"); - log.log("Deleting the saved authentication status ..."); - if (!cfg.getValueBool("dry_run")) { - safeRemove(cfg.refreshTokenFilePath); + if (appConfig.getValueBool("logout")) { + addLogEntry("--logout requested", ["debug"]); + addLogEntry("Deleting the saved authentication status ..."); + if (!appConfig.getValueBool("dry_run")) { + safeRemove(appConfig.refreshTokenFilePath); + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... 
+ addLogEntry("DRY RUN: Not removing the saved authentication status"); } // Exit return EXIT_SUCCESS; } // Handle --reauth to re-authenticate the client - if (cfg.getValueBool("reauth")) { - log.vdebug("--reauth requested"); - log.log("Deleting the saved authentication status ... re-authentication requested"); - if (!cfg.getValueBool("dry_run")) { - safeRemove(cfg.refreshTokenFilePath); + if (appConfig.getValueBool("reauth")) { + addLogEntry("--reauth requested", ["debug"]); + addLogEntry("Deleting the saved authentication status ... re-authentication requested"); + if (!appConfig.getValueBool("dry_run")) { + safeRemove(appConfig.refreshTokenFilePath); + } else { + // --dry-run scenario ... technically we should not be making any local file changes ....... + addLogEntry("DRY RUN: Not removing the saved authentication status"); } } - // Display current application configuration - if ((cfg.getValueBool("display_config")) || (cfg.getValueBool("display_running_config"))) { - if (cfg.getValueBool("display_running_config")) { - writeln("--------------- Application Runtime Configuration ---------------"); - } - - // Display application version - writeln("onedrive version = ", strip(import("version"))); - // Display all of the pertinent configuration options - writeln("Config path = ", cfg.configDirName); - // Does a config file exist or are we using application defaults - writeln("Config file found in config path = ", exists(configFilePath)); - - // Is config option drive_id configured? - if (cfg.getValueString("drive_id") != ""){ - writeln("Config option 'drive_id' = ", cfg.getValueString("drive_id")); - } - - // Config Options as per 'config' file - writeln("Config option 'sync_dir' = ", syncDir); - - // logging and notifications - writeln("Config option 'enable_logging' = ", cfg.getValueBool("enable_logging")); - writeln("Config option 'log_dir' = ", cfg.getValueString("log_dir")); - writeln("Config option 'disable_notifications' = ", cfg.getValueBool("disable_notifications")); - writeln("Config option 'min_notify_changes' = ", cfg.getValueLong("min_notify_changes")); + // --resync should be considered a 'last resort item' or if the application configuration has changed, where a resync is needed .. the user needs to 'accept' this warning to proceed + // If --resync has not been used (bool value is false), check the application configuration for 'changes' that require a --resync to ensure that the data locally reflects the users requested configuration + if (appConfig.getValueBool("resync")) { + // what is the risk acceptance for --resync? 
+ bool resyncRiskAcceptance = appConfig.displayResyncRiskForAcceptance(); + addLogEntry("Returned --resync risk acceptance: " ~ resyncRiskAcceptance, ["debug"]); - // skip files and directory and 'matching' policy - writeln("Config option 'skip_dir' = ", cfg.getValueString("skip_dir")); - writeln("Config option 'skip_dir_strict_match' = ", cfg.getValueBool("skip_dir_strict_match")); - writeln("Config option 'skip_file' = ", cfg.getValueString("skip_file")); - writeln("Config option 'skip_dotfiles' = ", cfg.getValueBool("skip_dotfiles")); - writeln("Config option 'skip_symlinks' = ", cfg.getValueBool("skip_symlinks")); - - // --monitor sync process options - writeln("Config option 'monitor_interval' = ", cfg.getValueLong("monitor_interval")); - writeln("Config option 'monitor_log_frequency' = ", cfg.getValueLong("monitor_log_frequency")); - writeln("Config option 'monitor_fullscan_frequency' = ", cfg.getValueLong("monitor_fullscan_frequency")); - - // sync process and method - writeln("Config option 'read_only_auth_scope' = ", cfg.getValueBool("read_only_auth_scope")); - writeln("Config option 'dry_run' = ", cfg.getValueBool("dry_run")); - writeln("Config option 'upload_only' = ", cfg.getValueBool("upload_only")); - writeln("Config option 'download_only' = ", cfg.getValueBool("download_only")); - writeln("Config option 'local_first' = ", cfg.getValueBool("local_first")); - writeln("Config option 'check_nosync' = ", cfg.getValueBool("check_nosync")); - writeln("Config option 'check_nomount' = ", cfg.getValueBool("check_nomount")); - writeln("Config option 'resync' = ", cfg.getValueBool("resync")); - writeln("Config option 'resync_auth' = ", cfg.getValueBool("resync_auth")); - writeln("Config option 'cleanup_local_files' = ", cfg.getValueBool("cleanup_local_files")); - - // data integrity - writeln("Config option 'classify_as_big_delete' = ", cfg.getValueLong("classify_as_big_delete")); - writeln("Config option 'disable_upload_validation' = ", cfg.getValueBool("disable_upload_validation")); - writeln("Config option 'bypass_data_preservation' = ", cfg.getValueBool("bypass_data_preservation")); - writeln("Config option 'no_remote_delete' = ", cfg.getValueBool("no_remote_delete")); - writeln("Config option 'remove_source_files' = ", cfg.getValueBool("remove_source_files")); - writeln("Config option 'sync_dir_permissions' = ", cfg.getValueLong("sync_dir_permissions")); - writeln("Config option 'sync_file_permissions' = ", cfg.getValueLong("sync_file_permissions")); - writeln("Config option 'space_reservation' = ", cfg.getValueLong("space_reservation")); - - // curl operations - writeln("Config option 'application_id' = ", cfg.getValueString("application_id")); - writeln("Config option 'azure_ad_endpoint' = ", cfg.getValueString("azure_ad_endpoint")); - writeln("Config option 'azure_tenant_id' = ", cfg.getValueString("azure_tenant_id")); - writeln("Config option 'user_agent' = ", cfg.getValueString("user_agent")); - writeln("Config option 'force_http_11' = ", cfg.getValueBool("force_http_11")); - writeln("Config option 'debug_https' = ", cfg.getValueBool("debug_https")); - writeln("Config option 'rate_limit' = ", cfg.getValueLong("rate_limit")); - writeln("Config option 'operation_timeout' = ", cfg.getValueLong("operation_timeout")); - writeln("Config option 'dns_timeout' = ", cfg.getValueLong("dns_timeout")); - writeln("Config option 'connect_timeout' = ", cfg.getValueLong("connect_timeout")); - writeln("Config option 'data_timeout' = ", cfg.getValueLong("data_timeout")); - writeln("Config option 
'ip_protocol_version' = ", cfg.getValueLong("ip_protocol_version")); - - // Is sync_list configured ? - writeln("Config option 'sync_root_files' = ", cfg.getValueBool("sync_root_files")); - if (exists(syncListFilePath)){ - - writeln("Selective sync 'sync_list' configured = true"); - writeln("sync_list contents:"); - // Output the sync_list contents - auto syncListFile = File(syncListFilePath, "r"); - auto range = syncListFile.byLine(); - foreach (line; range) - { - writeln(line); - } + // Action based on user response + if (!resyncRiskAcceptance){ + // --resync risk not accepted + return EXIT_FAILURE; } else { - writeln("Selective sync 'sync_list' configured = false"); - + addLogEntry("--resync issued and risk accepted", ["debug"]); + // --resync risk accepted, perform a cleanup of items that require a cleanup + appConfig.cleanupHashFilesDueToResync(); + // Make a backup of the applicable configuration file + appConfig.createBackupConfigFile(); + // Update hash files and generate a new config backup + appConfig.updateHashContentsForConfigFiles(); + // Remove the items database + processResyncDatabaseRemoval(runtimeDatabaseFile); } - - // Is business_shared_folders enabled and configured ? - writeln("Config option 'sync_business_shared_folders' = ", cfg.getValueBool("sync_business_shared_folders")); - if (exists(businessSharedFolderFilePath)){ - writeln("Business Shared Folders configured = true"); - writeln("business_shared_folders contents:"); - // Output the business_shared_folders contents - auto businessSharedFolderFileList = File(businessSharedFolderFilePath, "r"); - auto range = businessSharedFolderFileList.byLine(); - foreach (line; range) - { - writeln(line); - } + } else { + // Has any of our application configuration that would require a --resync been changed? + if (appConfig.applicationChangeWhereResyncRequired()) { + // Application configuration has changed however --resync not issued, fail fast + addLogEntry(); + addLogEntry("An application configuration change has been detected where a --resync is required"); + addLogEntry(); + return EXIT_RESYNC_REQUIRED; } else { - writeln("Business Shared Folders configured = false"); - } - - // Are webhooks enabled? - writeln("Config option 'webhook_enabled' = ", cfg.getValueBool("webhook_enabled")); - if (cfg.getValueBool("webhook_enabled")) { - writeln("Config option 'webhook_public_url' = ", cfg.getValueString("webhook_public_url")); - writeln("Config option 'webhook_listening_host' = ", cfg.getValueString("webhook_listening_host")); - writeln("Config option 'webhook_listening_port' = ", cfg.getValueLong("webhook_listening_port")); - writeln("Config option 'webhook_expiration_interval' = ", cfg.getValueLong("webhook_expiration_interval")); - writeln("Config option 'webhook_renewal_interval' = ", cfg.getValueLong("webhook_renewal_interval")); - } - - if (cfg.getValueBool("display_running_config")) { - writeln("-----------------------------------------------------------------"); - } - - // Do we exit? 
We only exit if --display-config has been used - if (cfg.getValueBool("display_config")) { - return EXIT_SUCCESS; + // No configuration change that requires a --resync to be issued + // Make a backup of the applicable configuration file + appConfig.createBackupConfigFile(); + // Update hash files and generate a new config backup + appConfig.updateHashContentsForConfigFiles(); } } - - // --upload-only and --download-only are mutually exclusive and cannot be used together - if ((cfg.getValueBool("upload_only")) && (cfg.getValueBool("download_only"))) { - // both cannot be true at the same time - writeln("ERROR: --upload-only and --download-only are mutually exclusive and cannot be used together.\n"); - return EXIT_FAILURE; - } - - // Handle the actual --resync to remove local files - if (cfg.getValueBool("resync")) { - log.vdebug("--resync requested"); - log.vdebug("Testing if we have exclusive access to local database file"); - // Are we the only running instance? Test that we can open the database file path - itemDb = new ItemDatabase(cfg.databaseFilePath); + + // Implement https://github.com/abraunegg/onedrive/issues/1129 + // Force a synchronization of a specific folder, only when using --synchronize --single-directory and ignoring all non-default skip_dir and skip_file rules + if (appConfig.getValueBool("force_sync")) { + // appConfig.checkForBasicOptionConflicts() has already checked for the basic requirements for --force-sync + addLogEntry(); + addLogEntry("WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used"); + addLogEntry(); + bool forceSyncRiskAcceptance = appConfig.displayForceSyncRiskForAcceptance(); + addLogEntry("Returned --force-sync risk acceptance: " ~ forceSyncRiskAcceptance, ["debug"]); - // did we successfully initialise the database class? - if (!itemDb.isDatabaseInitialised()) { - // no .. destroy class - itemDb = null; - // exit application + // Action based on user response + if (!forceSyncRiskAcceptance){ + // --force-sync risk not accepted return EXIT_FAILURE; - } - - // If we have exclusive access we will not have exited - // destroy access test - destroy(itemDb); - // delete application sync state - log.log("Deleting the saved application sync status ..."); - if (!cfg.getValueBool("dry_run")) { - safeRemove(cfg.databaseFilePath); - safeRemove(cfg.deltaLinkFilePath); - safeRemove(cfg.uploadStateFilePath); + } else { + // --force-sync risk accepted + // reset set config using function to use application defaults + appConfig.resetSkipToDefaults(); + // update sync engine regex with reset defaults + selectiveSync.setDirMask(appConfig.getValueString("skip_dir")); + selectiveSync.setFileMask(appConfig.getValueString("skip_file")); } } // Test if OneDrive service can be reached, exit if it cant be reached - log.vdebug("Testing network to ensure network connectivity to Microsoft OneDrive Service"); - online = testNetwork(cfg); + addLogEntry("Testing network to ensure network connectivity to Microsoft OneDrive Service", ["debug"]); + online = testInternetReachability(appConfig); + + // If we are not 'online' - how do we handle this situation? 
if (!online) { - // Cant initialise the API as we are not online - if (!cfg.getValueBool("monitor")) { + // We are unable to initialise the OneDrive API as we are not online + if (!appConfig.getValueBool("monitor")) { // Running as --synchronize - log.error("Unable to reach Microsoft OneDrive API service, unable to initialize application\n"); + addLogEntry(); + addLogEntry("ERROR: Unable to reach Microsoft OneDrive API service, unable to initialise application"); + addLogEntry(); return EXIT_FAILURE; } else { // Running as --monitor - log.error("Unable to reach Microsoft OneDrive API service at this point in time, re-trying network tests\n"); - // re-try network connection to OneDrive - // https://github.com/abraunegg/onedrive/issues/1184 - // Back off & retry with incremental delay - int retryCount = 10000; - int retryAttempts = 1; - int backoffInterval = 1; - int maxBackoffInterval = 3600; - - bool retrySuccess = false; - while (!retrySuccess){ - // retry to access OneDrive API - backoffInterval++; - int thisBackOffInterval = retryAttempts*backoffInterval; - log.vdebug(" Retry Attempt: ", retryAttempts); - if (thisBackOffInterval <= maxBackoffInterval) { - log.vdebug(" Retry In (seconds): ", thisBackOffInterval); - Thread.sleep(dur!"seconds"(thisBackOffInterval)); - } else { - log.vdebug(" Retry In (seconds): ", maxBackoffInterval); - Thread.sleep(dur!"seconds"(maxBackoffInterval)); - } - // perform the re-rty - online = testNetwork(cfg); - if (online) { - // We are now online - log.log("Internet connectivity to Microsoft OneDrive service has been restored"); - retrySuccess = true; - } else { - // We are still offline - if (retryAttempts == retryCount) { - // we have attempted to re-connect X number of times - // false set this to true to break out of while loop - retrySuccess = true; - } - } - // Increment & loop around - retryAttempts++; - } - if (!online) { - // Not online after 1.2 years of trying - log.error("ERROR: Was unable to reconnect to the Microsoft OneDrive service after 10000 attempts lasting over 1.2 years!"); + addLogEntry(); + addLogEntry("Unable to reach the Microsoft OneDrive API service at this point in time, re-trying network tests based on applicable intervals"); + addLogEntry(); + if (!retryInternetConnectivtyTest(appConfig)) { return EXIT_FAILURE; } } } - // Check application version and Initialize OneDrive API, check for authorization + // This needs to be a separate 'if' statement, as, if this was an 'if-else' from above, if we were originally offline and using --monitor, we would never get to this point if (online) { // Check Application Version - log.vlog("Checking Application Version ..."); + addLogEntry("Checking Application Version ...", ["verbose"]); checkApplicationVersion(); - - // we can only initialise if we are online - log.vlog("Initializing the OneDrive API ..."); - oneDrive = new OneDriveApi(cfg); - onedriveInitialised = oneDrive.init(); - oneDrive.printAccessToken = cfg.getValueBool("print_token"); - } - - if (!onedriveInitialised) { - log.error("Could not initialize the OneDrive API"); - // Use exit scopes to shutdown API - return EXIT_UNAUTHORIZED; - } - - // if --synchronize or --monitor not passed in, configure the flag to display help & exit - if (cfg.getValueBool("synchronize") || cfg.getValueBool("monitor")) { - performSyncOK = true; - } - - // --source-directory must only be used with --destination-directory - // neither can (or should) be added individually as they have a no operational impact if they are - if 
(((cfg.getValueString("source_directory") == "") && (cfg.getValueString("destination_directory") != "")) || ((cfg.getValueString("source_directory") != "") && (cfg.getValueString("destination_directory") == ""))) { - // so either --source-directory or --destination-directory was passed in, without the other required item being passed in - // --source-directory or --destination-directory cannot be used with --synchronize or --monitor - writeln(); - if (performSyncOK) { - // log an error - log.error("ERROR: --source-directory or --destination-directory cannot be used with --synchronize or --monitor"); - } else { - // display issue with using these options - string emptyParameter; - string dataParameter; - if (cfg.getValueString("source_directory").empty) { - emptyParameter = "--source-directory"; - dataParameter = "--destination-directory"; - } else { - emptyParameter = "--destination-directory"; - dataParameter = "--source-directory"; - } - log.error("ERROR: " ~ dataParameter ~ " was passed in without also using " ~ emptyParameter); - } - // Use exit scopes to shutdown API - writeln(); - log.error(helpMessage); - writeln(); - return EXIT_FAILURE; - } - - // --create-directory, --remove-directory, --source-directory, --destination-directory - // these are activities that dont perform a sync, so to not generate an error message for these items either - if (((cfg.getValueString("create_directory") != "") || (cfg.getValueString("remove_directory") != "")) || ((cfg.getValueString("source_directory") != "") && (cfg.getValueString("destination_directory") != "")) || (cfg.getValueString("get_file_link") != "") || (cfg.getValueString("modified_by") != "") || (cfg.getValueString("create_share_link") != "") || (cfg.getValueString("get_o365_drive_id") != "") || cfg.getValueBool("display_sync_status") || cfg.getValueBool("list_business_shared_folders")) { - performSyncOK = true; - } - - // Were acceptable sync operations provided? Was --synchronize or --monitor passed in - if (!performSyncOK) { - // was the application just authorised? - if (cfg.applicationAuthorizeResponseUri) { - // Application was just authorised - if (exists(cfg.refreshTokenFilePath)) { - // OneDrive refresh token exists - log.log("\nApplication has been successfully authorised, however no additional command switches were provided.\n"); - log.log(helpMessage); - writeln(); - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } else { - // we just authorised, but refresh_token does not exist .. probably an auth error - log.log("\nApplication has not been successfully authorised. Please check your URI response entry and try again.\n"); + + // Initialise the OneDrive API + addLogEntry("Attempting to initialise the OneDrive API ...", ["verbose"]); + oneDriveApiInstance = new OneDriveApi(appConfig); + appConfig.apiWasInitialised = oneDriveApiInstance.initialise(); + if (appConfig.apiWasInitialised) { + addLogEntry("The OneDrive API was initialised successfully", ["verbose"]); + + // Flag that we were able to initalise the API in the application config + oneDriveApiInstance.debugOutputConfiguredAPIItems(); + + // Need to configure the itemDB and syncEngineInstance for 'sync' and 'non-sync' operations + addLogEntry("Opening the item database ...", ["verbose"]); + + // Configure the Item Database + itemDB = new ItemDatabase(runtimeDatabaseFile); + // Was the database successfully initialised? + if (!itemDB.isDatabaseInitialised()) { + // no .. 
destroy class
+ itemDB = null;
+ // exit application
+ return EXIT_FAILURE;
+ }
+
+ // Initialise the syncEngine
+ syncEngineInstance = new SyncEngine(appConfig, itemDB, selectiveSync);
+ appConfig.syncEngineWasInitialised = syncEngineInstance.initialise();
+
+ // Are we not doing a --sync or a --monitor operation? Both of these will be false if they are not set
+ if ((!appConfig.getValueBool("synchronize")) && (!appConfig.getValueBool("monitor"))) {
+
+ // Are we performing some sort of 'no-sync' task?
+ // - Are we obtaining the Office 365 Drive ID for a given Office 365 SharePoint Shared Library?
+ // - Are we displaying the sync status?
+ // - Are we getting the URL for a file online?
+ // - Are we listing who modified a file last online?
+ // - Are we creating a shareable link for an existing file on OneDrive?
+ // - Are we just creating a directory online, without any sync being performed?
+ // - Are we just deleting a directory online, without any sync being performed?
+ // - Are we renaming or moving a directory?
+ // - Are we displaying the quota information?
+ // - Did we just authorise the client?
+
+ // --get-sharepoint-drive-id - Get the SharePoint Library drive_id
+ if (appConfig.getValueString("sharepoint_library_name") != "") {
+ // Get the SharePoint Library drive_id
+ syncEngineInstance.querySiteCollectionForDriveID(appConfig.getValueString("sharepoint_library_name"));
+ // Exit application
+ // Use exit scopes to shutdown API and cleanup data
+ return EXIT_SUCCESS;
+ }
+
+ // --display-sync-status - Query the sync status
+ if (appConfig.getValueBool("display_sync_status")) {
+ // path to query variable
+ string pathToQueryStatusOn;
+ // What path do we query?
+ if (!appConfig.getValueString("single_directory").empty) {
+ pathToQueryStatusOn = "/" ~ appConfig.getValueString("single_directory");
+ } else {
+ pathToQueryStatusOn = "/";
+ }
+ // Query the sync status
+ syncEngineInstance.queryOneDriveForSyncStatus(pathToQueryStatusOn);
+ // Exit application
+ // Use exit scopes to shutdown API and cleanup data
+ return EXIT_SUCCESS;
+ }
+
+ // --get-file-link - Get the URL path for a synced file?
+ if (appConfig.getValueString("get_file_link") != "") {
+ // Query the OneDrive API for the file link
+ syncEngineInstance.queryOneDriveForFileDetails(appConfig.getValueString("get_file_link"), runtimeSyncDirectory, "URL");
+ // Exit application
+ // Use exit scopes to shutdown API and cleanup data
+ return EXIT_SUCCESS;
+ }
+
+ // --modified-by - Are we listing the modified-by details of a provided path?
+ if (appConfig.getValueString("modified_by") != "") {
+ // Query the OneDrive API for the last modified by details
+ syncEngineInstance.queryOneDriveForFileDetails(appConfig.getValueString("modified_by"), runtimeSyncDirectory, "ModifiedBy");
+ // Exit application
+ // Use exit scopes to shutdown API and cleanup data
+ return EXIT_SUCCESS;
+ }
+
+ // --create-share-link - Are we creating a shareable link for an existing file on OneDrive?
+ if (appConfig.getValueString("create_share_link") != "") {
+ // Query OneDrive for the file, and if valid, create a shareable link for the file
+
+ // By default, the shareable link will be read-only.
+ // If the user adds: + // --with-editing-perms + // this will create a writeable link + syncEngineInstance.queryOneDriveForFileDetails(appConfig.getValueString("create_share_link"), runtimeSyncDirectory, "ShareableLink"); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // --create-directory - Are we just creating a directory online, without any sync being performed? + if ((appConfig.getValueString("create_directory") != "")) { + // Handle the remote path creation and updating of the local database without performing a sync + syncEngineInstance.createDirectoryOnline(appConfig.getValueString("create_directory")); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // --remove-directory - Are we just deleting a directory online, without any sync being performed? + if ((appConfig.getValueString("remove_directory") != "")) { + // Handle the remote path deletion without performing a sync + syncEngineInstance.deleteByPath(appConfig.getValueString("remove_directory")); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // Are we renaming or moving a directory? + // onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination' + if ((appConfig.getValueString("source_directory") != "") && (appConfig.getValueString("destination_directory") != "")) { + // We are renaming or moving a directory + syncEngineInstance.uploadMoveItem(appConfig.getValueString("source_directory"), appConfig.getValueString("destination_directory")); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // Are we displaying the quota information? + if (appConfig.getValueBool("display_quota")) { + // Query and respond with the quota details + syncEngineInstance.queryOneDriveForQuotaDetails(); + // Exit application + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } + + // If we get to this point, we have not performed a 'no-sync' task .. + // Did we just authorise the client? + if (appConfig.applicationAuthorizeResponseUri) { + // Authorisation activity + if (exists(appConfig.refreshTokenFilePath)) { + // OneDrive refresh token exists + addLogEntry(); + addLogEntry("The application has been successfully authorised, but no extra command options have been specified."); + addLogEntry(); + addLogEntry(genericHelpMessage); + addLogEntry(); + // Use exit scopes to shutdown API + return EXIT_SUCCESS; + } else { + // We just authorised, but refresh_token does not exist .. probably an auth error? + addLogEntry(); + addLogEntry("Your application's authorisation was unsuccessful. Please review your URI response entry, then attempt authorisation again with a new URI response."); + addLogEntry(); + // Use exit scopes to shutdown API + return EXIT_FAILURE; + } + } else { + // No authorisation activity + addLogEntry(); + addLogEntry("Your command line input is missing either the '--sync' or '--monitor' switches. 
Please include one (but not both) of these switches in your command line, or refer to 'onedrive --help' for additional guidance."); + addLogEntry(); + addLogEntry("It is important to note that you must include one of these two arguments in your command line for the application to perform a synchronisation with Microsoft OneDrive"); + addLogEntry(); + // Use exit scopes to shutdown API + // invalidSyncExit = true; + return EXIT_FAILURE; + } + } } else { - // Application was not just authorised - log.log("\n--synchronize or --monitor switches missing from your command line input. Please add one (not both) of these switches to your command line or use 'onedrive --help' for further assistance.\n"); - log.log("No OneDrive sync will be performed without one of these two arguments being present.\n"); - // Use exit scopes to shutdown API - invalidSyncExit = true; + // API could not be initialised + addLogEntry("The OneDrive API could not be initialised"); return EXIT_FAILURE; } } - - // if --synchronize && --monitor passed in, exit & display help as these conflict with each other - if (cfg.getValueBool("synchronize") && cfg.getValueBool("monitor")) { - writeln(); - log.error("ERROR: --synchronize and --monitor cannot be used together"); - writeln(); - log.error(helpMessage); - writeln(); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - - // Initialize the item database - log.vlog("Opening the item database ..."); - // Are we performing any of the following operations? - // --dry-run, --list-shared-folders, --get-O365-drive-id, --get-file-link - if ((cfg.getValueBool("dry_run")) || (cfg.getValueBool("list_business_shared_folders")) || (!cfg.getValueString("get_o365_drive_id").empty) || (!cfg.getValueString("get_file_link").empty)) { - // Load the items-dryrun.sqlite3 file as the database - log.vdebug("Using database file: ", asNormalizedPath(databaseFilePathDryRunGlobal)); - itemDb = new ItemDatabase(databaseFilePathDryRunGlobal); - } else { - // Not a dry-run scenario or trying to query O365 Library - should be the default scenario - // Load the items.sqlite3 file as the database - log.vdebug("Using database file: ", asNormalizedPath(cfg.databaseFilePath)); - itemDb = new ItemDatabase(cfg.databaseFilePath); - } - // did we successfully initialise the database class? - if (!itemDb.isDatabaseInitialised()) { - // no .. destroy class - itemDb = null; - // exit application - return EXIT_FAILURE; - } - - // What are the permission that have been set for the application? - // These are relevant for: - // - The ~/OneDrive parent folder or 'sync_dir' configured item - // - Any new folder created under ~/OneDrive or 'sync_dir' - // - Any new file created under ~/OneDrive or 'sync_dir' - // valid permissions are 000 -> 777 - anything else is invalid - if ((cfg.getValueLong("sync_dir_permissions") < 0) || (cfg.getValueLong("sync_file_permissions") < 0) || (cfg.getValueLong("sync_dir_permissions") > 777) || (cfg.getValueLong("sync_file_permissions") > 777)) { - log.error("ERROR: Invalid 'User|Group|Other' permissions set within config file. 
Please check."); - return EXIT_FAILURE; - } else { - // debug log output what permissions are being set to - log.vdebug("Configuring default new folder permissions as: ", cfg.getValueLong("sync_dir_permissions")); - cfg.configureRequiredDirectoryPermisions(); - log.vdebug("Configuring default new file permissions as: ", cfg.getValueLong("sync_file_permissions")); - cfg.configureRequiredFilePermisions(); - } - - // configure the sync direcory based on syncDir config option - log.vlog("All operations will be performed in: ", syncDir); + // Configure the sync direcory based on the runtimeSyncDirectory configured directory + addLogEntry("All application operations will be performed in the configured local 'sync_dir' directory: " ~ runtimeSyncDirectory, ["verbose"]); + try { - if (!exists(syncDir)) { - log.vdebug("syncDir: Configured syncDir is missing. Creating: ", syncDir); + if (!exists(runtimeSyncDirectory)) { + addLogEntry("runtimeSyncDirectory: Configured 'sync_dir' is missing locally. Creating: " ~ runtimeSyncDirectory, ["debug"]); + try { // Attempt to create the sync dir we have been configured with - mkdirRecurse(syncDir); + mkdirRecurse(runtimeSyncDirectory); // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", syncDir); - syncDir.setAttributes(cfg.returnRequiredDirectoryPermisions()); + addLogEntry("Setting directory permissions for: " ~ runtimeSyncDirectory, ["debug"]); + runtimeSyncDirectory.setAttributes(appConfig.returnRequiredDirectoryPermisions()); } catch (std.file.FileException e) { // Creating the sync directory failed - log.error("ERROR: Unable to create local OneDrive syncDir - ", e.msg); + addLogEntry("ERROR: Unable to create the configured local 'sync_dir' directory: " ~ e.msg); // Use exit scopes to shutdown API return EXIT_FAILURE; } } } catch (std.file.FileException e) { // Creating the sync directory failed - log.error("ERROR: Unable to test the configured OneDrive syncDir - ", e.msg); + addLogEntry("ERROR: Unable to test for the existence of the configured local 'sync_dir' directory: " ~ e.msg); // Use exit scopes to shutdown API return EXIT_FAILURE; } - - // Change the working directory to the 'sync_dir' configured item - chdir(syncDir); - - // Configure selective sync by parsing and getting a regex for skip_file config component - auto selectiveSync = new SelectiveSync(); - - // load sync_list if it exists - if (exists(syncListFilePath)){ - log.vdebug("Loading user configured sync_list file ..."); - syncListConfigured = true; - // list what will be synced - auto syncListFile = File(syncListFilePath, "r"); - auto range = syncListFile.byLine(); - foreach (line; range) - { - log.vdebug("sync_list: ", line); - } - // close syncListFile if open - if (syncListFile.isOpen()){ - // close open file - syncListFile.close(); - } - } - selectiveSync.load(syncListFilePath); - - // load business_shared_folders if it exists - if (exists(businessSharedFolderFilePath)){ - log.vdebug("Loading user configured business_shared_folders file ..."); - // list what will be synced - auto businessSharedFolderFileList = File(businessSharedFolderFilePath, "r"); - auto range = businessSharedFolderFileList.byLine(); - foreach (line; range) - { - log.vdebug("business_shared_folders: ", line); - } - } - selectiveSync.loadSharedFolders(businessSharedFolderFilePath); - - // Configure skip_dir, skip_file, skip-dir-strict-match & skip_dotfiles from config entries - // Handle skip_dir configuration in config file - log.vdebug("Configuring skip_dir 
..."); - log.vdebug("skip_dir: ", cfg.getValueString("skip_dir")); - selectiveSync.setDirMask(cfg.getValueString("skip_dir")); - - // Was --skip-dir-strict-match configured? - log.vdebug("Configuring skip_dir_strict_match ..."); - log.vdebug("skip_dir_strict_match: ", cfg.getValueBool("skip_dir_strict_match")); - if (cfg.getValueBool("skip_dir_strict_match")) { - selectiveSync.setSkipDirStrictMatch(); - } - - // Was --skip-dot-files configured? - log.vdebug("Configuring skip_dotfiles ..."); - log.vdebug("skip_dotfiles: ", cfg.getValueBool("skip_dotfiles")); - if (cfg.getValueBool("skip_dotfiles")) { - selectiveSync.setSkipDotfiles(); - } - - // Handle skip_file configuration in config file - log.vdebug("Configuring skip_file ..."); - // Validate skip_file to ensure that this does not contain an invalid configuration - // Do not use a skip_file entry of .* as this will prevent correct searching of local changes to process. - foreach(entry; cfg.getValueString("skip_file").split("|")){ - if (entry == ".*") { - // invalid entry element detected - log.logAndNotify("ERROR: Invalid skip_file entry '.*' detected"); - return EXIT_FAILURE; - } - } - // All skip_file entries are valid - log.vdebug("skip_file: ", cfg.getValueString("skip_file")); - selectiveSync.setFileMask(cfg.getValueString("skip_file")); - - // Implement https://github.com/abraunegg/onedrive/issues/1129 - // Force a synchronization of a specific folder, only when using --synchronize --single-directory and ignoring all non-default skip_dir and skip_file rules - if ((cfg.getValueBool("synchronize")) && (cfg.getValueString("single_directory") != "") && (cfg.getValueBool("force_sync"))) { - log.log("\nWARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --synchronize --single-directory --force-sync being used"); - // performing this action could have undesirable effects .. the user must accept this risk - // what is the risk acceptance? - bool resyncRiskAcceptance = false; - - // need to prompt user - char response; - // warning message - writeln("\nThe use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts."); - writeln("By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync."); - write("\nAre you sure you wish to proceed with --force-sync [Y/N] "); - - try { - // Attempt to read user response - readf(" %c\n", &response); - } catch (std.format.FormatException e) { - // Caught an error - return EXIT_FAILURE; - } - - // Evaluate user repsonse - if ((to!string(response) == "y") || (to!string(response) == "Y")) { - // User has accepted --force-sync risk to proceed - resyncRiskAcceptance = true; - // Are you sure you wish .. 
does not use writeln(); - write("\n"); - } - - // Action based on response - if (!resyncRiskAcceptance){ - // --force-sync not accepted - return EXIT_FAILURE; - } else { - // --force-sync risk accepted - // reset set config using function to use application defaults - cfg.resetSkipToDefaults(); - // update sync engine regex with reset defaults - selectiveSync.setDirMask(cfg.getValueString("skip_dir")); - selectiveSync.setFileMask(cfg.getValueString("skip_file")); - } - } - - // Initialize the sync engine - auto sync = new SyncEngine(cfg, oneDrive, itemDb, selectiveSync); - try { - if (!initSyncEngine(sync)) { - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } else { - if ((cfg.getValueString("get_file_link") == "") && (cfg.getValueString("create_share_link") == "")) { - // Print out that we are initializing the engine only if we are not grabbing the file link or creating a shareable link - log.logAndNotify("Initializing the Synchronization Engine ..."); - } - } - } catch (CurlException e) { - if (!cfg.getValueBool("monitor")) { - log.log("\nNo Internet connection."); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } - - // if sync list is configured, set to true now that the sync engine is initialised - if (syncListConfigured) { - sync.setSyncListConfigured(); - } - - // Do we need to configure specific --upload-only options? - if (cfg.getValueBool("upload_only")) { - // --upload-only was passed in or configured - log.vdebug("Configuring uploadOnly flag to TRUE as --upload-only passed in or configured"); - sync.setUploadOnly(); - // was --no-remote-delete passed in or configured - if (cfg.getValueBool("no_remote_delete")) { - // Configure the noRemoteDelete flag - log.vdebug("Configuring noRemoteDelete flag to TRUE as --no-remote-delete passed in or configured"); - sync.setNoRemoteDelete(); - } - // was --remove-source-files passed in or configured - if (cfg.getValueBool("remove_source_files")) { - // Configure the localDeleteAfterUpload flag - log.vdebug("Configuring localDeleteAfterUpload flag to TRUE as --remove-source-files passed in or configured"); - sync.setLocalDeleteAfterUpload(); - } - } - - // Do we configure to disable the upload validation routine - if (cfg.getValueBool("disable_upload_validation")) sync.setDisableUploadValidation(); - - // Do we configure to disable the download validation routine - if (cfg.getValueBool("disable_download_validation")) sync.setDisableDownloadValidation(); - - // Has the user enabled to bypass data preservation of renaming local files when there is a conflict? - if (cfg.getValueBool("bypass_data_preservation")) { - log.log("WARNING: Application has been configured to bypass local data preservation in the event of file conflict."); - log.log("WARNING: Local data loss MAY occur in this scenario."); - sync.setBypassDataPreservation(); - } - // Do we configure to clean up local files if using --download-only ? 
- if ((cfg.getValueBool("download_only")) && (cfg.getValueBool("cleanup_local_files"))) { - // --download-only and --cleanup-local-files were passed in - log.log("WARNING: Application has been configured to cleanup local files that are not present online."); - log.log("WARNING: Local data loss MAY occur in this scenario if you are expecting data to remain archived locally."); - sync.setCleanupLocalFiles(); - // Set the global flag as we will use this as thhe item to be passed into the sync function below - cleanupLocalFilesGlobal = true; - } - - // Are we configured to use a National Cloud Deployment - if (cfg.getValueString("azure_ad_endpoint") != "") { - // value is configured, is it a valid value? - if ((cfg.getValueString("azure_ad_endpoint") == "USL4") || (cfg.getValueString("azure_ad_endpoint") == "USL5") || (cfg.getValueString("azure_ad_endpoint") == "DE") || (cfg.getValueString("azure_ad_endpoint") == "CN")) { - // valid entries to flag we are using a National Cloud Deployment - // National Cloud Deployments do not support /delta as a query - // https://docs.microsoft.com/en-us/graph/deployments#supported-features - // Flag that we have a valid National Cloud Deployment that cannot use /delta queries - sync.setNationalCloudDeployment(); - } - } - - // Are we forcing to use /children scan instead of /delta to simulate National Cloud Deployment use of /children? - if (cfg.getValueBool("force_children_scan")) { - log.log("Forcing client to use /children scan rather than /delta to simulate National Cloud Deployment use of /children"); - sync.setNationalCloudDeployment(); - } + // Change the working directory to the 'sync_dir' as configured + chdir(runtimeSyncDirectory); - // Do we need to display the function processing timing - if (cfg.getValueBool("display_processing_time")) { - log.log("Forcing client to display function processing times"); - sync.setPerformanceProcessingOutput(); - } - - // Do we need to validate the syncDir to check for the presence of a '.nosync' file - if (cfg.getValueBool("check_nomount")) { - // we were asked to check the mounts - if (exists(syncDir ~ "/.nosync")) { - log.logAndNotify("ERROR: .nosync file found. Aborting synchronization process to safeguard data."); - // Use exit scopes to shutdown API - return EXIT_FAILURE; - } - } - - // Do we need to create or remove a directory? - if ((cfg.getValueString("create_directory") != "") || (cfg.getValueString("remove_directory") != "")) { - // create directory - if (cfg.getValueString("create_directory") != "") { - // create a directory on OneDrive - sync.createDirectoryNoSync(cfg.getValueString("create_directory")); - } - //remove directory - if (cfg.getValueString("remove_directory") != "") { - // remove a directory on OneDrive - sync.deleteDirectoryNoSync(cfg.getValueString("remove_directory")); - } - } - - // Are we renaming or moving a directory? - if ((cfg.getValueString("source_directory") != "") && (cfg.getValueString("destination_directory") != "")) { - // We are renaming or moving a directory - sync.renameDirectoryNoSync(cfg.getValueString("source_directory"), cfg.getValueString("destination_directory")); - } - - // Are we obtaining the Office 365 Drive ID for a given Office 365 SharePoint Shared Library? 
- if (cfg.getValueString("get_o365_drive_id") != "") { - sync.querySiteCollectionForDriveID(cfg.getValueString("get_o365_drive_id")); - // Exit application - // Use exit scopes to shutdown API and cleanup data - return EXIT_SUCCESS; - } - - // --create-share-link - Are we createing a shareable link for an existing file on OneDrive? - if (cfg.getValueString("create_share_link") != "") { - // Query OneDrive for the file, and if valid, create a shareable link for the file - - // By default, the shareable link will be read-only. - // If the user adds: - // --with-editing-perms - // this will create a writeable link - bool writeablePermissions = cfg.getValueBool("with_editing_perms"); - sync.createShareableLinkForFile(cfg.getValueString("create_share_link"), writeablePermissions); - - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } - - // --get-file-link - Are we obtaining the URL path for a synced file? - if (cfg.getValueString("get_file_link") != "") { - // Query OneDrive for the file link - sync.queryOneDriveForFileDetails(cfg.getValueString("get_file_link"), syncDir, "URL"); - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } - - // --modified-by - Are we listing the modified-by details of a provided path? - if (cfg.getValueString("modified_by") != "") { - // Query OneDrive for the file link - sync.queryOneDriveForFileDetails(cfg.getValueString("modified_by"), syncDir, "ModifiedBy"); - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } - - // Are we listing OneDrive Business Shared Folders - if (cfg.getValueBool("list_business_shared_folders")) { - // Is this a business account type? - if (sync.getAccountType() == "business"){ - // List OneDrive Business Shared Folders - sync.listOneDriveBusinessSharedFolders(); - } else { - log.error("ERROR: Unsupported account type for listing OneDrive Business Shared Folders"); - } - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; - } - - // Are we going to sync OneDrive Business Shared Folders - if (cfg.getValueBool("sync_business_shared_folders")) { - // Is this a business account type? - if (sync.getAccountType() == "business"){ - // Configure flag to sync business folders - sync.setSyncBusinessFolders(); - } else { - log.error("ERROR: Unsupported account type for syncing OneDrive Business Shared Folders"); - } - } + // Do we need to validate the runtimeSyncDirectory to check for the presence of a '.nosync' file + checkForNoMountScenario(); - // Ensure that the value stored for cfg.getValueString("single_directory") does not contain any extra quotation marks - if (cfg.getValueString("single_directory") != ""){ - string originalSingleDirectoryValue = cfg.getValueString("single_directory"); - // Strip quotation marks from provided path to ensure no issues within a Docker environment when using passed in values - string updatedSingleDirectoryValue = strip(originalSingleDirectoryValue, "\""); - cfg.setValueString("single_directory", updatedSingleDirectoryValue); - } - - // Are we displaying the sync status of the client? - if (cfg.getValueBool("display_sync_status")) { + // Set the default thread pool value - hard coded to 16 + defaultPoolThreads(to!int(appConfig.concurrentThreads)); + + // Is the sync engine initiallised correctly? 
+ if (appConfig.syncEngineWasInitialised) { + // Configure some initial variables + string singleDirectoryPath; + string localPath = "."; string remotePath = "/"; - // Are we doing a single directory check? - if (cfg.getValueString("single_directory") != ""){ - // Need two different path strings here - remotePath = cfg.getValueString("single_directory"); + + // Check if there are interrupted upload session(s) + if (syncEngineInstance.checkForInterruptedSessionUploads) { + // Need to re-process the session upload files to resume the failed session uploads + addLogEntry("There are interrupted session uploads that need to be resumed ..."); + // Process the session upload files + syncEngineInstance.processForInterruptedSessionUploads(); + } + + // Are we doing a single directory operation (--single-directory) ? + if (!appConfig.getValueString("single_directory").empty) { + // Set singleDirectoryPath + singleDirectoryPath = appConfig.getValueString("single_directory"); + + // Ensure that this is a normalised relative path to runtimeSyncDirectory + string normalisedRelativePath = replace(buildNormalizedPath(absolutePath(singleDirectoryPath)), buildNormalizedPath(absolutePath(runtimeSyncDirectory)), "." ); + + // The user provided a directory to sync within the configured 'sync_dir' path + // This also validates if the path being used exists online and/or does not have a 'case-insensitive match' + syncEngineInstance.setSingleDirectoryScope(normalisedRelativePath); + + // Does the directory we want to sync actually exist locally? + if (!exists(singleDirectoryPath)) { + // The requested path to use with --single-directory does not exist locally within the configured 'sync_dir' + addLogEntry("WARNING: The requested path for --single-directory does not exist locally. Creating requested path within " ~ runtimeSyncDirectory, ["info", "notify"]); + // Make the required --single-directory path locally + mkdirRecurse(singleDirectoryPath); + // Configure the applicable permissions for the folder + addLogEntry("Setting directory permissions for: " ~ singleDirectoryPath, ["debug"]); + singleDirectoryPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); + } + + // Update the paths that we use to perform the sync actions + localPath = singleDirectoryPath; + remotePath = singleDirectoryPath; + + // Display that we are syncing from a specific path due to --single-directory + addLogEntry("Syncing changes from this selected path: " ~ singleDirectoryPath, ["verbose"]); } - sync.queryDriveForChanges(remotePath); - } - - // Are we performing a sync, or monitor operation? - if ((cfg.getValueBool("synchronize")) || (cfg.getValueBool("monitor"))) { - // Initialise the monitor class, so that we can do more granular inotify handling when performing the actual sync - // needed for --synchronize and --monitor handling - Monitor m = new Monitor(selectiveSync); - if (cfg.getValueBool("synchronize")) { - if (online) { - // set flag for exit scope - synchronizeConfigured = true; - - // Check user entry for local path - the above chdir means we are already in ~/OneDrive/ thus singleDirectory is local to this path - if (cfg.getValueString("single_directory") != "") { - // Does the directory we want to sync actually exist? - if (!exists(cfg.getValueString("single_directory"))) { - // The requested path to use with --single-directory does not exist locally within the configured 'sync_dir' - log.logAndNotify("WARNING: The requested path for --single-directory does not exist locally. 
Creating requested path within ", syncDir); - // Make the required --single-directory path locally - string singleDirectoryPath = cfg.getValueString("single_directory"); - mkdirRecurse(singleDirectoryPath); - // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", singleDirectoryPath); - singleDirectoryPath.setAttributes(cfg.returnRequiredDirectoryPermisions()); - } + // Are we doing a --sync operation? This includes doing any --single-directory operations + if (appConfig.getValueBool("synchronize")) { + // Did the user specify --upload-only? + if (appConfig.getValueBool("upload_only")) { + // Perform the --upload-only sync process + performUploadOnlySyncProcess(localPath); + } + + // Did the user specify --download-only? + if (appConfig.getValueBool("download_only")) { + // Only download data from OneDrive + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + // Perform the DB consistency check + // This will also delete any out-of-sync flagged items if configured to do so + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + // Do we cleanup local files? + // - Deletes of data from online will already have been performed, but what we are now doing is searching the local filesystem + // for any new data locally, that usually would be uploaded to OneDrive, but instead, because of the options being + // used, will need to be deleted from the local filesystem + if (appConfig.getValueBool("cleanup_local_files")) { + // Perform the filesystem walk + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); } - // perform a --synchronize sync - // fullScanRequired = false, for final true-up - // but if we have sync_list configured, use syncListConfigured which = true - performSync(sync, cfg.getValueString("single_directory"), cfg.getValueBool("download_only"), cfg.getValueBool("local_first"), cfg.getValueBool("upload_only"), LOG_NORMAL, false, syncListConfigured, displaySyncOptions, cfg.getValueBool("monitor"), m, cleanupLocalFilesGlobal); - - // Write WAL and SHM data to file for this sync - log.vdebug("Merge contents of WAL and SHM files into main database file"); - itemDb.performVacuum(); } + + // If no use of --upload-only or --download-only + if ((!appConfig.getValueBool("upload_only")) && (!appConfig.getValueBool("download_only"))) { + // Perform the standard sync process + performStandardSyncProcess(localPath); + } + + // Detail the outcome of the sync process + displaySyncOutcome(); } - - if (cfg.getValueBool("monitor")) { - log.logAndNotify("Initializing monitor ..."); - log.log("OneDrive monitor interval (seconds): ", cfg.getValueLong("monitor_interval")); - - m.onDirCreated = delegate(string path) { + + // Are we doing a --monitor operation? 
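+ // Editor's note (illustrative, not part of the upstream change): the --monitor block below reads
+ // /proc/sys/fs/file-max and /proc/sys/fs/inotify/max_user_watches so the client can report the
+ // kernel limits that constrain inotify-based filesystem watching. If a large 'sync_dir' exhausts
+ // the watch limit, it can typically be raised with, for example:
+ //   sysctl fs.inotify.max_user_watches=524288
+ // (the value shown is only an example; persist it via /etc/sysctl.d/ if it needs to survive a reboot)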
+ if (appConfig.getValueBool("monitor")) { + // What are the current values for the platform we are running on + // Max number of open files /proc/sys/fs/file-max + string maxOpenFiles = strip(readText("/proc/sys/fs/file-max")); + // What is the currently configured maximum inotify watches that can be used + // /proc/sys/fs/inotify/max_user_watches + string maxInotifyWatches = strip(readText("/proc/sys/fs/inotify/max_user_watches")); + + // Start the monitor process + addLogEntry("OneDrive synchronisation interval (seconds): " ~ to!string(appConfig.getValueLong("monitor_interval"))); + + // If we are in a --download-only method of operation, the output of these is not required + if (!appConfig.getValueBool("download_only")) { + addLogEntry("Maximum allowed open files: " ~ maxOpenFiles, ["verbose"]); + addLogEntry("Maximum allowed inotify user watches: " ~ maxInotifyWatches, ["verbose"]); + } + + // Configure the monitor class + Tid workerTid; + filesystemMonitor = new Monitor(appConfig, selectiveSync); + + // Delegated function for when inotify detects a new local directory has been created + filesystemMonitor.onDirCreated = delegate(string path) { // Handle .folder creation if skip_dotfiles is enabled - if ((cfg.getValueBool("skip_dotfiles")) && (selectiveSync.isDotFile(path))) { - log.vlog("[M] Skipping watching local path - .folder found & --skip-dot-files enabled: ", path); + if ((appConfig.getValueBool("skip_dotfiles")) && (isDotFile(path))) { + addLogEntry("[M] Skipping watching local path - .folder found & --skip-dot-files enabled: " ~ path, ["verbose"]); } else { - log.vlog("[M] Local directory created: ", path); + addLogEntry("[M] Local directory created: " ~ path, ["verbose"]); try { - sync.scanForDifferences(path); + syncEngineInstance.scanLocalFilesystemPathForNewData(path); } catch (CurlException e) { - log.vlog("Offline, cannot create remote dir!"); + addLogEntry("Offline, cannot create remote dir: " ~ path, ["verbose"]); } catch(Exception e) { - log.logAndNotify("Cannot create remote directory: ", e.msg); + addLogEntry("Cannot create remote directory: " ~ e.msg, ["info", "notify"]); } } }; - m.onFileChanged = delegate(string path) { - log.vlog("[M] Local file changed: ", path); + + // Delegated function for when inotify detects a local file has been changed + filesystemMonitor.onFileChanged = delegate(string path) { + // Handle a potentially locally changed file + // Logging for this event moved to handleLocalFileTrigger() due to threading and false triggers from scanLocalFilesystemPathForNewData() above try { - sync.scanForDifferences(path); + syncEngineInstance.handleLocalFileTrigger(path); } catch (CurlException e) { - log.vlog("Offline, cannot upload changed item!"); + addLogEntry("Offline, cannot upload changed item: " ~ path, ["verbose"]); } catch(Exception e) { - log.logAndNotify("Cannot upload file changes/creation: ", e.msg); + addLogEntry("Cannot upload file changes/creation: " ~ e.msg, ["info", "notify"]); } }; - m.onDelete = delegate(string path) { - log.log("Received inotify delete event from operating system .. attempting item deletion as requested"); - log.vlog("[M] Local item deleted: ", path); + + // Delegated function for when inotify detects a delete event + filesystemMonitor.onDelete = delegate(string path) { + addLogEntry("[M] Local item deleted: " ~ path, ["verbose"]); try { - sync.deleteByPath(path); + addLogEntry("The operating system sent a deletion notification. 
Trying to delete the item as requested"); + syncEngineInstance.deleteByPath(path); } catch (CurlException e) { - log.vlog("Offline, cannot delete item!"); + addLogEntry("Offline, cannot delete item: " ~ path, ["verbose"]); } catch(SyncException e) { if (e.msg == "The item to delete is not in the local database") { - log.vlog("Item cannot be deleted from OneDrive because it was not found in the local database"); + addLogEntry("Item cannot be deleted from Microsoft OneDrive because it was not found in the local database", ["verbose"]); } else { - log.logAndNotify("Cannot delete remote item: ", e.msg); + addLogEntry("Cannot delete remote item: " ~ e.msg, ["info", "notify"]); } } catch(Exception e) { - log.logAndNotify("Cannot delete remote item: ", e.msg); + addLogEntry("Cannot delete remote item: " ~ e.msg, ["info", "notify"]); } }; - m.onMove = delegate(string from, string to) { - log.vlog("[M] Local item moved: ", from, " -> ", to); + + // Delegated function for when inotify detects a move event + filesystemMonitor.onMove = delegate(string from, string to) { + addLogEntry("[M] Local item moved: " ~ from ~ " -> " ~ to, ["verbose"]); try { // Handle .folder -> folder if skip_dotfiles is enabled - if ((cfg.getValueBool("skip_dotfiles")) && (selectiveSync.isDotFile(from))) { + if ((appConfig.getValueBool("skip_dotfiles")) && (isDotFile(from))) { // .folder -> folder handling - has to be handled as a new folder - sync.scanForDifferences(to); + syncEngineInstance.scanLocalFilesystemPathForNewData(to); } else { - sync.uploadMoveItem(from, to); + syncEngineInstance.uploadMoveItem(from, to); } } catch (CurlException e) { - log.vlog("Offline, cannot move item!"); + addLogEntry("Offline, cannot move item !", ["verbose"]); } catch(Exception e) { - log.logAndNotify("Cannot move item: ", e.msg); + addLogEntry("Cannot move item: " ~ e.msg, ["info", "notify"]); } }; + + // Handle SIGINT and SIGTERM signal(SIGINT, &exitHandler); signal(SIGTERM, &exitHandler); - - // attempt to initialise monitor class - if (!cfg.getValueBool("download_only")) { + + // Initialise the local filesystem monitor class using inotify to monitor for local filesystem changes + // If we are in a --download-only method of operation, we do not enable local filesystem monitoring + if (!appConfig.getValueBool("download_only")) { + // Not using --download-only try { - m.init(cfg, cfg.getValueLong("verbose") > 0, cfg.getValueBool("skip_symlinks"), cfg.getValueBool("check_nosync")); - } catch (MonitorException e) { - // monitor initialisation failed - log.error("ERROR: ", e.msg); - oneDrive.shutdown(); + addLogEntry("Initialising filesystem inotify monitoring ..."); + filesystemMonitor.initialise(); + workerTid = filesystemMonitor.watch(); + addLogEntry("Performing initial syncronisation to ensure consistent local state ..."); + } catch (MonitorException e) { + // monitor class initialisation failed + addLogEntry("ERROR: " ~ e.msg); return EXIT_FAILURE; } } - - // monitor loop + + // Filesystem monitor loop variables + // Immutables + immutable auto checkOnlineInterval = dur!"seconds"(appConfig.getValueLong("monitor_interval")); + immutable auto githubCheckInterval = dur!"seconds"(86400); + immutable ulong fullScanFrequency = appConfig.getValueLong("monitor_fullscan_frequency"); + immutable ulong logOutputSupressionInterval = appConfig.getValueLong("monitor_log_frequency"); + immutable bool webhookEnabled = appConfig.getValueBool("webhook_enabled"); + immutable string loopStartOutputMessage = 
"################################################## NEW LOOP ##################################################"; + immutable string loopStopOutputMessage = "################################################ LOOP COMPLETE ###############################################"; + + // Changables bool performMonitor = true; ulong monitorLoopFullCount = 0; - immutable auto checkInterval = dur!"seconds"(cfg.getValueLong("monitor_interval")); - immutable auto githubCheckInterval = dur!"seconds"(86400); - immutable long logInterval = cfg.getValueLong("monitor_log_frequency"); - immutable long fullScanFrequency = cfg.getValueLong("monitor_fullscan_frequency"); + ulong fullScanFrequencyLoopCount = 0; + ulong monitorLogOutputLoopCount = 0; MonoTime lastCheckTime = MonoTime.currTime(); MonoTime lastGitHubCheckTime = MonoTime.currTime(); - long logMonitorCounter = 0; - long fullScanCounter = 0; - // set fullScanRequired to true so that at application startup we perform a full walk - bool fullScanRequired = true; - bool syncListConfiguredFullScanOverride = false; - // if sync list is configured, set to true - if (syncListConfigured) { - // sync list is configured - syncListConfiguredFullScanOverride = true; - } - immutable bool webhookEnabled = cfg.getValueBool("webhook_enabled"); - + // Webhook Notification Handling + bool notificationReceived = false; + while (performMonitor) { - if (!cfg.getValueBool("download_only")) { + // Do we need to validate the runtimeSyncDirectory to check for the presence of a '.nosync' file - the disk may have been ejected .. + checkForNoMountScenario(); + + // If we are in a --download-only method of operation, there is no filesystem monitoring, so no inotify events to check + if (!appConfig.getValueBool("download_only")) { try { - m.update(online); + // Process any inotify events + filesystemMonitor.update(true); } catch (MonitorException e) { // Catch any exceptions thrown by inotify / monitor engine - log.error("ERROR: The following inotify error was generated: ", e.msg); + addLogEntry("ERROR: The following inotify error was generated: " ~ e.msg); } } - + + // Webhook Notification reset to false for this loop + notificationReceived = false; + // Check for notifications pushed from Microsoft to the webhook - bool notificationReceived = false; if (webhookEnabled) { // Create a subscription on the first run, or renew the subscription // on subsequent runs when it is about to expire. - oneDrive.createOrRenewSubscription(); - - // Process incoming notifications if any. - - // Empirical evidence shows that Microsoft often sends multiple - // notifications for one single change, so we need a loop to exhaust - // all signals that were queued up by the webhook. The notifications - // do not contain any actual changes, and we will always rely do the - // delta endpoint to sync to latest. Therefore, only one sync run is - // good enough to catch up for multiple notifications. - for (int signalCount = 0;; signalCount++) { - const auto signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {}); - if (signalExists) { - notificationReceived = true; + oneDriveApiInstance.createOrRenewSubscription(); + } + + // Get the current time this loop is starting + auto currentTime = MonoTime.currTime(); + + // Do we perform a sync with OneDrive? 
+ if ((currentTime - lastCheckTime >= checkOnlineInterval) || (monitorLoopFullCount == 0)) {
+ // Increment relevant counters
+ monitorLoopFullCount++;
+ fullScanFrequencyLoopCount++;
+ monitorLogOutputLoopCount++;
+ 
+ // Is a full scan at a specific frequency enabled?
+ if (fullScanFrequency > 0) {
+ // Full Scan set for some 'frequency' - do we flag to perform a full scan of the online data?
+ if (fullScanFrequencyLoopCount > fullScanFrequency) {
+ // set full scan trigger for true up
+ addLogEntry("Enabling Full Scan True Up (fullScanFrequencyLoopCount > fullScanFrequency), resetting fullScanFrequencyLoopCount = 1", ["debug"]);
+ fullScanFrequencyLoopCount = 1;
+ appConfig.fullScanTrueUpRequired = true;
} else {
- if (notificationReceived) {
- log.log("Received ", signalCount," refresh signals from the webhook");
- }
- break;
+ // unset full scan trigger for true up
+ addLogEntry("Disabling Full Scan True Up", ["debug"]);
+ appConfig.fullScanTrueUpRequired = false;
}
+ } else {
+ // No it is disabled - ensure this is false
+ appConfig.fullScanTrueUpRequired = false;
}
- }
- 
- auto currTime = MonoTime.currTime();
- // has monitor_interval elapsed or are we at application startup / monitor startup?
- // in a --resync scenario, if we have not 're-populated' the database, valid changes will get skipped:
- // Monitor directory: ./target
- // Monitor directory: target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby
- // [M] Item moved: random_files/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby -> target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby
- // Moving random_files/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby to target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby
- // Skipping uploading this new file as parent path is not in the database: target/2eVPInOMTFNXzRXeNMEoJch5OR9XpGby
- // 'target' should be in the DB, it should also exist online, but because of --resync, it does not exist in the database thus parent check fails
- if (notificationReceived || (currTime - lastCheckTime > checkInterval) || (monitorLoopFullCount == 0)) {
- // Check Application Version against GitHub once per day
- if (currTime - lastGitHubCheckTime > githubCheckInterval) {
- // --monitor GitHub Application Version Check time expired
- checkApplicationVersion();
- // update when we have performed this check
- lastGitHubCheckTime = MonoTime.currTime();
- }
- // monitor sync loop
- logOutputMessage = "################################################## NEW LOOP ##################################################";
- if (displaySyncOptions) {
- log.log(logOutputMessage);
+ // Loop Start
+ addLogEntry(loopStartOutputMessage, ["debug"]);
+ addLogEntry("Total Run-Time Loop Number: " ~ to!string(monitorLoopFullCount), ["debug"]);
+ addLogEntry("Full Scan Frequency Loop Number: " ~ to!string(fullScanFrequencyLoopCount), ["debug"]);
+ SysTime startFunctionProcessingTime = Clock.currTime();
+ addLogEntry("Start Monitor Loop Time: " ~ to!string(startFunctionProcessingTime), ["debug"]);
+ 
+ // Do we perform any monitor console logging output suppression?
+ // 'monitor_log_frequency' controls how often, in a non-verbose application output mode,
+ // the full output of what is occurring is displayed. This is done to lessen the 'verbosity' of non-verbose
+ // logging, but only when running in --monitor
+ if (monitorLogOutputLoopCount > logOutputSupressionInterval) {
+ // unsuppress the logging output
+ monitorLogOutputLoopCount = 1;
+ addLogEntry("Unsuppressing initial sync log output", ["debug"]);
+ appConfig.surpressLoggingOutput = false;
} else {
- log.vdebug(logOutputMessage);
- }
- // Increment monitorLoopFullCount
- monitorLoopFullCount++;
- // Display memory details at start of loop
- if (displayMemoryUsage) {
- log.displayMemoryUsagePreGC();
- }
- 
- // log monitor output suppression
- logMonitorCounter += 1;
- if (logMonitorCounter > logInterval) {
- logMonitorCounter = 1;
- }
- 
- // do we perform a full scan of sync_dir and database integrity check?
- fullScanCounter += 1;
- // fullScanFrequency = 'monitor_fullscan_frequency' from config
- if (fullScanCounter > fullScanFrequency){
- // 'monitor_fullscan_frequency' counter has exceeded
- fullScanCounter = 1;
- // set fullScanRequired = true due to 'monitor_fullscan_frequency' counter has been exceeded
- fullScanRequired = true;
- // are we using sync_list?
- if (syncListConfigured) {
- // sync list is configured
- syncListConfiguredFullScanOverride = true;
+ // do we suppress the logging output to the absolute minimum
+ if (monitorLoopFullCount == 1) {
+ // application startup with --monitor
+ addLogEntry("Unsuppressing initial sync log output", ["debug"]);
+ appConfig.surpressLoggingOutput = false;
+ } else {
+ // only suppress if we are not doing --verbose or higher
+ if (appConfig.verbosityCount == 0) {
+ addLogEntry("Suppressing --monitor log output", ["debug"]);
+ appConfig.surpressLoggingOutput = true;
+ } else {
+ addLogEntry("Unsuppressing log output", ["debug"]);
+ appConfig.surpressLoggingOutput = false;
+ }
+ }
}
- 
- if (displaySyncOptions) {
- // sync option handling per sync loop
- log.log("fullScanCounter = ", fullScanCounter);
- log.log("syncListConfigured = ", syncListConfigured);
- log.log("fullScanRequired = ", fullScanRequired);
- log.log("syncListConfiguredFullScanOverride = ", syncListConfiguredFullScanOverride);
- } else {
- // sync option handling per sync loop via debug
- log.vdebug("fullScanCounter = ", fullScanCounter);
- log.vdebug("syncListConfigured = ", syncListConfigured);
- log.vdebug("fullScanRequired = ", fullScanRequired);
- log.vdebug("syncListConfiguredFullScanOverride = ", syncListConfiguredFullScanOverride);
- }
- 
- try {
- if (!initSyncEngine(sync)) {
- // Use exit scopes to shutdown API
- return EXIT_FAILURE;
+ 
+ // How long has the application been running for?
+ auto elapsedTime = Clock.currTime() - applicationStartTime;
+ addLogEntry("Application run-time thus far: " ~ to!string(elapsedTime), ["debug"]);
+ 
+ // Need to re-validate that the client is still online for this loop
+ if (testInternetReachability(appConfig)) {
+ // Starting a sync
+ addLogEntry("Starting a sync with Microsoft OneDrive");
+ 
+ // Attempt to reset syncFailures
+ syncEngineInstance.resetSyncFailures();
+ 
+ // Did the user specify --upload-only? 
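+ // Editor's note (hypothetical usage example, not part of the upstream change): a one-way
+ // "upload only" monitor run would be started as, for example:
+ //   onedrive --monitor --upload-only
+ // in which case performUploadOnlySyncProcess() below runs the database consistency check and
+ // then scans the local 'sync_dir' for new data, without downloading changes from OneDrive.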
+ if (appConfig.getValueBool("upload_only")) { + // Perform the --upload-only sync process + performUploadOnlySyncProcess(localPath, filesystemMonitor); + } else { + // Perform the standard sync process + performStandardSyncProcess(localPath, filesystemMonitor); } - try { - // performance timing - SysTime startSyncProcessingTime = Clock.currTime(); - - // perform a --monitor sync - if ((cfg.getValueLong("verbose") > 0) || (logMonitorCounter == logInterval) || (fullScanRequired) ) { - // log to console and log file if enabled - if (cfg.getValueBool("display_processing_time")) { - log.log(startMessage, " ", startSyncProcessingTime); - } else { - log.log(startMessage); - } - } else { - // log file only if enabled so we know when a sync started when not using --verbose - log.fileOnly(startMessage); - } - performSync(sync, cfg.getValueString("single_directory"), cfg.getValueBool("download_only"), cfg.getValueBool("local_first"), cfg.getValueBool("upload_only"), (logMonitorCounter == logInterval ? MONITOR_LOG_QUIET : MONITOR_LOG_SILENT), fullScanRequired, syncListConfiguredFullScanOverride, displaySyncOptions, cfg.getValueBool("monitor"), m, cleanupLocalFilesGlobal); - if (!cfg.getValueBool("download_only")) { - // discard all events that may have been generated by the sync that have not already been handled - try { - m.update(false); - } catch (MonitorException e) { - // Catch any exceptions thrown by inotify / monitor engine - log.error("ERROR: The following inotify error was generated: ", e.msg); - } - } - SysTime endSyncProcessingTime = Clock.currTime(); - if ((cfg.getValueLong("verbose") > 0) || (logMonitorCounter == logInterval) || (fullScanRequired) ) { - // log to console and log file if enabled - if (cfg.getValueBool("display_processing_time")) { - log.log(finishMessage, " ", endSyncProcessingTime); - log.log("Elapsed Sync Time with OneDrive Service: ", (endSyncProcessingTime - startSyncProcessingTime)); - } else { - log.log(finishMessage); - } - } else { - // log file only if enabled so we know when a sync completed when not using --verbose - log.fileOnly(finishMessage); - } - } catch (CurlException e) { - // we already tried three times in the performSync routine - // if we still have problems, then the sync handle might have - // gone stale and we need to re-initialize the sync engine - log.log("Persistent connection errors, reinitializing connection"); - sync.reset(); + + // Handle any new inotify events + filesystemMonitor.update(true); + + // Detail the outcome of the sync process + displaySyncOutcome(); + + if (appConfig.fullScanTrueUpRequired) { + // Write WAL and SHM data to file for this loop + addLogEntry("Merge contents of WAL and SHM files into main database file", ["debug"]); + itemDB.performVacuum(); } - } catch (CurlException e) { - log.log("Cannot initialize connection to OneDrive"); + } else { + // Not online + addLogEntry("Microsoft OneDrive service is not reachable at this time. 
Will re-try on next sync attempt."); } - // performSync complete, set lastCheckTime to current time - lastCheckTime = MonoTime.currTime(); + + // Output end of loop processing times + SysTime endFunctionProcessingTime = Clock.currTime(); + addLogEntry("End Monitor Loop Time: " ~ to!string(endFunctionProcessingTime), ["debug"]); + addLogEntry("Elapsed Monitor Loop Processing Time: " ~ to!string((endFunctionProcessingTime - startFunctionProcessingTime)), ["debug"]); // Display memory details before cleanup - if (displayMemoryUsage) log.displayMemoryUsagePreGC(); + if (displayMemoryUsage) displayMemoryUsagePreGC(); // Perform Garbage Cleanup GC.collect(); + // Return free memory to the OS + GC.minimize(); // Display memory details after cleanup - if (displayMemoryUsage) log.displayMemoryUsagePostGC(); + if (displayMemoryUsage) displayMemoryUsagePostGC(); - // If we did a full scan, make sure we merge the conents of the WAL and SHM to disk - if (fullScanRequired) { - // Write WAL and SHM data to file for this loop - log.vdebug("Merge contents of WAL and SHM files into main database file"); - itemDb.performVacuum(); - } + // Log that this loop is complete + addLogEntry(loopStopOutputMessage, ["debug"]); - // reset fullScanRequired and syncListConfiguredFullScanOverride - fullScanRequired = false; - if (syncListConfigured) syncListConfiguredFullScanOverride = false; + // performSync complete, set lastCheckTime to current time + lastCheckTime = MonoTime.currTime(); - // monitor loop complete - logOutputMessage = "################################################ LOOP COMPLETE ###############################################"; - - // Handle display options - if (displaySyncOptions) { - log.log(logOutputMessage); - } else { - log.vdebug(logOutputMessage); - } // Developer break via config option - if (cfg.getValueLong("monitor_max_loop") > 0) { + if (appConfig.getValueLong("monitor_max_loop") > 0) { // developer set option to limit --monitor loops - if (monitorLoopFullCount == (cfg.getValueLong("monitor_max_loop"))) { + if (monitorLoopFullCount == (appConfig.getValueLong("monitor_max_loop"))) { + performMonitor = false; + addLogEntry("Exiting after " ~ to!string(monitorLoopFullCount) ~ " loops due to developer set option"); + } + } + } + + if (performMonitor) { + auto nextCheckTime = lastCheckTime + checkOnlineInterval; + currentTime = MonoTime.currTime(); + auto sleepTime = nextCheckTime - currentTime; + addLogEntry("Sleep for " ~ to!string(sleepTime), ["debug"]); + + if(filesystemMonitor.initialised || webhookEnabled) { + if(filesystemMonitor.initialised) { + // If local monitor is on + // start the worker and wait for event + if(!filesystemMonitor.isWorking()) { + workerTid.send(1); + } + } + + if(webhookEnabled) { + // if onedrive webhook is enabled + // update sleep time based on renew interval + Duration nextWebhookCheckDuration = oneDriveApiInstance.getNextExpirationCheckDuration(); + if (nextWebhookCheckDuration < sleepTime) { + sleepTime = nextWebhookCheckDuration; + addLogEntry("Update sleeping time to " ~ to!string(sleepTime), ["debug"]); + } + notificationReceived = false; + } + + int res = 1; + // Process incoming notifications if any. 
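+ // Editor's note (illustrative sketch of the std.concurrency pattern used below, not upstream code):
+ //   import std.concurrency : receiveTimeout;
+ //   import core.time : seconds;
+ //   bool got = receiveTimeout(5.seconds,
+ //       (int status) { /* monitor worker status; negative values indicate failure */ },
+ //       (ulong _)    { /* a webhook pushed a change notification */ });
+ //   // 'got' is false when the timeout elapsed without any message arriving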
+ auto signalExists = receiveTimeout(sleepTime, + (int msg) { + res = msg; + }, + (ulong _) { + notificationReceived = true; + } + ); + + // Debug values + addLogEntry("signalExists = " ~ to!string(signalExists), ["debug"]); + addLogEntry("worker status = " ~ to!string(res), ["debug"]); + addLogEntry("notificationReceived = " ~ to!string(notificationReceived), ["debug"]); + + // Empirical evidence shows that Microsoft often sends multiple + // notifications for one single change, so we need a loop to exhaust + // all signals that were queued up by the webhook. The notifications + // do not contain any actual changes, and we will always rely do the + // delta endpoint to sync to latest. Therefore, only one sync run is + // good enough to catch up for multiple notifications. + int signalCount = notificationReceived ? 1 : 0; + for (;; signalCount++) { + signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {}); + if (signalExists) { + notificationReceived = true; + } else { + if (notificationReceived) { + addLogEntry("Received " ~ to!string(signalCount) ~ " refresh signals from the webhook"); + oneDriveWebhookCallback(); + } + break; + } + } + + if(res == -1) { + addLogEntry("ERROR: Monitor worker failed."); + monitorFailures = true; performMonitor = false; - log.log("Exiting after ", monitorLoopFullCount, " loops due to developer set option"); } + } else { + // no hooks available, nothing to check + Thread.sleep(sleepTime); } } - // Sleep the monitor thread for 1 second, loop around and pick up any inotify changes - Thread.sleep(dur!"seconds"(1)); } } + } else { + // Exit application as the sync engine could not be initialised + addLogEntry("Application Sync Engine could not be initialised correctly"); + // Use exit scope + return EXIT_FAILURE; + } + + // Exit application using exit scope + if (!syncEngineInstance.syncFailures && !monitorFailures) { + return EXIT_SUCCESS; + } else { + return EXIT_FAILURE; } - - // Exit application - // Use exit scopes to shutdown API - return EXIT_SUCCESS; } -void cleanupDryRunDatabase(string databaseFilePathDryRun) -{ - // cleanup dry-run data - log.vdebug("Running cleanupDryRunDatabase"); - string dryRunShmFile = databaseFilePathDryRun ~ "-shm"; - string dryRunWalFile = databaseFilePathDryRun ~ "-wal"; - if (exists(databaseFilePathDryRun)) { - // remove the file - log.vdebug("Removing items-dryrun.sqlite3 as dry run operations complete"); - // remove items-dryrun.sqlite3 - safeRemove(databaseFilePathDryRun); +void performStandardExitProcess(string scopeCaller = null) { + // Who called this function + if (!scopeCaller.empty) { + addLogEntry("Running performStandardExitProcess due to: " ~ scopeCaller, ["debug"]); } - // silent cleanup of shm and wal files if they exist - if (exists(dryRunShmFile)) { - // remove items-dryrun.sqlite3-shm - safeRemove(dryRunShmFile); + + // Shutdown the OneDrive API instance + if (oneDriveApiInstance !is null) { + addLogEntry("Shutdown OneDrive API instance", ["debug"]); + oneDriveApiInstance.shutdown(); + object.destroy(oneDriveApiInstance); } - if (exists(dryRunWalFile)) { - // remove items-dryrun.sqlite3-wal - safeRemove(dryRunWalFile); + + // Shutdown the sync engine + if (syncEngineInstance !is null) { + addLogEntry("Shutdown Sync Engine instance", ["debug"]); + object.destroy(syncEngineInstance); } -} - -bool initSyncEngine(SyncEngine sync) -{ - try { - sync.init(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 400 || e.httpStatusCode == 401) { - // Authorization is invalid - log.log("\nAuthorization 
token invalid, use --reauth to authorize the client again\n"); - return false; - } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error, message already printed - return false; + + // Shutdown the client side filtering objects + if (selectiveSync !is null) { + addLogEntry("Shutdown Client Side Filtering instance", ["debug"]); + selectiveSync.shutdown(); + object.destroy(selectiveSync); + } + + // Shutdown the application configuration objects + if (appConfig !is null) { + addLogEntry("Shutdown Application Configuration instance", ["debug"]); + // Cleanup any existing dry-run elements ... these should never be left hanging around + cleanupDryRunDatabaseFiles(appConfig.databaseFilePathDryRun); + object.destroy(appConfig); + } + + // Shutdown any local filesystem monitoring + if (filesystemMonitor !is null) { + addLogEntry("Shutdown Filesystem Monitoring instance", ["debug"]); + filesystemMonitor.shutdown(); + object.destroy(filesystemMonitor); + } + + // Shutdown the database + if (itemDB !is null) { + addLogEntry("Shutdown Database instance", ["debug"]); + // Make sure the .wal file is incorporated into the main db before we exit + if (itemDB.isDatabaseInitialised()) { + itemDB.performVacuum(); } + object.destroy(itemDB); + } + + // Set all objects to null + if (scopeCaller == "failureScope") { + // Set these to be null due to failure scope - prevent 'ERROR: Unable to perform a database vacuum: out of memory' when the exit scope is then called + addLogEntry("Setting ALL Class Objects to null due to failure scope", ["debug"]); + itemDB = null; + appConfig = null; + oneDriveApiInstance = null; + selectiveSync = null; + syncEngineInstance = null; + } else { + addLogEntry("Application exit", ["debug"]); + addLogEntry("#######################################################################################################################################", ["logFileOnly"]); + // Sleep to allow any final logging output to be printed - this is needed as we are using buffered logging output + Thread.sleep(dur!("msecs")(500)); + // Destroy the shared logging buffer + object.destroy(logBuffer); } - return true; } -// try to synchronize the folder three times -void performSync(SyncEngine sync, string singleDirectory, bool downloadOnly, bool localFirst, bool uploadOnly, long logLevel, bool fullScanRequired, bool syncListConfiguredFullScanOverride, bool displaySyncOptions, bool monitorEnabled, Monitor m, bool cleanupLocalFiles) -{ - int count; - string remotePath = "/"; - string localPath = "."; - string logOutputMessage; +void oneDriveWebhookCallback() { + // If we are in a --download-only method of operation, there is no filesystem monitoring, so no inotify events to check + if (!appConfig.getValueBool("download_only")) { + try { + // Process any inotify events + filesystemMonitor.update(true); + } catch (MonitorException e) { + // Catch any exceptions thrown by inotify / monitor engine + addLogEntry("ERROR: The following inotify error was generated: " ~ e.msg); + } + } - // performSync API scan triggers - log.vdebug("performSync API scan triggers"); - log.vdebug("-----------------------------"); - log.vdebug("fullScanRequired = ", fullScanRequired); - log.vdebug("syncListConfiguredFullScanOverride = ", syncListConfiguredFullScanOverride); - log.vdebug("-----------------------------"); + // Download data from OneDrive last + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + if (appConfig.getValueBool("monitor")) { + // Handle any new inotify events + 
filesystemMonitor.update(true); + } +} - // Are we doing a single directory sync? - if (singleDirectory != ""){ - // Need two different path strings here - remotePath = singleDirectory; - localPath = singleDirectory; - // Set flag for singleDirectoryScope for change handling - sync.setSingleDirectoryScope(); +void performUploadOnlySyncProcess(string localPath, Monitor filesystemMonitor = null) { + // Perform the local database consistency check, picking up locally modified data and uploading this to OneDrive + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + if (appConfig.getValueBool("monitor")) { + // Handle any inotify events whilst the DB was being scanned + filesystemMonitor.update(true); + } + + // Scan the configured 'sync_dir' for new data to upload + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); + if (appConfig.getValueBool("monitor")) { + // Handle any new inotify events whilst the local filesystem was being scanned + filesystemMonitor.update(true); } +} - // Due to Microsoft Sharepoint 'enrichment' of files, we try to download the Microsoft modified file automatically - // Set flag if we are in upload only state to handle this differently - // See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details - if (uploadOnly) sync.setUploadOnly(); +void performStandardSyncProcess(string localPath, Monitor filesystemMonitor = null) { - do { - try { - // starting a sync - logOutputMessage = "################################################## NEW SYNC ##################################################"; - if (displaySyncOptions) { - log.log(logOutputMessage); - } else { - log.vdebug(logOutputMessage); + // If we are performing log supression, output this message so the user knows what is happening + if (appConfig.surpressLoggingOutput) { + addLogEntry("Syncing changes from Microsoft OneDrive ..."); + } + + // Zero out these arrays + syncEngineInstance.fileDownloadFailures = []; + syncEngineInstance.fileUploadFailures = []; + + // Which way do we sync first? 
+ // OneDrive first then local changes (normal operational process that uses OneDrive as the source of truth) + // Local First then OneDrive changes (alternate operation process to use local files as source of truth) + if (appConfig.getValueBool("local_first")) { + // Local data first + // Perform the local database consistency check, picking up locally modified data and uploading this to OneDrive + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + if (appConfig.getValueBool("monitor")) { + // Handle any inotify events whilst the DB was being scanned + filesystemMonitor.update(true); + } + + // Scan the configured 'sync_dir' for new data to upload to OneDrive + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); + if (appConfig.getValueBool("monitor")) { + // Handle any new inotify events whilst the local filesystem was being scanned + filesystemMonitor.update(true); + } + + // Download data from OneDrive last + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + if (appConfig.getValueBool("monitor")) { + // Cancel out any inotify events from downloading data + filesystemMonitor.update(false); + } + } else { + // Normal sync + // Download data from OneDrive first + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + if (appConfig.getValueBool("monitor")) { + // Cancel out any inotify events from downloading data + filesystemMonitor.update(false); + } + + + // Perform the local database consistency check, picking up locally modified data and uploading this to OneDrive + syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck(); + if (appConfig.getValueBool("monitor")) { + // Handle any inotify events whilst the DB was being scanned + filesystemMonitor.update(true); + } + + // Is --download-only NOT configured? + if (!appConfig.getValueBool("download_only")) { + + // Scan the configured 'sync_dir' for new data to upload to OneDrive + syncEngineInstance.scanLocalFilesystemPathForNewData(localPath); + if (appConfig.getValueBool("monitor")) { + // Handle any new inotify events whilst the local filesystem was being scanned + filesystemMonitor.update(true); } - if (singleDirectory != ""){ - // we were requested to sync a single directory - log.vlog("Syncing changes from this selected path: ", singleDirectory); - if (uploadOnly){ - // Upload Only of selected single directory - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from selected local path only - NOT syncing data changes from OneDrive ..."); - sync.scanForDifferences(localPath); - } else { - // No upload only - if (localFirst) { - // Local First - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from selected local path first before downloading changes from OneDrive ..."); - sync.scanForDifferences(localPath); - sync.applyDifferencesSingleDirectory(remotePath); - } else { - // OneDrive First - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from selected OneDrive path ..."); - sync.applyDifferencesSingleDirectory(remotePath); - - // Is this a --download-only --cleanup-local-files request? - // If yes, scan for local changes - but --cleanup-local-files is being used, a further flag will trigger local file deletes rather than attempt to upload files to OneDrive - if (cleanupLocalFiles) { - // --download-only and --cleanup-local-files were passed in - log.log("Searching local filesystem for extra files and folders which need to be removed"); - sync.scanForDifferencesFilesystemScan(localPath); - } else { - // is this a --download-only request? 
- if (!downloadOnly) { - // process local changes - sync.scanForDifferences(localPath); - // ensure that the current remote state is updated locally - sync.applyDifferencesSingleDirectory(remotePath); - } - } - } - } - } else { - // no single directory sync - if (uploadOnly){ - // Upload Only of entire sync_dir - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from local path only - NOT syncing data changes from OneDrive ..."); - sync.scanForDifferences(localPath); - } else { - // No upload only - string syncCallLogOutput; - if (localFirst) { - // sync local files first before downloading from OneDrive - if (logLevel < MONITOR_LOG_QUIET) log.log("Syncing changes from local path first before downloading changes from OneDrive ..."); - sync.scanForDifferences(localPath); - // if syncListConfiguredFullScanOverride = true - if (syncListConfiguredFullScanOverride) { - // perform a full walk of OneDrive objects - sync.applyDifferences(syncListConfiguredFullScanOverride); - } else { - // perform a walk based on if a full scan is required - sync.applyDifferences(fullScanRequired); - } - } else { - // sync from OneDrive first before uploading files to OneDrive - if ((logLevel < MONITOR_LOG_SILENT) || (fullScanRequired)) log.log("Syncing changes and items from OneDrive ..."); - - // For the initial sync, always use the delta link so that we capture all the right delta changes including adds, moves & deletes - logOutputMessage = "Initial Scan: Call OneDrive Delta API for delta changes as compared to last successful sync."; - syncCallLogOutput = "Calling sync.applyDifferences(false);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(false); - - // Is this a --download-only --cleanup-local-files request? - // If yes, scan for local changes - but --cleanup-local-files is being used, a further flag will trigger local file deletes rather than attempt to upload files to OneDrive - if (cleanupLocalFiles) { - // --download-only and --cleanup-local-files were passed in - log.log("Searching local filesystem for extra files and folders which need to be removed"); - sync.scanForDifferencesFilesystemScan(localPath); - } else { - // is this a --download-only request? - if (!downloadOnly) { - // process local changes walking the entire path checking for changes - // in monitor mode all local changes are captured via inotify - // thus scanning every 'monitor_interval' (default 300 seconds) for local changes is excessive and not required - logOutputMessage = "Process local filesystem (sync_dir) for file changes as compared to database entries"; - syncCallLogOutput = "Calling sync.scanForDifferences(localPath);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - - SysTime startIntegrityCheckProcessingTime = Clock.currTime(); - if (sync.getPerformanceProcessingOutput()) { - // performance timing for DB and file system integrity check - start - writeln("============================================================"); - writeln("Start Integrity Check Processing Time: ", startIntegrityCheckProcessingTime); - } - - // What sort of local scan do we want to do? 
- // In --monitor mode, when performing the DB scan, a race condition occurs where by if a file or folder is moved during this process - // the inotify event is discarded once performSync() is finished (see m.update(false) above), so these events need to be handled - // This can be remediated by breaking the DB and file system scan into separate processes, and handing any applicable inotify events in between - if (!monitorEnabled) { - // --synchronize in use - log.log("Performing a database consistency and integrity check on locally stored data ... "); - // standard process flow - sync.scanForDifferences(localPath); - } else { - // --monitor in use - // Use individual calls with inotify checks between to avoid a race condition between these 2 functions - // Database scan integrity check to compare DB data vs actual content on disk to ensure what we think is local, is local - // and that the data 'hash' as recorded in the DB equals the hash of the actual content - // This process can be extremely expensive time and CPU processing wise - // - // fullScanRequired is set to TRUE when the application starts up, or the config option 'monitor_fullscan_frequency' count is reached - // By default, 'monitor_fullscan_frequency' = 12, and 'monitor_interval' = 300, meaning that by default, a full database consistency check - // is done once an hour. - // - // To change this behaviour adjust 'monitor_interval' and 'monitor_fullscan_frequency' to desired values in the application config file - if (fullScanRequired) { - log.log("Performing a database consistency and integrity check on locally stored data due to fullscan requirement ... "); - sync.scanForDifferencesDatabaseScan(localPath); - // handle any inotify events that occured 'whilst' we were scanning the database - m.update(true); - } else { - log.vdebug("NOT performing Database Integrity Check .. fullScanRequired = FALSE"); - m.update(true); - } - - // Filesystem walk to find new files not uploaded - log.vdebug("Searching local filesystem for new data"); - sync.scanForDifferencesFilesystemScan(localPath); - // handle any inotify events that occured 'whilst' we were scanning the local filesystem - m.update(true); - } - - SysTime endIntegrityCheckProcessingTime = Clock.currTime(); - if (sync.getPerformanceProcessingOutput()) { - // performance timing for DB and file system integrity check - finish - writeln("End Integrity Check Processing Time: ", endIntegrityCheckProcessingTime); - writeln("Elapsed Function Processing Time: ", (endIntegrityCheckProcessingTime - startIntegrityCheckProcessingTime)); - writeln("============================================================"); - } - - // At this point, all OneDrive changes / local changes should be uploaded and in sync - // This MAY not be the case when using sync_list, thus a full walk of OneDrive ojects is required - - // --synchronize & no sync_list : fullScanRequired = false, syncListConfiguredFullScanOverride = false - // --synchronize & sync_list in use : fullScanRequired = false, syncListConfiguredFullScanOverride = true - - // --monitor loops around 12 iterations. 
On the 1st loop, sets fullScanRequired = true, syncListConfiguredFullScanOverride = true if requried - - // --monitor & no sync_list (loop #1) : fullScanRequired = true, syncListConfiguredFullScanOverride = false - // --monitor & no sync_list (loop #2 - #12) : fullScanRequired = false, syncListConfiguredFullScanOverride = false - // --monitor & sync_list in use (loop #1) : fullScanRequired = true, syncListConfiguredFullScanOverride = true - // --monitor & sync_list in use (loop #2 - #12) : fullScanRequired = false, syncListConfiguredFullScanOverride = false + + // Make sure we sync any DB data to this point, but only if not in --monitor mode + // In --monitor mode, this is handled within the 'loop', based on when the full scan true up is being performed + if (!appConfig.getValueBool("monitor")) { + itemDB.performVacuum(); + } + + // Perform the final true up scan to ensure we have correctly replicated the current online state locally + if (!appConfig.surpressLoggingOutput) { + addLogEntry("Performing a last examination of the most recent online data within Microsoft OneDrive to complete the reconciliation process"); + } + // We pass in the 'appConfig.fullScanTrueUpRequired' value which then flags do we use the configured 'deltaLink' + // If 'appConfig.fullScanTrueUpRequired' is true, we do not use the 'deltaLink' if we are in --monitor mode, thus forcing a full scan true up + syncEngineInstance.syncOneDriveAccountToLocalDisk(); + if (appConfig.getValueBool("monitor")) { + // Cancel out any inotify events from downloading data + filesystemMonitor.update(false); + } + } + } +} - // Do not perform a full walk of the OneDrive objects - if ((!fullScanRequired) && (!syncListConfiguredFullScanOverride)){ - logOutputMessage = "Final True-Up: Do not perform a full walk of the OneDrive objects - not required"; - syncCallLogOutput = "Calling sync.applyDifferences(false);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(false); - } +void displaySyncOutcome() { - // Perform a full walk of OneDrive objects because sync_list is in use / or trigger was set in --monitor loop - if ((!fullScanRequired) && (syncListConfiguredFullScanOverride)){ - logOutputMessage = "Final True-Up: Perform a full walk of OneDrive objects because sync_list is in use / or trigger was set in --monitor loop"; - syncCallLogOutput = "Calling sync.applyDifferences(true);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(true); - } + // Detail any download or upload transfer failures + syncEngineInstance.displaySyncFailures(); + + // Sync is either complete or partially complete + if (!syncEngineInstance.syncFailures) { + // No download or upload issues + if (!appConfig.getValueBool("monitor")) addLogEntry(); // Add an additional line break so that this is clear when using --sync + addLogEntry("Sync with Microsoft OneDrive is complete"); + } else { + addLogEntry(); + addLogEntry("Sync with Microsoft OneDrive has completed, however there are items that failed to sync."); + // Due to how the OneDrive API works 'changes' such as add new files online, rename files online, delete files online are only sent once when using the /delta API call. 
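As an illustrative aside on the /delta behaviour referenced in the comment above: a delta feed is walked page by page and each change is reported exactly once, which is why a missed download cannot simply be re-requested on the next cycle. The minimal D sketch below assumes a hypothetical 'fetch' helper for the HTTP GET; the '@odata.nextLink' and '@odata.deltaLink' properties are the documented Microsoft Graph response fields. This is a sketch of the general pattern, not this client's implementation.

// Sketch only: walk a Graph /delta feed, following @odata.nextLink pages until
// a fresh @odata.deltaLink is returned for use in the next sync cycle.
import std.json;

string walkDeltaFeed(JSONValue delegate(string url) fetch, string deltaLink) {
	string url = deltaLink;                           // start from the stored deltaLink
	while (true) {
		JSONValue response = fetch(url);              // hypothetical HTTP GET helper
		// ... each entry in response["value"] is seen exactly once here ...
		if ("@odata.nextLink" in response) {
			url = response["@odata.nextLink"].str;    // more pages in this cycle
		} else {
			return response["@odata.deltaLink"].str;  // persist for the next cycle
		}
	}
}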
+		// If any of those items failed to download, that failure needs to be tracked and a --resync issued so the failed files are requested again - unfortunately there is no easier way to recover them
+		if (!syncEngineInstance.fileDownloadFailures.empty) {
+			addLogEntry("To fix any download failures you may need to perform a --resync to ensure this system is correctly synced with your Microsoft OneDrive Account");
+		}
+		if (!syncEngineInstance.fileUploadFailures.empty) {
+			addLogEntry("To fix any upload failures you may need to perform a --resync to ensure this system is correctly synced with your Microsoft OneDrive Account");
+		}
+		// Add a line break so that, from a logging perspective, these messages are clearly separated
+		addLogEntry();
+	}
+}
 
-					// Perform a full walk of OneDrive objects because a full scan was required
-					if ((fullScanRequired) && (!syncListConfiguredFullScanOverride)){
-						logOutputMessage = "Final True-Up: Perform a full walk of OneDrive objects because a full scan was required";
-						syncCallLogOutput = "Calling sync.applyDifferences(true);";
-						if (displaySyncOptions) {
-							log.log(logOutputMessage);
-							log.log(syncCallLogOutput);
-						} else {
-							log.vdebug(logOutputMessage);
-							log.vdebug(syncCallLogOutput);
-						}
-						sync.applyDifferences(true);
-					}
+void processResyncDatabaseRemoval(string databaseFilePathToRemove) {
+	addLogEntry("Testing if we have exclusive access to local database file", ["debug"]);
+	
+	// Are we the only running instance? Test that we can open the database file path
+	itemDB = new ItemDatabase(databaseFilePathToRemove);
+	
+	// did we successfully initialise the database class?
+	if (!itemDB.isDatabaseInitialised()) {
+		// no .. destroy class
+		itemDB = null;
+		// exit application - void function, force exit this way
+		exit(-1);
+	}
+	
+	// If we have exclusive access we will not have exited
+	// destroy access test
+	destroy(itemDB);
+	// delete application sync state
+	addLogEntry("Deleting the saved application sync status ...");
+	if (!appConfig.getValueBool("dry_run")) {
+		safeRemove(databaseFilePathToRemove);
+	} else {
+		// --dry-run scenario ... technically we should not be making any local file changes
+ addLogEntry("DRY RUN: Not removing the saved application sync status"); + } +} - // Perform a full walk of OneDrive objects because a full scan was required and sync_list is in use and trigger was set in --monitor loop - if ((fullScanRequired) && (syncListConfiguredFullScanOverride)){ - logOutputMessage = "Final True-Up: Perform a full walk of OneDrive objects because a full scan was required and sync_list is in use and trigger was set in --monitor loop"; - syncCallLogOutput = "Calling sync.applyDifferences(true);"; - if (displaySyncOptions) { - log.log(logOutputMessage); - log.log(syncCallLogOutput); - } else { - log.vdebug(logOutputMessage); - log.vdebug(syncCallLogOutput); - } - sync.applyDifferences(true); - } - } - } - } - } - } +void cleanupDryRunDatabaseFiles(string dryRunDatabaseFile) { + // Temp variables + string dryRunShmFile = dryRunDatabaseFile ~ "-shm"; + string dryRunWalFile = dryRunDatabaseFile ~ "-wal"; - // sync is complete - logOutputMessage = "################################################ SYNC COMPLETE ###############################################"; - if (displaySyncOptions) { - log.log(logOutputMessage); - } else { - log.vdebug(logOutputMessage); - } + // If the dry run database exists, clean this up + if (exists(dryRunDatabaseFile)) { + // remove the existing file + addLogEntry("DRY-RUN: Removing items-dryrun.sqlite3 as it still exists for some reason", ["debug"]); + safeRemove(dryRunDatabaseFile); + } + + // silent cleanup of shm files if it exists + if (exists(dryRunShmFile)) { + // remove items-dryrun.sqlite3-shm + addLogEntry("DRY-RUN: Removing items-dryrun.sqlite3-shm as it still exists for some reason", ["debug"]); + safeRemove(dryRunShmFile); + } + + // silent cleanup of wal files if it exists + if (exists(dryRunWalFile)) { + // remove items-dryrun.sqlite3-wal + addLogEntry("DRY-RUN: Removing items-dryrun.sqlite3-wal as it still exists for some reason", ["debug"]); + safeRemove(dryRunWalFile); + } +} - count = -1; - } catch (Exception e) { - if (++count == 3) { - log.log("Giving up on sync after three attempts: ", e.msg); - throw e; - } else - log.log("Retry sync count: ", count, ": ", e.msg); +void checkForNoMountScenario() { + // If this is a 'mounted' folder, the 'mount point' should have this file to help the application stop any action to preserve data because the drive to mount is not currently mounted + if (appConfig.getValueBool("check_nomount")) { + // we were asked to check the mount point for the presence of a '.nosync' file + if (exists(".nosync")) { + addLogEntry("ERROR: .nosync file found in directory mount point. Aborting application startup process to safeguard data.", ["info", "notify"]); + Thread.sleep(dur!("msecs")(500)); + exit(EXIT_FAILURE); } - } while (count != -1); + } } -// getting around the @nogc problem +// Getting around the @nogc problem // https://p0nce.github.io/d-idioms/#Bypassing-@nogc -auto assumeNoGC(T) (T t) if (isFunctionPointer!T || isDelegate!T) -{ +auto assumeNoGC(T) (T t) if (isFunctionPointer!T || isDelegate!T) { enum attrs = functionAttributes!T | FunctionAttribute.nogc; return cast(SetFunctionAttributes!(T, functionLinkage!T, attrs)) t; } +// Catch CTRL-C extern(C) nothrow @nogc @system void exitHandler(int value) { try { assumeNoGC ( () { - log.log("Got termination signal, performing clean up"); - // if initialised, shut down the HTTP instance - if (onedriveInitialised) { - log.log("Shutting down the HTTP instance"); - oneDrive.shutdown(); - } - // was itemDb initialised? 
- if (itemDb.isDatabaseInitialised()) { + addLogEntry("Got termination signal, performing clean up"); + // Wait for all parallel jobs that depend on the database to complete + addLogEntry("Waiting for any existing upload|download process to complete"); + taskPool.finish(true); + // Was itemDb initialised? + if (itemDB.isDatabaseInitialised()) { // Make sure the .wal file is incorporated into the main db before we exit - log.log("Shutting down db connection and merging temporary data"); - itemDb.performVacuum(); - destroy(itemDb); + addLogEntry("Shutting down DB connection and merging temporary data"); + itemDB.performVacuum(); + object.destroy(itemDB); } + performStandardExitProcess(); })(); } catch(Exception e) {} exit(0); -} - +} \ No newline at end of file diff --git a/src/monitor.d b/src/monitor.d index 06aac0d7a..d5481dc30 100644 --- a/src/monitor.d +++ b/src/monitor.d @@ -1,69 +1,205 @@ -import core.sys.linux.sys.inotify; +// What is this module called? +module monitor; + +// What does this module require to function? import core.stdc.errno; -import core.sys.posix.poll, core.sys.posix.unistd; -import std.exception, std.file, std.path, std.regex, std.stdio, std.string, std.algorithm; import core.stdc.stdlib; +import core.sys.linux.sys.inotify; +import core.sys.posix.poll; +import core.sys.posix.unistd; +import core.sys.posix.sys.select; +import core.time; +import std.algorithm; +import std.concurrency; +import std.exception; +import std.file; +import std.path; +import std.regex; +import std.stdio; +import std.string; +import std.conv; + +// What other modules that we have created do we need to import? import config; -import selective; import util; -static import log; +import log; +import clientSideFiltering; -// relevant inotify events +// Relevant inotify events private immutable uint32_t mask = IN_CLOSE_WRITE | IN_CREATE | IN_DELETE | IN_MOVE | IN_IGNORED | IN_Q_OVERFLOW; -class MonitorException: ErrnoException -{ - @safe this(string msg, string file = __FILE__, size_t line = __LINE__) - { +class MonitorException: ErrnoException { + @safe this(string msg, string file = __FILE__, size_t line = __LINE__) { super(msg, file, line); } } -final class Monitor -{ - bool verbose; +shared class MonitorBackgroundWorker { // inotify file descriptor - private int fd; + int fd; + private bool working; + + void initialise() { + fd = inotify_init(); + working = false; + if (fd < 0) throw new MonitorException("inotify_init failed"); + } + + // Add this path to be monitored + private int addInotifyWatch(string pathname) { + int wd = inotify_add_watch(fd, toStringz(pathname), mask); + if (wd < 0) { + if (errno() == ENOSPC) { + // Get the current value + ulong maxInotifyWatches = to!int(strip(readText("/proc/sys/fs/inotify/max_user_watches"))); + addLogEntry("The user limit on the total number of inotify watches has been reached."); + addLogEntry("Your current limit of inotify watches is: " ~ to!string(maxInotifyWatches)); + addLogEntry("It is recommended that you change the max number of inotify watches to at least double your existing value."); + addLogEntry("To change the current max number of watches to " ~ to!string((maxInotifyWatches * 2)) ~ " run:"); + addLogEntry("EXAMPLE: sudo sysctl fs.inotify.max_user_watches=" ~ to!string((maxInotifyWatches * 2))); + } + if (errno() == 13) { + addLogEntry("WARNING: inotify_add_watch failed - permission denied: " ~ pathname, ["verbose"]); + } + // Flag any other errors + addLogEntry("ERROR: inotify_add_watch failed: " ~ pathname); + return wd; + } + + // Add 
path to inotify watch - required regardless if a '.folder' or 'folder' + addLogEntry("inotify_add_watch successfully added for: " ~ pathname, ["debug"]); + + // Do we log that we are monitoring this directory? + if (isDir(pathname)) { + // Log that this is directory is being monitored + addLogEntry("Monitoring directory: " ~ pathname, ["verbose"]); + } + return wd; + } + + int remove(int wd) { + return inotify_rm_watch(fd, wd); + } + + bool isWorking() { + return working; + } + + void watch(Tid callerTid) { + // On failure, send -1 to caller + int res; + + // wait for the caller to be ready + int isAlive = receiveOnly!int(); + + while (isAlive) { + fd_set fds; + FD_ZERO (&fds); + FD_SET(fd, &fds); + + working = true; + res = select(FD_SETSIZE, &fds, null, null, null); + + if(res == -1) { + if(errno() == EINTR) { + // Received an interrupt signal but no events are available + // try update work staus and directly watch again + receiveTimeout(dur!"seconds"(1), (int msg) { + isAlive = msg; + }); + } else { + // Error occurred, tell caller to terminate. + callCaller(callerTid, -1); + working = false; + break; + } + } else { + // Wake up caller + callCaller(callerTid, 1); + // Wait for the caller to be ready + isAlive = receiveOnly!int(); + } + } + } + + void callCaller(Tid callerTid, int msg) { + working = false; + callerTid.send(msg); + } + + void shutdown() { + if (fd > 0) { + close(fd); + fd = 0; + } + } +} + + +void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid) +{ + try { + worker.watch(callerTid); + } catch (OwnerTerminated error) { + // caller is terminated + } + worker.shutdown(); +} + +final class Monitor { + // Class variables + ApplicationConfig appConfig; + ClientSideFiltering selectiveSync; + + // Are we verbose in logging output + bool verbose = false; + // skip symbolic links + bool skip_symlinks = false; + // check for .nosync if enabled + bool check_nosync = false; + // check if initialised + bool initialised = false; + + // Configure Private Class Variables + shared(MonitorBackgroundWorker) worker; // map every inotify watch descriptor to its directory private string[int] wdToDirName; // map the inotify cookies of move_from events to their path private string[int] cookieToPath; // buffer to receive the inotify events private void[] buffer; - // skip symbolic links - bool skip_symlinks; - // check for .nosync if enabled - bool check_nosync; - - private SelectiveSync selectiveSync; + // Configure function delegates void delegate(string path) onDirCreated; void delegate(string path) onFileChanged; void delegate(string path) onDelete; void delegate(string from, string to) onMove; - - this(SelectiveSync selectiveSync) - { - assert(selectiveSync); + + // Configure the class varaible to consume the application configuration including selective sync + this(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) { + this.appConfig = appConfig; this.selectiveSync = selectiveSync; } - - void init(Config cfg, bool verbose, bool skip_symlinks, bool check_nosync) - { - this.verbose = verbose; - this.skip_symlinks = skip_symlinks; - this.check_nosync = check_nosync; + + // Initialise the monitor class + void initialise() { + // Configure the variables + skip_symlinks = appConfig.getValueBool("skip_symlinks"); + check_nosync = appConfig.getValueBool("check_nosync"); + if (appConfig.getValueLong("verbose") > 0) { + verbose = true; + } assert(onDirCreated && onFileChanged && onDelete && onMove); - fd = inotify_init(); - if (fd < 0) throw new 
 MonitorException("inotify_init failed");
 		if (!buffer) buffer = new void[4096];
-		
+		
+		worker = new shared(MonitorBackgroundWorker);
+		worker.initialise();
+		
 		// from which point do we start watching for changes?
 		string monitorPath;
-		if (cfg.getValueString("single_directory") != ""){
-			// single directory in use, monitor only this
-			monitorPath = "./" ~ cfg.getValueString("single_directory");
+		if (appConfig.getValueString("single_directory") != ""){
+			// single directory in use, monitor only this path
+			monitorPath = "./" ~ appConfig.getValueString("single_directory");
 		} else {
 			// default
 			monitorPath = ".";
@@ -71,17 +207,19 @@ final class Monitor
 		addRecursive(monitorPath);
 	}
 
-	void shutdown()
-	{
-		if (fd > 0) close(fd);
+	// Shutdown the monitor class
+	void shutdown() {
+		if(!initialised)
+			return;
+		worker.shutdown();
 		wdToDirName = null;
 	}
 
-	private void addRecursive(string dirname)
-	{
+	// Recursively add this path to be monitored
+	private void addRecursive(string dirname) {
 		// skip non existing/disappeared items
 		if (!exists(dirname)) {
-			log.vlog("Not adding non-existing/disappeared directory: ", dirname);
+			addLogEntry("Not adding non-existing/disappeared directory: " ~ dirname, ["verbose"]);
 			return;
 		}
 
@@ -93,7 +231,7 @@ final class Monitor
 		if (isDir(dirname)) {
 			if (selectiveSync.isDirNameExcluded(dirname.strip('.'))) {
 				// dont add a watch for this item
-				log.vdebug("Skipping monitoring due to skip_dir match: ", dirname);
+				addLogEntry("Skipping monitoring due to skip_dir match: " ~ dirname, ["debug"]);
 				return;
 			}
 		}
@@ -103,14 +241,14 @@ final class Monitor
 			// This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched
 			if (selectiveSync.isFileNameExcluded(dirname.strip('.'))) {
 				// dont add a watch for this item
-				log.vdebug("Skipping monitoring due to skip_file match: ", dirname);
+				addLogEntry("Skipping monitoring due to skip_file match: " ~ dirname, ["debug"]);
 				return;
 			}
 		}
 		// is the path exluded by sync_list?
 		if (selectiveSync.isPathExcludedViaSyncList(buildNormalizedPath(dirname))) {
 			// dont add a watch for this item
-			log.vdebug("Skipping monitoring due to sync_list match: ", dirname);
+			addLogEntry("Skipping monitoring due to sync_list match: " ~ dirname, ["debug"]);
 			return;
 		}
 	}
@@ -127,15 +265,27 @@ final class Monitor
 		// Do we need to check for .nosync? Only if check_nosync is true
 		if (check_nosync) {
 			if (exists(buildNormalizedPath(dirname) ~ "/.nosync")) {
-				log.vlog("Skipping watching path - .nosync found & --check-for-nosync enabled: ", buildNormalizedPath(dirname));
+				addLogEntry("Skipping watching path - .nosync found & --check-for-nosync enabled: " ~ buildNormalizedPath(dirname), ["verbose"]);
+				return;
+			}
+		}
+		
+		if (isDir(dirname)) {
+			// This is a directory
+			// is the path excluded if skip_dotfiles is configured and the path is a .folder?
+			if ((selectiveSync.getSkipDotfiles()) && (isDotFile(dirname))) {
+				// don't add a watch for this directory
+				return;
+			}
+		}
 
 		// passed all potential exclusions
 		// add inotify watch for this path / directory / file
-		log.vdebug("Calling add() for this dirname: ", dirname);
-		add(dirname);
+		addLogEntry("Calling worker.addInotifyWatch() for this dirname: " ~ dirname, ["debug"]);
+		int wd = worker.addInotifyWatch(dirname);
+		if (wd > 0) {
+			wdToDirName[wd] = buildNormalizedPath(dirname) ~ "/";
+		}
 
 		// if this is a directory, recursivly add this path
 		if (isDir(dirname)) {
@@ -144,7 +294,7 @@ final class Monitor
 			auto pathList = dirEntries(dirname, SpanMode.shallow, false);
 			foreach(DirEntry entry; pathList) {
 				if (entry.isDir) {
-					log.vdebug("Calling addRecursive() for this directory: ", entry.name);
+					addLogEntry("Calling addRecursive() for this directory: " ~ entry.name, ["debug"]);
 					addRecursive(entry.name);
 				}
 			}
@@ -158,10 +308,10 @@ final class Monitor
 			// Need to check for: Failed to stat file in error message
 			if (canFind(e.msg, "Failed to stat file")) {
 				// File system access issue
-				log.error("ERROR: The local file system returned an error with the following message:");
-				log.error("  Error Message: ", e.msg);
-				log.error("ACCESS ERROR: Please check your UID and GID access to this file, as the permissions on this file is preventing this application to read it");
-				log.error("\nFATAL: Exiting application to avoid deleting data due to local file system access issues\n");
+				addLogEntry("ERROR: The local file system returned an error with the following message:");
+				addLogEntry("  Error Message: " ~ e.msg);
+				addLogEntry("ACCESS ERROR: Please check your UID and GID access to this file, as the permissions on this file are preventing this application from reading it");
+				addLogEntry("\nFATAL: Forcibly exiting application to avoid deleting data due to local file system access issues\n");
 				// Must exit here
 				exit(-1);
 			} else {
@@ -173,85 +323,47 @@ final class Monitor
 		}
 	}
 
-	private void add(string pathname)
-	{
-		int wd = inotify_add_watch(fd, toStringz(pathname), mask);
-		if (wd < 0) {
-			if (errno() == ENOSPC) {
-				log.log("The user limit on the total number of inotify watches has been reached.");
-				log.log("To see the current max number of watches run:");
-				log.log("sysctl fs.inotify.max_user_watches");
-				log.log("To change the current max number of watches to 524288 run:");
-				log.log("sudo sysctl fs.inotify.max_user_watches=524288");
-			}
-			if (errno() == 13) {
-				if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) {
-					// no misleading output that we could not add a watch due to permission denied
-					return;
-				} else {
-					log.vlog("WARNING: inotify_add_watch failed - permission denied: ", pathname);
-					return;
-				}
-			}
-			// Flag any other errors
-			log.error("ERROR: inotify_add_watch failed: ", pathname);
-			return;
-		}
-		
-		// Add path to inotify watch - required regardless if a '.folder' or 'folder'
-		wdToDirName[wd] = buildNormalizedPath(pathname) ~ "/";
-		log.vdebug("inotify_add_watch successfully added for: ", pathname);
-		
-		// Do we log that we are monitoring this directory?
-		if (isDir(pathname)) {
-			// This is a directory
-			// is the path exluded if skip_dotfiles configured and path is a .folder?
- if ((selectiveSync.getSkipDotfiles()) && (selectiveSync.isDotFile(pathname))) { - // no misleading output that we are monitoring this directory - return; - } - // Log that this is directory is being monitored - log.vlog("Monitor directory: ", pathname); - } - } - - // remove a watch descriptor - private void remove(int wd) - { + // Remove a watch descriptor + private void remove(int wd) { assert(wd in wdToDirName); - int ret = inotify_rm_watch(fd, wd); + int ret = worker.remove(wd); if (ret < 0) throw new MonitorException("inotify_rm_watch failed"); - log.vlog("Monitored directory removed: ", wdToDirName[wd]); + addLogEntry("Monitored directory removed: " ~ to!string(wdToDirName[wd]), ["verbose"]); wdToDirName.remove(wd); } - // remove the watch descriptors associated to the given path - private void remove(const(char)[] path) - { + // Remove the watch descriptors associated to the given path + private void remove(const(char)[] path) { path ~= "/"; foreach (wd, dirname; wdToDirName) { if (dirname.startsWith(path)) { - int ret = inotify_rm_watch(fd, wd); + int ret = worker.remove(wd); if (ret < 0) throw new MonitorException("inotify_rm_watch failed"); wdToDirName.remove(wd); - log.vlog("Monitored directory removed: ", dirname); + addLogEntry("Monitored directory removed: " ~ dirname, ["verbose"]); } } } - // return the file path from an inotify event - private string getPath(const(inotify_event)* event) - { + // Return the file path from an inotify event + private string getPath(const(inotify_event)* event) { string path = wdToDirName[event.wd]; if (event.len > 0) path ~= fromStringz(event.name.ptr); - log.vdebug("inotify path event for: ", path); + addLogEntry("inotify path event for: " ~ path, ["debug"]); return path; } - void update(bool useCallbacks = true) - { + shared(MonitorBackgroundWorker) getWorker() { + return worker; + } + + // Update + void update(bool useCallbacks = true) { + if(!initialised) + return; + pollfd fds = { - fd: fd, + fd: worker.fd, events: POLLIN }; @@ -260,7 +372,7 @@ final class Monitor if (ret == -1) throw new MonitorException("poll failed"); else if (ret == 0) break; // no events available - size_t length = read(fd, buffer.ptr, buffer.length); + size_t length = read(worker.fd, buffer.ptr, buffer.length); if (length == -1) throw new MonitorException("read failed"); int i = 0; @@ -268,35 +380,38 @@ final class Monitor inotify_event *event = cast(inotify_event*) &buffer[i]; string path; string evalPath; + // inotify event debug - log.vdebug("inotify event wd: ", event.wd); - log.vdebug("inotify event mask: ", event.mask); - log.vdebug("inotify event cookie: ", event.cookie); - log.vdebug("inotify event len: ", event.len); - log.vdebug("inotify event name: ", event.name); - if (event.mask & IN_ACCESS) log.vdebug("inotify event flag: IN_ACCESS"); - if (event.mask & IN_MODIFY) log.vdebug("inotify event flag: IN_MODIFY"); - if (event.mask & IN_ATTRIB) log.vdebug("inotify event flag: IN_ATTRIB"); - if (event.mask & IN_CLOSE_WRITE) log.vdebug("inotify event flag: IN_CLOSE_WRITE"); - if (event.mask & IN_CLOSE_NOWRITE) log.vdebug("inotify event flag: IN_CLOSE_NOWRITE"); - if (event.mask & IN_MOVED_FROM) log.vdebug("inotify event flag: IN_MOVED_FROM"); - if (event.mask & IN_MOVED_TO) log.vdebug("inotify event flag: IN_MOVED_TO"); - if (event.mask & IN_CREATE) log.vdebug("inotify event flag: IN_CREATE"); - if (event.mask & IN_DELETE) log.vdebug("inotify event flag: IN_DELETE"); - if (event.mask & IN_DELETE_SELF) log.vdebug("inotify event flag: IN_DELETE_SELF"); - if 
(event.mask & IN_MOVE_SELF) log.vdebug("inotify event flag: IN_MOVE_SELF"); - if (event.mask & IN_UNMOUNT) log.vdebug("inotify event flag: IN_UNMOUNT"); - if (event.mask & IN_Q_OVERFLOW) log.vdebug("inotify event flag: IN_Q_OVERFLOW"); - if (event.mask & IN_IGNORED) log.vdebug("inotify event flag: IN_IGNORED"); - if (event.mask & IN_CLOSE) log.vdebug("inotify event flag: IN_CLOSE"); - if (event.mask & IN_MOVE) log.vdebug("inotify event flag: IN_MOVE"); - if (event.mask & IN_ONLYDIR) log.vdebug("inotify event flag: IN_ONLYDIR"); - if (event.mask & IN_DONT_FOLLOW) log.vdebug("inotify event flag: IN_DONT_FOLLOW"); - if (event.mask & IN_EXCL_UNLINK) log.vdebug("inotify event flag: IN_EXCL_UNLINK"); - if (event.mask & IN_MASK_ADD) log.vdebug("inotify event flag: IN_MASK_ADD"); - if (event.mask & IN_ISDIR) log.vdebug("inotify event flag: IN_ISDIR"); - if (event.mask & IN_ONESHOT) log.vdebug("inotify event flag: IN_ONESHOT"); - if (event.mask & IN_ALL_EVENTS) log.vdebug("inotify event flag: IN_ALL_EVENTS"); + addLogEntry("inotify event wd: " ~ to!string(event.wd), ["debug"]); + addLogEntry("inotify event mask: " ~ to!string(event.mask), ["debug"]); + addLogEntry("inotify event cookie: " ~ to!string(event.cookie), ["debug"]); + addLogEntry("inotify event len: " ~ to!string(event.len), ["debug"]); + addLogEntry("inotify event name: " ~ to!string(event.name), ["debug"]); + + // inotify event handling + if (event.mask & IN_ACCESS) addLogEntry("inotify event flag: IN_ACCESS", ["debug"]); + if (event.mask & IN_MODIFY) addLogEntry("inotify event flag: IN_MODIFY", ["debug"]); + if (event.mask & IN_ATTRIB) addLogEntry("inotify event flag: IN_ATTRIB", ["debug"]); + if (event.mask & IN_CLOSE_WRITE) addLogEntry("inotify event flag: IN_CLOSE_WRITE", ["debug"]); + if (event.mask & IN_CLOSE_NOWRITE) addLogEntry("inotify event flag: IN_CLOSE_NOWRITE", ["debug"]); + if (event.mask & IN_MOVED_FROM) addLogEntry("inotify event flag: IN_MOVED_FROM", ["debug"]); + if (event.mask & IN_MOVED_TO) addLogEntry("inotify event flag: IN_MOVED_TO", ["debug"]); + if (event.mask & IN_CREATE) addLogEntry("inotify event flag: IN_CREATE", ["debug"]); + if (event.mask & IN_DELETE) addLogEntry("inotify event flag: IN_DELETE", ["debug"]); + if (event.mask & IN_DELETE_SELF) addLogEntry("inotify event flag: IN_DELETE_SELF", ["debug"]); + if (event.mask & IN_MOVE_SELF) addLogEntry("inotify event flag: IN_MOVE_SELF", ["debug"]); + if (event.mask & IN_UNMOUNT) addLogEntry("inotify event flag: IN_UNMOUNT", ["debug"]); + if (event.mask & IN_Q_OVERFLOW) addLogEntry("inotify event flag: IN_Q_OVERFLOW", ["debug"]); + if (event.mask & IN_IGNORED) addLogEntry("inotify event flag: IN_IGNORED", ["debug"]); + if (event.mask & IN_CLOSE) addLogEntry("inotify event flag: IN_CLOSE", ["debug"]); + if (event.mask & IN_MOVE) addLogEntry("inotify event flag: IN_MOVE", ["debug"]); + if (event.mask & IN_ONLYDIR) addLogEntry("inotify event flag: IN_ONLYDIR", ["debug"]); + if (event.mask & IN_DONT_FOLLOW) addLogEntry("inotify event flag: IN_DONT_FOLLOW", ["debug"]); + if (event.mask & IN_EXCL_UNLINK) addLogEntry("inotify event flag: IN_EXCL_UNLINK", ["debug"]); + if (event.mask & IN_MASK_ADD) addLogEntry("inotify event flag: IN_MASK_ADD", ["debug"]); + if (event.mask & IN_ISDIR) addLogEntry("inotify event flag: IN_ISDIR", ["debug"]); + if (event.mask & IN_ONESHOT) addLogEntry("inotify event flag: IN_ONESHOT", ["debug"]); + if (event.mask & IN_ALL_EVENTS) addLogEntry("inotify event flag: IN_ALL_EVENTS", ["debug"]); // skip events that need to be ignored if 
(event.mask & IN_IGNORED) { @@ -304,7 +419,7 @@ final class Monitor wdToDirName.remove(event.wd); goto skip; } else if (event.mask & IN_Q_OVERFLOW) { - throw new MonitorException("Inotify overflow, events missing"); + throw new MonitorException("inotify overflow, inotify events will be missing"); } // if the event is not to be ignored, obtain path @@ -342,10 +457,10 @@ final class Monitor // handle the inotify events if (event.mask & IN_MOVED_FROM) { - log.vdebug("event IN_MOVED_FROM: ", path); + addLogEntry("event IN_MOVED_FROM: " ~ path, ["debug"]); cookieToPath[event.cookie] = path; } else if (event.mask & IN_MOVED_TO) { - log.vdebug("event IN_MOVED_TO: ", path); + addLogEntry("event IN_MOVED_TO: " ~ path, ["debug"]); if (event.mask & IN_ISDIR) addRecursive(path); auto from = event.cookie in cookieToPath; if (from) { @@ -360,32 +475,43 @@ final class Monitor } } } else if (event.mask & IN_CREATE) { - log.vdebug("event IN_CREATE: ", path); + addLogEntry("event IN_CREATE: " ~ path, ["debug"]); if (event.mask & IN_ISDIR) { addRecursive(path); if (useCallbacks) onDirCreated(path); } } else if (event.mask & IN_DELETE) { - log.vdebug("event IN_DELETE: ", path); + addLogEntry("event IN_DELETE: " ~ path, ["debug"]); if (useCallbacks) onDelete(path); } else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) { - log.vdebug("event IN_CLOSE_WRITE and ...: ", path); + addLogEntry("event IN_CLOSE_WRITE and not IN_ISDIR: " ~ path, ["debug"]); if (useCallbacks) onFileChanged(path); } else { - log.vdebug("event unhandled: ", path); + addLogEntry("event unhandled: " ~ path, ["debug"]); assert(0); } skip: i += inotify_event.sizeof + event.len; } - // assume that the items moved outside the watched directory have been deleted + // Assume that the items moved outside the watched directory have been deleted foreach (cookie, path; cookieToPath) { - log.vdebug("deleting (post loop): ", path); + addLogEntry("Deleting cookie|watch (post loop): " ~ path, ["debug"]); if (useCallbacks) onDelete(path); remove(path); cookieToPath.remove(cookie); } + // Debug Log that all inotify events are flushed + addLogEntry("inotify events flushed", ["debug"]); } } + + Tid watch() { + initialised = true; + return spawn(&startMonitorJob, worker, thisTid); + } + + bool isWorking() { + return worker.isWorking(); + } } diff --git a/src/onedrive.d b/src/onedrive.d index 29d33a46e..545afc01e 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -1,104 +1,54 @@ -import std.net.curl; -import etc.c.curl: CurlOption; -import std.datetime, std.datetime.systime, std.exception, std.file, std.json, std.path; -import std.stdio, std.string, std.uni, std.uri, std.file, std.uuid; -import std.array: split; -import core.atomic : atomicOp; -import core.stdc.stdlib; -import core.thread, std.conv, std.math; +// What is this module called? +module onedrive; + +// What does this module require to function? 
+import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; +import core.memory; +import core.thread; +import std.stdio; +import std.string; +import std.utf; +import std.file; +import std.exception; +import std.regex; +import std.json; import std.algorithm.searching; -import std.concurrency; -import progress; -import config; -import util; -import arsd.cgi; +import std.net.curl; import std.datetime; -static import log; -shared bool debugResponse = false; -private bool dryRun = false; -private bool simulateNoRefreshTokenFile = false; -private ulong retryAfterValue = 0; - -private immutable { - // Client ID / Application ID (abraunegg) - string clientIdDefault = "d50ca740-c83f-4d1b-b616-12c519384f0c"; - - // Azure Active Directory & Graph Explorer Endpoints - // Global & Defaults - string globalAuthEndpoint = "https://login.microsoftonline.com"; - string globalGraphEndpoint = "https://graph.microsoft.com"; - - // US Government L4 - string usl4AuthEndpoint = "https://login.microsoftonline.us"; - string usl4GraphEndpoint = "https://graph.microsoft.us"; - - // US Government L5 - string usl5AuthEndpoint = "https://login.microsoftonline.us"; - string usl5GraphEndpoint = "https://dod-graph.microsoft.us"; - - // Germany - string deAuthEndpoint = "https://login.microsoftonline.de"; - string deGraphEndpoint = "https://graph.microsoft.de"; - - // China - string cnAuthEndpoint = "https://login.chinacloudapi.cn"; - string cnGraphEndpoint = "https://microsoftgraph.chinacloudapi.cn"; -} - -private { - // Client ID / Application ID - string clientId = clientIdDefault; - - // Default User Agent configuration - string isvTag = "ISV"; - string companyName = "abraunegg"; - // Application name as per Microsoft Azure application registration - string appTitle = "OneDrive Client for Linux"; - - // Default Drive ID - string driveId = ""; - - // API Query URL's, based on using defaults, but can be updated by config option 'azure_ad_endpoint' - // Authentication - string authUrl = globalAuthEndpoint ~ "/common/oauth2/v2.0/authorize"; - string redirectUrl = globalAuthEndpoint ~ "/common/oauth2/nativeclient"; - string tokenUrl = globalAuthEndpoint ~ "/common/oauth2/v2.0/token"; - - // Drive Queries - string driveUrl = globalGraphEndpoint ~ "/v1.0/me/drive"; - string driveByIdUrl = globalGraphEndpoint ~ "/v1.0/drives/"; +import std.path; +import std.conv; +import std.math; +import std.uri; +import std.array; - // What is 'shared with me' Query - string sharedWithMeUrl = globalGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; - - // Item Queries - string itemByIdUrl = globalGraphEndpoint ~ "/v1.0/me/drive/items/"; - string itemByPathUrl = globalGraphEndpoint ~ "/v1.0/me/drive/root:/"; +// Required for webhooks +import arsd.cgi; +import std.concurrency; +import core.atomic : atomicOp; +import std.uuid; - // Office 365 / SharePoint Queries - string siteSearchUrl = globalGraphEndpoint ~ "/v1.0/sites?search"; - string siteDriveUrl = globalGraphEndpoint ~ "/v1.0/sites/"; +// What other modules that we have created do we need to import? 
+import config; +import log; +import util; +import curlEngine; - // Subscriptions - string subscriptionUrl = globalGraphEndpoint ~ "/v1.0/subscriptions"; -} +// Shared variables between classes +shared bool debugHTTPResponseOutput = false; -class OneDriveException: Exception -{ +class OneDriveException: Exception { // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/errors int httpStatusCode; JSONValue error; - @safe pure this(int httpStatusCode, string reason, string file = __FILE__, size_t line = __LINE__) - { + @safe pure this(int httpStatusCode, string reason, string file = __FILE__, size_t line = __LINE__) { this.httpStatusCode = httpStatusCode; this.error = error; string msg = format("HTTP request returned status code %d (%s)", httpStatusCode, reason); super(msg, file, line); } - this(int httpStatusCode, string reason, ref const JSONValue error, string file = __FILE__, size_t line = __LINE__) - { + this(int httpStatusCode, string reason, ref const JSONValue error, string file = __FILE__, size_t line = __LINE__) { this.httpStatusCode = httpStatusCode; this.error = error; string msg = format("HTTP request returned status code %d (%s)\n%s", httpStatusCode, reason, toJSON(error, true)); @@ -118,6 +68,7 @@ class OneDriveWebhook { // Cache instantiation flag in thread-local bool // Thread local private static bool instantiated_; + private RequestServer server; // Thread global private __gshared OneDriveWebhook instance_; @@ -126,6 +77,7 @@ class OneDriveWebhook { private ushort port; private Tid parentTid; private shared uint count; + private bool started; static OneDriveWebhook getOrCreate(string host, ushort port, Tid parentTid) { if (!instantiated_) { @@ -147,9 +99,24 @@ class OneDriveWebhook { this.parentTid = parentTid; this.count = 0; } + + void serve() { + spawn(&serveStatic); + this.started = true; + addLogEntry("Started webhook server"); + } + + void stop() { + if (this.started) { + server.stop(); + this.started = false; + } + addLogEntry("Stopped webhook server"); + object.destroy(server); + } // The static serve() is necessary because spawn() does not like instance methods - static serve() { + private static void serveStatic() { // we won't create the singleton instance if it hasn't been created already // such case is a bug which should crash the program and gets fixed instance_.serveImpl(); @@ -163,15 +130,15 @@ class OneDriveWebhook { } private void serveImpl() { - auto server = new RequestServer(host, port); + server = RequestServer(host, port); server.serveEmbeddedHttp!handle(); } private void handleImpl(Cgi cgi) { - if (.debugResponse) { - log.log("Webhook request: ", cgi.requestMethod, " ", cgi.requestUri); + if (debugHTTPResponseOutput) { + addLogEntry("Webhook request: " ~ to!string(cgi.requestMethod) ~ " " ~ to!string(cgi.requestUri)); if (!cgi.postBody.empty) { - log.log("Webhook post body: ", cgi.postBody); + addLogEntry("Webhook post body: " ~ to!string(cgi.postBody)); } } @@ -181,7 +148,7 @@ class OneDriveWebhook { // For validation requests, respond with the validation token passed in the query string // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/webhook-receiver-validation-request cgi.write(cgi.get["validationToken"]); - log.log("Webhook: handled validation request"); + addLogEntry("Webhook: handled validation request"); } else { // Notifications don't include any information about the changes that triggered them. // Put a refresh signal in the queue and let the main monitor loop process it. 
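The refresh signal sent to parentTid above is consumed by the monitor loop on the other side of this message queue. A minimal sketch of that consumer side, assuming std.concurrency message passing and a hypothetical performRefresh() handler (this is not the client's actual loop), looks like:

// Sketch only: drain any queued webhook refresh signals without blocking the loop.
import std.concurrency;
import core.time;

void drainWebhookSignals(void delegate() performRefresh) {
	bool notified = false;
	// A zero timeout returns immediately when no message is queued
	while (receiveTimeout(dur!"msecs"(0), (ulong signalNumber) { notified = true; })) {}
	if (notified) {
		// At least one notification arrived; trigger an out-of-cycle sync
		performRefresh();
	}
}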
@@ -189,398 +156,364 @@ class OneDriveWebhook { count.atomicOp!"+="(1); send(parentTid, to!ulong(count)); cgi.write("OK"); - log.log("Webhook: sent refresh signal #", count); + addLogEntry("Webhook: sent refresh signal #" ~ to!string(count)); } } } -final class OneDriveApi -{ - private Config cfg; - private string refreshToken, accessToken, subscriptionId; - private SysTime accessTokenExpiration; - private HTTP http; - private OneDriveWebhook webhook; - private SysTime subscriptionExpiration; - private Duration subscriptionExpirationInterval, subscriptionRenewalInterval; - private string notificationUrl; - - // if true, every new access token is printed - bool printAccessToken; - - this(Config cfg) - { - this.cfg = cfg; - http = HTTP(); - // Curl Timeout Handling - // libcurl dns_cache_timeout timeout - http.dnsTimeout = (dur!"seconds"(cfg.getValueLong("dns_timeout"))); - // Timeout for HTTPS connections - http.connectTimeout = (dur!"seconds"(cfg.getValueLong("connect_timeout"))); - // with the following settings we force - // - if there is no data flow for 10min, abort - // - if the download time for one item exceeds 1h, abort - // - // timeout for activity on connection - // this translates into Curl's CURLOPT_LOW_SPEED_TIME - // which says - // It contains the time in number seconds that the - // transfer speed should be below the CURLOPT_LOW_SPEED_LIMIT - // for the library to consider it too slow and abort. - http.dataTimeout = (dur!"seconds"(cfg.getValueLong("data_timeout"))); - // maximum time an operation is allowed to take - // This includes dns resolution, connecting, data transfer, etc. - http.operationTimeout = (dur!"seconds"(cfg.getValueLong("operation_timeout"))); - // What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6 - http.handle.set(CurlOption.ipresolve,cfg.getValueLong("ip_protocol_version")); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only - // Specify how many redirects should be allowed - http.maxRedirects(cfg.defaultMaxRedirects); - - // Do we enable curl debugging? 
- if (cfg.getValueBool("debug_https")) { - http.verbose = true; - .debugResponse = true; - - // Output what options we are using so that in the debug log this can be tracked - log.vdebug("http.dnsTimeout = ", cfg.getValueLong("dns_timeout")); - log.vdebug("http.connectTimeout = ", cfg.getValueLong("connect_timeout")); - log.vdebug("http.dataTimeout = ", cfg.getValueLong("data_timeout")); - log.vdebug("http.operationTimeout = ", cfg.getValueLong("operation_timeout")); - log.vdebug("http.CurlOption.ipresolve = ", cfg.getValueLong("ip_protocol_version")); - log.vdebug("http.maxRedirects = ", cfg.defaultMaxRedirects); - } +class OneDriveApi { + // Class variables + ApplicationConfig appConfig; + CurlEngine curlEngine; + OneDriveWebhook webhook; + + string clientId = ""; + string companyName = ""; + string authUrl = ""; + string redirectUrl = ""; + string tokenUrl = ""; + string driveUrl = ""; + string driveByIdUrl = ""; + string sharedWithMeUrl = ""; + string itemByIdUrl = ""; + string itemByPathUrl = ""; + string siteSearchUrl = ""; + string siteDriveUrl = ""; + string tenantId = ""; + string authScope = ""; + const(char)[] refreshToken = ""; + bool dryRun = false; + bool debugResponse = false; + ulong retryAfterValue = 0; + + // Webhook Subscriptions + string subscriptionUrl = ""; + string subscriptionId = ""; + SysTime subscriptionExpiration, subscriptionLastErrorAt; + Duration subscriptionExpirationInterval, subscriptionRenewalInterval, subscriptionRetryInterval; + string notificationUrl = ""; + + this(ApplicationConfig appConfig) { + // Configure the class varaible to consume the application configuration + this.appConfig = appConfig; + // Configure the major API Query URL's, based on using application configuration + // These however can be updated by config option 'azure_ad_endpoint', thus handled differently + + // Drive Queries + driveUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.globalGraphEndpoint ~ "/v1.0/drives/"; - // Update clientId if application_id is set in config file - if (cfg.getValueString("application_id") != "") { - // an application_id is set in config file - log.vdebug("Setting custom application_id to: " , cfg.getValueString("application_id")); - clientId = cfg.getValueString("application_id"); + // What is 'shared with me' Query + sharedWithMeUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + + // Item Queries + itemByIdUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.globalGraphEndpoint ~ "/v1.0/me/drive/root:/"; + + // Office 365 / SharePoint Queries + siteSearchUrl = appConfig.globalGraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.globalGraphEndpoint ~ "/v1.0/sites/"; + + // Subscriptions + subscriptionUrl = appConfig.globalGraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionExpiration = Clock.currTime(UTC()); + subscriptionLastErrorAt = SysTime.fromUnixTime(0); + subscriptionExpirationInterval = dur!"seconds"(appConfig.getValueLong("webhook_expiration_interval")); + subscriptionRenewalInterval = dur!"seconds"(appConfig.getValueLong("webhook_renewal_interval")); + subscriptionRetryInterval = dur!"seconds"(appConfig.getValueLong("webhook_retry_interval")); + notificationUrl = appConfig.getValueString("webhook_public_url"); + } + + // Initialise the OneDrive API class + bool initialise(bool keepAlive=false) { + // Initialise the curl engine + curlEngine = new CurlEngine(); + curlEngine.initialise(appConfig.getValueLong("dns_timeout"), 
appConfig.getValueLong("connect_timeout"), appConfig.getValueLong("data_timeout"), appConfig.getValueLong("operation_timeout"), appConfig.defaultMaxRedirects, appConfig.getValueBool("debug_https"), appConfig.getValueString("user_agent"), appConfig.getValueBool("force_http_11"), appConfig.getValueLong("rate_limit"), appConfig.getValueLong("ip_protocol_version"), keepAlive); + + // Authorised value to return + bool authorised = false; + + // Did the user specify --dry-run + dryRun = appConfig.getValueBool("dry_run"); + + // Did the user specify --debug-https + debugResponse = appConfig.getValueBool("debug_https"); + // Flag this so if webhooks are being used, it can also be consumed + debugHTTPResponseOutput = appConfig.getValueBool("debug_https"); + + // Set clientId to use the configured 'application_id' + clientId = appConfig.getValueString("application_id"); + if (clientId != appConfig.defaultApplicationId) { + // a custom 'application_id' was set companyName = "custom_application"; } - - // Configure tenant id value, if 'azure_tenant_id' is configured, - // otherwise use the "common" multiplexer - string tenantId = "common"; - if (cfg.getValueString("azure_tenant_id") != "") { + + // Do we have a custom Azure Tenant ID? + if (!appConfig.getValueString("azure_tenant_id").empty) { // Use the value entered by the user - tenantId = cfg.getValueString("azure_tenant_id"); + tenantId = appConfig.getValueString("azure_tenant_id"); + } else { + // set to common + tenantId = "common"; } - + + // Did the user specify a 'drive_id' ? + if (!appConfig.getValueString("drive_id").empty) { + // Update base URL's + driveUrl = driveByIdUrl ~ appConfig.getValueString("drive_id"); + itemByIdUrl = driveUrl ~ "/items"; + itemByPathUrl = driveUrl ~ "/root:/"; + } + + // Configure the authentication scope + if (appConfig.getValueBool("read_only_auth_scope")) { + // read-only authentication scopes has been requested + authScope = "&scope=Files.Read%20Files.Read.All%20Sites.Read.All%20offline_access&response_type=code&prompt=login&redirect_uri="; + } else { + // read-write authentication scopes will be used (default) + authScope = "&scope=Files.ReadWrite%20Files.ReadWrite.All%20Sites.ReadWrite.All%20offline_access&response_type=code&prompt=login&redirect_uri="; + } + // Configure Azure AD endpoints if 'azure_ad_endpoint' is configured - string azureConfigValue = cfg.getValueString("azure_ad_endpoint"); + string azureConfigValue = appConfig.getValueString("azure_ad_endpoint"); switch(azureConfigValue) { case "": if (tenantId == "common") { - log.log("Configuring Global Azure AD Endpoints"); + if (!appConfig.apiWasInitialised) addLogEntry("Configuring Global Azure AD Endpoints"); } else { - log.log("Configuring Global Azure AD Endpoints - Single Tenant Application"); + if (!appConfig.apiWasInitialised) addLogEntry("Configuring Global Azure AD Endpoints - Single Tenant Application"); } // Authentication - authUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; - tokenUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + authUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + tokenUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; break; case "USL4": - log.log("Configuring Azure AD for US Government Endpoints"); + if (!appConfig.apiWasInitialised) 
addLogEntry("Configuring Azure AD for US Government Endpoints"); // Authentication - authUrl = usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default - log.vdebug("USL4 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + addLogEntry("USL4 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]); + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.usl4AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = usl4GraphEndpoint ~ "/v1.0/me/drive"; - driveByIdUrl = usl4GraphEndpoint ~ "/v1.0/drives/"; + driveUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = usl4GraphEndpoint ~ "/v1.0/me/drive/items/"; - itemByPathUrl = usl4GraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / SharePoint Queries - siteSearchUrl = usl4GraphEndpoint ~ "/v1.0/sites?search"; - siteDriveUrl = usl4GraphEndpoint ~ "/v1.0/sites/"; + siteSearchUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/sites/"; // Shared With Me - sharedWithMeUrl = usl4GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + sharedWithMeUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; // Subscriptions - subscriptionUrl = usl4GraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionUrl = appConfig.usl4GraphEndpoint ~ "/v1.0/subscriptions"; break; case "USL5": - log.log("Configuring Azure AD for US Government Endpoints (DOD)"); + if (!appConfig.apiWasInitialised) addLogEntry("Configuring Azure AD for US Government Endpoints (DOD)"); // Authentication - authUrl = usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default - log.vdebug("USL5 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + addLogEntry("USL5 AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]); + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.usl5AuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = usl5GraphEndpoint ~ 
"/v1.0/me/drive"; - driveByIdUrl = usl5GraphEndpoint ~ "/v1.0/drives/"; + driveUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = usl5GraphEndpoint ~ "/v1.0/me/drive/items/"; - itemByPathUrl = usl5GraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / SharePoint Queries - siteSearchUrl = usl5GraphEndpoint ~ "/v1.0/sites?search"; - siteDriveUrl = usl5GraphEndpoint ~ "/v1.0/sites/"; + siteSearchUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/sites/"; // Shared With Me - sharedWithMeUrl = usl5GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + sharedWithMeUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; // Subscriptions - subscriptionUrl = usl5GraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionUrl = appConfig.usl5GraphEndpoint ~ "/v1.0/subscriptions"; break; case "DE": - log.log("Configuring Azure AD Germany"); + if (!appConfig.apiWasInitialised) addLogEntry("Configuring Azure AD Germany"); // Authentication - authUrl = deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default - log.vdebug("DE AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + addLogEntry("DE AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]); + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.deAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = deGraphEndpoint ~ "/v1.0/me/drive"; - driveByIdUrl = deGraphEndpoint ~ "/v1.0/drives/"; + driveUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.deGraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = deGraphEndpoint ~ "/v1.0/me/drive/items/"; - itemByPathUrl = deGraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / SharePoint Queries - siteSearchUrl = deGraphEndpoint ~ "/v1.0/sites?search"; - siteDriveUrl = deGraphEndpoint ~ "/v1.0/sites/"; + siteSearchUrl = appConfig.deGraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.deGraphEndpoint ~ "/v1.0/sites/"; // Shared With Me - sharedWithMeUrl = deGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + sharedWithMeUrl = appConfig.deGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; // Subscriptions - subscriptionUrl = deGraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionUrl = appConfig.deGraphEndpoint ~ "/v1.0/subscriptions"; break; case "CN": - log.log("Configuring AD China operated by 21Vianet"); + if (!appConfig.apiWasInitialised) addLogEntry("Configuring AD China operated by 21Vianet"); // Authentication 
- authUrl = cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; - tokenUrl = cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; - if (clientId == clientIdDefault) { + authUrl = appConfig.cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/authorize"; + tokenUrl = appConfig.cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/v2.0/token"; + if (clientId == appConfig.defaultApplicationId) { // application_id == default - log.vdebug("CN AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint"); - redirectUrl = globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + addLogEntry("CN AD Endpoint but default application_id, redirectUrl needs to be aligned to globalAuthEndpoint", ["debug"]); + redirectUrl = appConfig.globalAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } else { // custom application_id - redirectUrl = cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; + redirectUrl = appConfig.cnAuthEndpoint ~ "/" ~ tenantId ~ "/oauth2/nativeclient"; } // Drive Queries - driveUrl = cnGraphEndpoint ~ "/v1.0/me/drive"; - driveByIdUrl = cnGraphEndpoint ~ "/v1.0/drives/"; + driveUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive"; + driveByIdUrl = appConfig.cnGraphEndpoint ~ "/v1.0/drives/"; // Item Queries - itemByIdUrl = cnGraphEndpoint ~ "/v1.0/me/drive/items/"; - itemByPathUrl = cnGraphEndpoint ~ "/v1.0/me/drive/root:/"; + itemByIdUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive/items/"; + itemByPathUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive/root:/"; // Office 365 / SharePoint Queries - siteSearchUrl = cnGraphEndpoint ~ "/v1.0/sites?search"; - siteDriveUrl = cnGraphEndpoint ~ "/v1.0/sites/"; + siteSearchUrl = appConfig.cnGraphEndpoint ~ "/v1.0/sites?search"; + siteDriveUrl = appConfig.cnGraphEndpoint ~ "/v1.0/sites/"; // Shared With Me - sharedWithMeUrl = cnGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; + sharedWithMeUrl = appConfig.cnGraphEndpoint ~ "/v1.0/me/drive/sharedWithMe"; // Subscriptions - subscriptionUrl = cnGraphEndpoint ~ "/v1.0/subscriptions"; + subscriptionUrl = appConfig.cnGraphEndpoint ~ "/v1.0/subscriptions"; break; // Default - all other entries default: - log.log("Unknown Azure AD Endpoint request - using Global Azure AD Endpoints"); - } - - // Debug output of configured URL's - // Authentication - log.vdebug("Configured authUrl: ", authUrl); - log.vdebug("Configured redirectUrl: ", redirectUrl); - log.vdebug("Configured tokenUrl: ", tokenUrl); - - // Drive Queries - log.vdebug("Configured driveUrl: ", driveUrl); - log.vdebug("Configured driveByIdUrl: ", driveByIdUrl); - - // Shared With Me - log.vdebug("Configured sharedWithMeUrl: ", sharedWithMeUrl); - - // Item Queries - log.vdebug("Configured itemByIdUrl: ", itemByIdUrl); - log.vdebug("Configured itemByPathUrl: ", itemByPathUrl); - - // SharePoint Queries - log.vdebug("Configured siteSearchUrl: ", siteSearchUrl); - log.vdebug("Configured siteDriveUrl: ", siteDriveUrl); - - // Configure the User Agent string - if (cfg.getValueString("user_agent") == "") { - // Application User Agent string defaults - // Comply with OneDrive traffic decoration requirements - // https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online - // - Identify as ISV and include Company Name, App Name separated by a pipe character and then adding Version number separated with a slash character - // Note: If you've created an application, the recommendation is to register and use AppID and AppTitle - // The issue 
here is that currently the application is still using the 'skilion' application ID, thus no idea what the AppTitle used was. - http.setUserAgent = isvTag ~ "|" ~ companyName ~ "|" ~ appTitle ~ "/" ~ strip(import("version")); - } else { - // Use the value entered by the user - http.setUserAgent = cfg.getValueString("user_agent"); - } - - // What version of HTTP protocol do we use? - // Curl >= 7.62.0 defaults to http2 for a significant number of operations - if (cfg.getValueBool("force_http_11")) { - // Downgrade to curl to use HTTP 1.1 for all operations - log.vlog("Downgrading all HTTP operations to HTTP/1.1 due to user configuration"); - // Downgrade to HTTP 1.1 - yes version = 2 is HTTP 1.1 - http.handle.set(CurlOption.http_version,2); - } else { - // Use curl defaults - log.vlog("Using Curl defaults for all HTTP operations"); - } - - // Configure upload / download rate limits if configured - long userRateLimit = cfg.getValueLong("rate_limit"); - // 131072 = 128 KB/s - minimum for basic application operations to prevent timeouts - // A 0 value means rate is unlimited, and is the curl default - - if (userRateLimit > 0) { - // User configured rate limit - writeln("User Configured Rate Limit: ", userRateLimit); - - // If user provided rate limit is < 131072, flag that this is too low, setting to the minimum of 131072 - if (userRateLimit < 131072) { - // user provided limit too low - log.log("WARNING: User configured rate limit too low for normal application processing and preventing application timeouts. Overriding to default minimum of 131072 (128KB/s)"); - userRateLimit = 131072; - } - - // set rate limit - http.handle.set(CurlOption.max_send_speed_large,userRateLimit); - http.handle.set(CurlOption.max_recv_speed_large,userRateLimit); + if (!appConfig.apiWasInitialised) addLogEntry("Unknown Azure AD Endpoint request - using Global Azure AD Endpoints"); } - - // Explicitly set libcurl options - // https://curl.se/libcurl/c/CURLOPT_NOSIGNAL.html - // Ensure that nosignal is set to 0 - Setting CURLOPT_NOSIGNAL to 0 makes libcurl ask the system to ignore SIGPIPE signals - http.handle.set(CurlOption.nosignal,0); - // https://curl.se/libcurl/c/CURLOPT_TCP_NODELAY.html - // Ensure that TCP_NODELAY is set to 0 to ensure that TCP NAGLE is enabled - http.handle.set(CurlOption.tcp_nodelay,0); - // https://curl.se/libcurl/c/CURLOPT_FORBID_REUSE.html - // Ensure that we ARE reusing connections - setting to 0 ensures that we are reusing connections - http.handle.set(CurlOption.forbid_reuse,0); - // Do we set the dryRun handlers? 
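
// A minimal sketch, for illustration only, of the rate-limit handling being removed
// above: the user-configured 'rate_limit' value is clamped to a floor of
// 131072 bytes/s (128 KB/s) so that basic API traffic cannot time out, then
// applied to libcurl via max_send_speed_large / max_recv_speed_large.
// A value of 0 keeps the curl default (unlimited). applyRateLimit() is a
// hypothetical helper name, not a function from this patch.
import std.net.curl : HTTP;
import etc.c.curl : CurlOption;

void applyRateLimit(ref HTTP http, long userRateLimit) {
	if (userRateLimit <= 0) return;                         // 0 = unlimited, keep curl defaults
	if (userRateLimit < 131072) userRateLimit = 131072;     // enforce the 128 KB/s minimum
	http.handle.set(CurlOption.max_send_speed_large, userRateLimit);
	http.handle.set(CurlOption.max_recv_speed_large, userRateLimit);
}
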
- if (cfg.getValueBool("dry_run")) { - .dryRun = true; - if (cfg.getValueBool("logout")) { - .simulateNoRefreshTokenFile = true; - } - } - - subscriptionExpiration = Clock.currTime(UTC()); - subscriptionExpirationInterval = dur!"seconds"(cfg.getValueLong("webhook_expiration_interval")); - subscriptionRenewalInterval = dur!"seconds"(cfg.getValueLong("webhook_renewal_interval")); - notificationUrl = cfg.getValueString("webhook_public_url"); - } - - // Shutdown OneDrive HTTP construct - void shutdown() - { - // delete subscription if there exists any - deleteSubscription(); - - // reset any values to defaults, freeing any set objects - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; - // shut down the curl instance - http.shutdown(); - } - - bool init() - { - static import std.utf; - // detail what we are using for applicaion identification - log.vdebug("clientId = ", clientId); - log.vdebug("companyName = ", companyName); - log.vdebug("appTitle = ", appTitle); - - try { - driveId = cfg.getValueString("drive_id"); - if (driveId.length) { - driveUrl = driveByIdUrl ~ driveId; - itemByIdUrl = driveUrl ~ "/items"; - itemByPathUrl = driveUrl ~ "/root:/"; - } - } catch (Exception e) {} - - if (!.dryRun) { - // original code - try { - refreshToken = readText(cfg.refreshTokenFilePath); - } catch (FileException e) { - try { - return authorize(); - } catch (CurlException e) { - log.error("Cannot authorize with Microsoft OneDrive Service"); - return false; - } - } catch (std.utf.UTFException e) { - // path contains characters which generate a UTF exception - log.error("Cannot read refreshToken from: ", cfg.refreshTokenFilePath); - log.error(" Error Reason:", e.msg); - return false; - } - return true; + // Has the application been authenticated? + if (!exists(appConfig.refreshTokenFilePath)) { + addLogEntry("Application has no 'refresh_token' thus needs to be authenticated", ["debug"]); + authorised = authorise(); } else { - // --dry-run - if (!.simulateNoRefreshTokenFile) { + // Try and read the value from the appConfig if it is set, rather than trying to read the value from disk + if (!appConfig.refreshToken.empty) { + addLogEntry("Read token from appConfig", ["debug"]); + refreshToken = strip(appConfig.refreshToken); + authorised = true; + } else { + // Try and read the file from disk try { - refreshToken = readText(cfg.refreshTokenFilePath); + refreshToken = strip(readText(appConfig.refreshTokenFilePath)); + // is the refresh_token empty? + if (refreshToken.empty) { + addLogEntry("RefreshToken exists but is empty: " ~ appConfig.refreshTokenFilePath); + authorised = authorise(); + } else { + // existing token not empty + authorised = true; + // update appConfig.refreshToken + appConfig.refreshToken = refreshToken; + } } catch (FileException e) { - return authorize(); + authorised = authorise(); } catch (std.utf.UTFException e) { // path contains characters which generate a UTF exception - log.error("Cannot read refreshToken from: ", cfg.refreshTokenFilePath); - log.error(" Error Reason:", e.msg); - return false; + addLogEntry("Cannot read refreshToken from: " ~ appConfig.refreshTokenFilePath); + addLogEntry(" Error Reason:" ~ e.msg); + authorised = false; } - return true; - } else { - // --dry-run & --reauth - return authorize(); + } + + if (refreshToken.empty) { + // PROBLEM ... CODING TO DO ?????????? + addLogEntry("refreshToken is empty !!!!!!!!!! This will cause 4xx errors ... 
CODING TO DO TO HANDLE ?????"); } } + // Return if we are authorised + addLogEntry("Authorised State: " ~ to!string(authorised), ["debug"]); + return authorised; } - - bool authorize() - { - import std.stdio, std.regex; - char[] response; - string authScope; - // What authentication scope to use? - if (cfg.getValueBool("read_only_auth_scope")) { - // read-only authentication scopes has been requested - authScope = "&scope=Files.Read%20Files.Read.All%20Sites.Read.All%20offline_access&response_type=code&prompt=login&redirect_uri="; - } else { - // read-write authentication scopes will be used (default) - authScope = "&scope=Files.ReadWrite%20Files.ReadWrite.All%20Sites.ReadWrite.All%20offline_access&response_type=code&prompt=login&redirect_uri="; + + // If the API has been configured correctly, print the items that been configured + void debugOutputConfiguredAPIItems() { + // Debug output of configured URL's + // Application Identification + addLogEntry("Configured clientId " ~ clientId, ["debug"]); + addLogEntry("Configured userAgent " ~ appConfig.getValueString("user_agent"), ["debug"]); + // Authentication + addLogEntry("Configured authScope: " ~ authScope, ["debug"]); + addLogEntry("Configured authUrl: " ~ authUrl, ["debug"]); + addLogEntry("Configured redirectUrl: " ~ redirectUrl, ["debug"]); + addLogEntry("Configured tokenUrl: " ~ tokenUrl, ["debug"]); + // Drive Queries + addLogEntry("Configured driveUrl: " ~ driveUrl, ["debug"]); + addLogEntry("Configured driveByIdUrl: " ~ driveByIdUrl, ["debug"]); + // Shared With Me + addLogEntry("Configured sharedWithMeUrl: " ~ sharedWithMeUrl, ["debug"]); + // Item Queries + addLogEntry("Configured itemByIdUrl: " ~ itemByIdUrl, ["debug"]); + addLogEntry("Configured itemByPathUrl: " ~ itemByPathUrl, ["debug"]); + // SharePoint Queries + addLogEntry("Configured siteSearchUrl: " ~ siteSearchUrl, ["debug"]); + addLogEntry("Configured siteDriveUrl: " ~ siteDriveUrl, ["debug"]); + } + + // Shutdown OneDrive API Curl Engine + void shutdown() { + + // Delete subscription if there exists any + try { + deleteSubscription(); + } catch (OneDriveException e) { + logSubscriptionError(e); + } + + // Shutdown webhook server if it is running + if (webhook !is null) { + webhook.stop(); + object.destroy(webhook); } + // Reset any values to defaults, freeing any set objects + curlEngine.http.clearRequestHeaders(); + curlEngine.http.onSend = null; + curlEngine.http.onReceive = null; + curlEngine.http.onReceiveHeader = null; + curlEngine.http.onReceiveStatusLine = null; + curlEngine.http.contentLength = 0; + // Shut down the curl instance & close any open sockets + curlEngine.http.shutdown(); + // Free object and memory + object.destroy(curlEngine); + } + + // Authenticate this client against Microsoft OneDrive API + bool authorise() { + + char[] response; + // What URL should be presented to the user to access string url = authUrl ~ "?client_id=" ~ clientId ~ authScope ~ redirectUrl; - string authFilesString = cfg.getValueString("auth_files"); - string authResponseString = cfg.getValueString("auth_response"); - if (authResponseString != "") { + // Configure automated authentication if --auth-files authUrl:responseUrl is being used + string authFilesString = appConfig.getValueString("auth_files"); + string authResponseString = appConfig.getValueString("auth_response"); + + if (!authResponseString.empty) { + // read the response from authResponseString response = cast(char[]) authResponseString; } else if (authFilesString != "") { string[] authFiles = 
authFilesString.split(":"); @@ -588,22 +521,32 @@ final class OneDriveApi string responseUrl = authFiles[1]; try { - // Try and write out the auth URL to the nominated file auto authUrlFile = File(authUrl, "w"); authUrlFile.write(url); authUrlFile.close(); - } catch (std.exception.ErrnoException e) { + } catch (FileException e) { // There was a file system error // display the error message displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return false; + // Must force exit here, allow logging to be done + Thread.sleep(dur!("msecs")(500)); + exit(-1); + } catch (ErrnoException e) { + // There was a file system error + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + // Must force exit here, allow logging to be done + Thread.sleep(dur!("msecs")(500)); + exit(-1); } + + addLogEntry("Client requires authentication before proceeding. Waiting for --auth-files elements to be available."); while (!exists(responseUrl)) { Thread.sleep(dur!("msecs")(100)); } - // read response from OneDrive + // read response from provided from OneDrive try { response = cast(char[]) read(responseUrl); } catch (OneDriveException e) { @@ -617,331 +560,231 @@ final class OneDriveApi std.file.remove(authUrl); std.file.remove(responseUrl); } catch (FileException e) { - log.error("Cannot remove files ", authUrl, " ", responseUrl); + addLogEntry("Cannot remove files " ~ authUrl ~ " " ~ responseUrl); return false; } } else { - log.log("Authorize this app visiting:\n"); - write(url, "\n\n", "Enter the response uri: "); + addLogEntry("Authorise this application by visiting:\n", ["consoleOnly"]); + addLogEntry(url ~ "\n\n", ["consoleOnly"]); + addLogEntry("Enter the response uri from your browser: ", ["consoleOnlyNoNewLine"]); readln(response); - cfg.applicationAuthorizeResponseUri = true; + appConfig.applicationAuthorizeResponseUri = true; } // match the authorization code auto c = matchFirst(response, r"(?:[\?&]code=)([\w\d-.]+)"); if (c.empty) { - log.log("Invalid response uri entered"); + addLogEntry("An empty or invalid response uri was entered"); return false; } c.popFront(); // skip the whole match redeemToken(c.front); + + return true; + } - string getSiteSearchUrl() - { - // Return the actual siteSearchUrl being used and/or requested when performing 'siteQuery = onedrive.o365SiteSearch(nextLink);' call - return .siteSearchUrl; - } - - ulong getRetryAfterValue() - { - // Return the current value of retryAfterValue if it has been set to something other than 0 - return .retryAfterValue; - } - - void resetRetryAfterValue() - { - // Reset the current value of retryAfterValue to 0 after it has been used - .retryAfterValue = 0; - } - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/drive_get - JSONValue getDefaultDrive() - { + JSONValue getDefaultDriveDetails() { checkAccessTokenExpired(); - const(char)[] url; + string url; url = driveUrl; - return get(driveUrl); + return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getDefaultRoot() - { + JSONValue getDefaultRootDetails() { checkAccessTokenExpired(); - const(char)[] url; + string url; url = driveUrl ~ "/root"; return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getDriveIdRoot(const(char)[] driveId) - { + JSONValue getDriveIdRoot(string driveId) { checkAccessTokenExpired(); - const(char)[] url; + string url; url = driveByIdUrl ~ driveId ~ "/root"; return get(url); } - - // 
https://docs.microsoft.com/en-us/graph/api/drive-sharedwithme - JSONValue getSharedWithMe() - { - checkAccessTokenExpired(); - return get(sharedWithMeUrl); - } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/drive_get - JSONValue getDriveQuota(const(char)[] driveId) - { + JSONValue getDriveQuota(string driveId) { checkAccessTokenExpired(); - const(char)[] url; + string url; url = driveByIdUrl ~ driveId ~ "/"; url ~= "?select=quota"; return get(url); } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delta - JSONValue viewChangesByItemId(const(char)[] driveId, const(char)[] id, const(char)[] deltaLink) - { + + // Return the details of the specified path, by giving the path we wish to query + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get + JSONValue getPathDetails(string path) { checkAccessTokenExpired(); - const(char)[] url; - // configure deltaLink to query - if (deltaLink.empty) { - url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/delta"; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; + string url; + if ((path == ".")||(path == "/")) { + url = driveUrl ~ "/root/"; } else { - url = deltaLink; + url = itemByPathUrl ~ encodeComponent(path) ~ ":/"; } return get(url); } - + + // Return the details of the specified item based on its driveID and itemID + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get + JSONValue getPathDetailsById(string driveId, string id) { + checkAccessTokenExpired(); + string url; + url = driveByIdUrl ~ driveId ~ "/items/" ~ id; + //url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; + return get(url); + } + + // Create a shareable link for an existing file on OneDrive based on the accessScope JSON permissions + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_createlink + JSONValue createShareableLink(string driveId, string id, JSONValue accessScope) { + checkAccessTokenExpired(); + string url; + url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/createLink"; + curlEngine.http.addRequestHeader("Content-Type", "application/json"); + return post(url, accessScope.toString()); + } + + // Return the requested details of the specified path on the specified drive id and path + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get + JSONValue getPathDetailsByDriveId(string driveId, string path) { + checkAccessTokenExpired(); + string url; + // https://learn.microsoft.com/en-us/onedrive/developer/rest-api/concepts/addressing-driveitems?view=odsp-graph-online + // Required format: /drives/{drive-id}/root:/{item-path}: + url = driveByIdUrl ~ driveId ~ "/root:/" ~ encodeComponent(path) ~ ":"; + return get(url); + } + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delta - JSONValue viewChangesByDriveId(const(char)[] driveId, const(char)[] deltaLink) - { + JSONValue viewChangesByItemId(string driveId, string id, string deltaLink) { checkAccessTokenExpired(); - const(char)[] url = deltaLink; - if (url == null) { - url = driveByIdUrl ~ driveId ~ "/root/delta"; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; + + // If Business Account add addIncludeFeatureRequestHeader() which should add Prefer: Include-Feature=AddToOneDrive + if ((appConfig.accountType != "personal") && ( 
appConfig.getValueBool("sync_business_shared_items"))) { + addIncludeFeatureRequestHeader(); + } + + string url; + // configure deltaLink to query + if (deltaLink.empty) { + url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/delta"; + } else { + url = deltaLink; } return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_list_children - JSONValue listChildren(const(char)[] driveId, const(char)[] id, const(char)[] nextLink) - { + JSONValue listChildren(string driveId, string id, string nextLink) { checkAccessTokenExpired(); - const(char)[] url; + + // If Business Account add addIncludeFeatureRequestHeader() which should add Prefer: Include-Feature=AddToOneDrive + if ((appConfig.accountType != "personal") && ( appConfig.getValueBool("sync_business_shared_items"))) { + addIncludeFeatureRequestHeader(); + } + + string url; // configure URL to query if (nextLink.empty) { url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/children"; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; + //url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; } else { url = nextLink; } return get(url); } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get_content - void downloadById(const(char)[] driveId, const(char)[] id, string saveToPath, long fileSize) - { - checkAccessTokenExpired(); - scope(failure) { - if (exists(saveToPath)) { - // try and remove the file, catch error - try { - remove(saveToPath); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - } - - // Create the required local directory - string newPath = dirName(saveToPath); - - // Does the path exist locally? - if (!exists(newPath)) { - try { - log.vdebug("Requested path does not exist, creating directory structure: ", newPath); - mkdirRecurse(newPath); - // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", newPath); - newPath.setAttributes(cfg.returnRequiredDirectoryPermisions()); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - - const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/content?AVOverride=1"; - // Download file - download(url, saveToPath, fileSize); - // Does path exist? 
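
// A minimal sketch, for illustration only, of how a caller can drive the delta
// query exposed by viewChangesByItemId() above: an empty deltaLink requests a
// fresh /delta enumeration, "@odata.nextLink" is followed while more pages
// remain, and the final "@odata.deltaLink" is what gets persisted for the next
// sync round. The fetch delegate stands in for the actual API call; it is an
// assumption of this sketch, not a symbol from this patch.
import std.json : JSONValue;

void walkDeltaPages(JSONValue delegate(string deltaLink) fetch) {
	string deltaLink;                       // empty on the first call -> the /delta endpoint is built
	bool morePages = true;
	while (morePages) {
		JSONValue page = fetch(deltaLink);  // e.g. viewChangesByItemId(driveId, itemId, deltaLink)
		// each entry of page["value"] is a changed driveItem to be reconciled locally
		if ("@odata.nextLink" in page) {
			deltaLink = page["@odata.nextLink"].str;    // more results in this enumeration
		} else {
			// when present, page["@odata.deltaLink"] is saved and reused next time
			morePages = false;
		}
	}
}
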
- if (exists(saveToPath)) { - // File was downloaded successfully - configure the applicable permissions for the file - log.vdebug("Setting file permissions for: ", saveToPath); - saveToPath.setAttributes(cfg.returnRequiredFilePermisions()); - } - } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content - JSONValue simpleUpload(string localPath, string parentDriveId, string parentId, string filename, const(char)[] eTag = null) - { - checkAccessTokenExpired(); - string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/content"; - // TODO: investigate why this fails for remote folders - //if (eTag) http.addRequestHeader("If-Match", eTag); - /*else http.addRequestHeader("If-None-Match", "*");*/ - return upload(localPath, url); - } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content - JSONValue simpleUploadReplace(string localPath, string driveId, string id, const(char)[] eTag = null) - { + + // https://learn.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_search + JSONValue searchDriveForPath(string driveId, string path) { checkAccessTokenExpired(); - string url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/content"; - if (eTag) http.addRequestHeader("If-Match", eTag); - return upload(localPath, url); + string url; + url = "https://graph.microsoft.com/v1.0/drives/" ~ driveId ~ "/root/search(q='" ~ encodeComponent(path) ~ "')"; + return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_update - JSONValue updateById(const(char)[] driveId, const(char)[] id, JSONValue data, const(char)[] eTag = null) - { + JSONValue updateById(const(char)[] driveId, const(char)[] id, JSONValue data, const(char)[] eTag = null) { checkAccessTokenExpired(); const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id; - if (eTag) http.addRequestHeader("If-Match", eTag); - http.addRequestHeader("Content-Type", "application/json"); + if (eTag) curlEngine.http.addRequestHeader("If-Match", eTag); + curlEngine.http.addRequestHeader("Content-Type", "application/json"); return patch(url, data.toString()); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_delete - void deleteById(const(char)[] driveId, const(char)[] id, const(char)[] eTag = null) - { + void deleteById(const(char)[] driveId, const(char)[] id, const(char)[] eTag = null) { checkAccessTokenExpired(); const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id; //TODO: investigate why this always fail with 412 (Precondition Failed) //if (eTag) http.addRequestHeader("If-Match", eTag); - del(url); + performDelete(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_post_children - JSONValue createById(const(char)[] parentDriveId, const(char)[] parentId, JSONValue item) - { + JSONValue createById(string parentDriveId, string parentId, JSONValue item) { checkAccessTokenExpired(); - const(char)[] url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ "/children"; - http.addRequestHeader("Content-Type", "application/json"); + string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ "/children"; + curlEngine.http.addRequestHeader("Content-Type", "application/json"); return post(url, item.toString()); } - - // Return the details of the specified path - JSONValue getPathDetails(const(string) path) - { - checkAccessTokenExpired(); - const(char)[] url; - if ((path == ".")||(path == "/")) url = driveUrl ~ 
"/root/"; - else url = itemByPathUrl ~ encodeComponent(path) ~ ":/"; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the details of the specified id - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getPathDetailsById(const(char)[] driveId, const(char)[] id) - { - checkAccessTokenExpired(); - const(char)[] url; - url = driveByIdUrl ~ driveId ~ "/items/" ~ id; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the requested details of the specified path on the specified drive id and path - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get?view=odsp-graph-online - JSONValue getPathDetailsByDriveId(const(char)[] driveId, const(string) path) - { - checkAccessTokenExpired(); - const(char)[] url; - // string driveByIdUrl = "https://graph.microsoft.com/v1.0/drives/"; - // Required format: /drives/{drive-id}/root:/{item-path} - url = driveByIdUrl ~ driveId ~ "/root:/" ~ encodeComponent(path); - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the requested details of the specified path on the specified drive id and item id - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get?view=odsp-graph-online - JSONValue getPathDetailsByDriveIdAndItemId(const(char)[] driveId, const(char)[] itemId) - { - checkAccessTokenExpired(); - const(char)[] url; - // string driveByIdUrl = "https://graph.microsoft.com/v1.0/drives/"; - // Required format: /drives/{drive-id}/items/{item-id} - url = driveByIdUrl ~ driveId ~ "/items/" ~ itemId; - url ~= "?select=id,name,eTag,cTag,deleted,file,folder,root,fileSystemInfo,remoteItem,parentReference,size"; - return get(url); - } - - // Return the requested details of the specified id - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get - JSONValue getFileDetails(const(char)[] driveId, const(char)[] id) - { - checkAccessTokenExpired(); - const(char)[] url; - url = driveByIdUrl ~ driveId ~ "/items/" ~ id; - url ~= "?select=size,malware,file,webUrl,lastModifiedBy,lastModifiedDateTime"; - return get(url); - } - - // Create an anonymous read-only shareable link for an existing file on OneDrive - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_createlink - JSONValue createShareableLink(const(char)[] driveId, const(char)[] id, JSONValue accessScope) - { + + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content + JSONValue simpleUpload(string localPath, string parentDriveId, string parentId, string filename) { checkAccessTokenExpired(); - const(char)[] url; - url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/createLink"; - http.addRequestHeader("Content-Type", "application/json"); - return post(url, accessScope.toString()); + string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/content"; + return upload(localPath, url); } - - // https://dev.onedrive.com/items/move.htm - JSONValue moveByPath(const(char)[] sourcePath, JSONValue moveData) - { - // Need to use itemByPathUrl + + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_put_content + JSONValue simpleUploadReplace(string localPath, string driveId, string id) { 
checkAccessTokenExpired(); - string url = itemByPathUrl ~ encodeComponent(sourcePath); - http.addRequestHeader("Content-Type", "application/json"); - return move(url, moveData.toString()); + string url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/content"; + return upload(localPath, url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_createuploadsession - JSONValue createUploadSession(const(char)[] parentDriveId, const(char)[] parentId, const(char)[] filename, const(char)[] eTag = null, JSONValue item = null) - { + //JSONValue createUploadSession(string parentDriveId, string parentId, string filename, string eTag = null, JSONValue item = null) { + JSONValue createUploadSession(string parentDriveId, string parentId, string filename, const(char)[] eTag = null, JSONValue item = null) { checkAccessTokenExpired(); - const(char)[] url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/createUploadSession"; - if (eTag) http.addRequestHeader("If-Match", eTag); - http.addRequestHeader("Content-Type", "application/json"); + string url = driveByIdUrl ~ parentDriveId ~ "/items/" ~ parentId ~ ":/" ~ encodeComponent(filename) ~ ":/createUploadSession"; + // eTag If-Match header addition commented out for the moment + // At some point, post the creation of this upload session the eTag is being 'updated' by OneDrive, thus when uploadFragment() is used + // this generates a 412 Precondition Failed and then a 416 Requested Range Not Satisfiable + // This needs to be investigated further as to why this occurs + //if (eTag) curlEngine.http.addRequestHeader("If-Match", eTag); + curlEngine.http.addRequestHeader("Content-Type", "application/json"); return post(url, item.toString()); } - + // https://dev.onedrive.com/items/upload_large_files.htm - JSONValue uploadFragment(const(char)[] uploadUrl, string filepath, long offset, long offsetSize, long fileSize) - { + JSONValue uploadFragment(string uploadUrl, string filepath, long offset, long offsetSize, long fileSize) { checkAccessTokenExpired(); // open file as read-only in binary mode + + // If we upload a modified file, with the current known online eTag, this gets changed when the session is started - thus, the tail end of uploading + // a fragment fails with a 412 Precondition Failed and then a 416 Requested Range Not Satisfiable + // For the moment, comment out adding the If-Match header in createUploadSession, which then avoids this issue + auto file = File(filepath, "rb"); file.seek(offset); string contentRange = "bytes " ~ to!string(offset) ~ "-" ~ to!string(offset + offsetSize - 1) ~ "/" ~ to!string(fileSize); - log.vdebugNewLine("contentRange: ", contentRange); - + addLogEntry("", ["debug"]); // Add an empty newline before log output + addLogEntry("contentRange: " ~ contentRange, ["debug"]); + // function scopes scope(exit) { - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; + curlEngine.http.clearRequestHeaders(); + curlEngine.http.onSend = null; + curlEngine.http.onReceive = null; + curlEngine.http.onReceiveHeader = null; + curlEngine.http.onReceiveStatusLine = null; + curlEngine.http.contentLength = 0; // close file if open if (file.isOpen()){ // close open file @@ -949,30 +792,26 @@ final class OneDriveApi } } - http.method = HTTP.Method.put; - http.url = uploadUrl; - http.addRequestHeader("Content-Range", contentRange); - http.onSend = data => 
file.rawRead(data).length; + curlEngine.connect(HTTP.Method.put, uploadUrl); + curlEngine.http.addRequestHeader("Content-Range", contentRange); + curlEngine.http.onSend = data => file.rawRead(data).length; // convert offsetSize to ulong - http.contentLength = to!ulong(offsetSize); - auto response = perform(); - // TODO: retry on 5xx errors - checkHttpCode(response); + curlEngine.http.contentLength = to!ulong(offsetSize); + auto response = performHTTPOperation(); + checkHttpResponseCode(response); return response; } - + // https://dev.onedrive.com/items/upload_large_files.htm - JSONValue requestUploadStatus(const(char)[] uploadUrl) - { + JSONValue requestUploadStatus(string uploadUrl) { checkAccessTokenExpired(); - // when using microsoft graph the auth code is different return get(uploadUrl, true); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/site_search?view=odsp-graph-online - JSONValue o365SiteSearch(const(char)[] nextLink){ + JSONValue o365SiteSearch(string nextLink) { checkAccessTokenExpired(); - const(char)[] url; + string url; // configure URL to query if (nextLink.empty) { url = siteSearchUrl ~ "=*"; @@ -981,15 +820,73 @@ final class OneDriveApi } return get(url); } - + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/drive_list?view=odsp-graph-online JSONValue o365SiteDrives(string site_id){ checkAccessTokenExpired(); - const(char)[] url; + string url; url = siteDriveUrl ~ site_id ~ "/drives"; return get(url); } + + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get_content + void downloadById(const(char)[] driveId, const(char)[] id, string saveToPath, long fileSize) { + checkAccessTokenExpired(); + scope(failure) { + if (exists(saveToPath)) { + // try and remove the file, catch error + try { + remove(saveToPath); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } + } + + // Create the required local directory + string newPath = dirName(saveToPath); + + // Does the path exist locally? + if (!exists(newPath)) { + try { + addLogEntry("Requested local path does not exist, creating directory structure: " ~ newPath, ["debug"]); + mkdirRecurse(newPath); + // Configure the applicable permissions for the folder + addLogEntry("Setting directory permissions for: " ~ newPath, ["debug"]); + newPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } + + const(char)[] url = driveByIdUrl ~ driveId ~ "/items/" ~ id ~ "/content?AVOverride=1"; + // Download file + downloadFile(url, saveToPath, fileSize); + // Does path exist? 
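
// A minimal sketch, for illustration only, of how the Content-Range header in
// uploadFragment() above is derived when a file is pushed through an upload
// session in fixed-size pieces. The fragment size used here is an arbitrary
// example; the client calculates its own fragment sizing elsewhere.
import std.algorithm : min;
import std.conv : to;
import std.stdio : writeln;

void printFragmentRanges(long fileSize, long fragmentSize) {
	for (long offset = 0; offset < fileSize; offset += fragmentSize) {
		long offsetSize = min(fragmentSize, fileSize - offset);
		// Same "bytes <first>-<last>/<total>" format as uploadFragment()
		string contentRange = "bytes " ~ to!string(offset) ~ "-" ~ to!string(offset + offsetSize - 1) ~ "/" ~ to!string(fileSize);
		writeln(contentRange);  // each piece is PUT to the session uploadUrl with this header
	}
}

void main() {
	// a 10 MiB file sent in 4 MiB fragments -> three ranges, the last one smaller
	printFragmentRanges(10_485_760, 4_194_304);
}
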
+ if (exists(saveToPath)) { + // File was downloaded successfully - configure the applicable permissions for the file + addLogEntry("Setting file permissions for: " ~ saveToPath, ["debug"]); + saveToPath.setAttributes(appConfig.returnRequiredFilePermisions()); + } + } + + // Return the actual siteSearchUrl being used and/or requested when performing 'siteQuery = onedrive.o365SiteSearch(nextLink);' call + string getSiteSearchUrl() { + return siteSearchUrl; + } + + // Return the current value of retryAfterValue + ulong getRetryAfterValue() { + return retryAfterValue; + } + // Reset the current value of retryAfterValue to 0 after it has been used + void resetRetryAfterValue() { + retryAfterValue = 0; + } + // Create a new subscription or renew the existing subscription void createOrRenewSubscription() { checkAccessTokenExpired(); @@ -997,27 +894,51 @@ final class OneDriveApi // Kick off the webhook server first if (webhook is null) { webhook = OneDriveWebhook.getOrCreate( - cfg.getValueString("webhook_listening_host"), - to!ushort(cfg.getValueLong("webhook_listening_port")), + appConfig.getValueString("webhook_listening_host"), + to!ushort(appConfig.getValueLong("webhook_listening_port")), thisTid ); - spawn(&OneDriveWebhook.serve); + webhook.serve(); } - if (!hasValidSubscription()) { - createSubscription(); - } else if (isSubscriptionUpForRenewal()) { - try { + auto elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt; + if (elapsed < subscriptionRetryInterval) { + return; + } + + try { + if (!hasValidSubscription()) { + createSubscription(); + } else if (isSubscriptionUpForRenewal()) { renewSubscription(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - log.log("The subscription is not found on the server. Recreating subscription ..."); - createSubscription(); - } } + } catch (OneDriveException e) { + logSubscriptionError(e); + subscriptionLastErrorAt = Clock.currTime(UTC()); + addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval)); + } catch (JSONException e) { + addLogEntry("ERROR: Unexpected JSON error when attempting to validate subscription: " ~ e.msg); + subscriptionLastErrorAt = Clock.currTime(UTC()); + addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval)); } } - + + // Return the duration to next subscriptionExpiration check + Duration getNextExpirationCheckDuration() { + SysTime now = Clock.currTime(UTC()); + if (hasValidSubscription()) { + Duration elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt; + // Check if we are waiting for the next retry + if (elapsed < subscriptionRetryInterval) + return subscriptionRetryInterval - elapsed; + else + return subscriptionExpiration - now - subscriptionRenewalInterval; + } + else + return subscriptionRetryInterval; + } + + // Private functions private bool hasValidSubscription() { return !subscriptionId.empty && subscriptionExpiration > Clock.currTime(UTC()); } @@ -1025,13 +946,14 @@ final class OneDriveApi private bool isSubscriptionUpForRenewal() { return subscriptionExpiration < Clock.currTime(UTC()) + subscriptionRenewalInterval; } - + private void createSubscription() { - log.log("Initializing subscription for updates ..."); + addLogEntry("Initializing subscription for updates ..."); auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; - const(char)[] url; - url = subscriptionUrl; + string driveId = appConfig.getValueString("drive_id"); + string url = subscriptionUrl; + // Create 
a resource item based on if we have a driveId string resourceItem; if (driveId.length) { @@ -1039,7 +961,7 @@ final class OneDriveApi } else { resourceItem = "/me/drive/root"; } - + // create JSON request to create webhook subscription const JSONValue request = [ "changeType": "updated", @@ -1048,185 +970,333 @@ final class OneDriveApi "expirationDateTime": expirationDateTime.toISOExtString(), "clientState": randomUUID().toString() ]; - http.addRequestHeader("Content-Type", "application/json"); - JSONValue response; + curlEngine.http.addRequestHeader("Content-Type", "application/json"); try { - response = post(url, request.toString()); + JSONValue response = post(url, request.toString()); + + // Save important subscription metadata including id and expiration + subscriptionId = response["id"].str; + subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); + addLogEntry("Created new subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString())); } catch (OneDriveException e) { - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - - // We need to exit here, user needs to fix issue - log.error("ERROR: Unable to initialize subscriptions for updates. Please fix this issue."); - shutdown(); - exit(-1); - } + if (e.httpStatusCode == 409) { + // Take over an existing subscription on HTTP 409. + // + // Sample 409 error: + // { + // "error": { + // "code": "ObjectIdentifierInUse", + // "innerError": { + // "client-request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d", + // "date": "2023-09-26T09:27:45", + // "request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d" + // }, + // "message": "Subscription Id c0bba80e-57a3-43a7-bac2-e6f525a76e7c already exists for the requested combination" + // } + // } + + // Make sure the error code is "ObjectIdentifierInUse" + try { + if (e.error["error"]["code"].str != "ObjectIdentifierInUse") { + throw e; + } + } catch (JSONException jsonEx) { + throw e; + } - // Save important subscription metadata including id and expiration - subscriptionId = response["id"].str; - subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); - } + // Extract the existing subscription id from the error message + import std.regex; + auto idReg = ctRegex!(r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", "i"); + auto m = matchFirst(e.error["error"]["message"].str, idReg); + if (!m) { + throw e; + } + // Save the subscription id and renew it immediately since we don't know the expiration timestamp + subscriptionId = m[0]; + addLogEntry("Found existing subscription " ~ subscriptionId); + renewSubscription(); + } else { + throw e; + } + } + } + private void renewSubscription() { - log.log("Renewing subscription for updates ..."); + addLogEntry("Renewing subscription for updates ..."); auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; - const(char)[] url; + string url; url = subscriptionUrl ~ "/" ~ subscriptionId; const JSONValue request = [ "expirationDateTime": expirationDateTime.toISOExtString() ]; - http.addRequestHeader("Content-Type", "application/json"); - JSONValue response = patch(url, request.toString()); + curlEngine.http.addRequestHeader("Content-Type", "application/json"); + + try { + JSONValue response = patch(url, request.toString()); + + // Update subscription expiration from the response + subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); + addLogEntry("Renewed subscription " ~ 
subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString())); + } catch (OneDriveException e) { + if (e.httpStatusCode == 404) { + addLogEntry("The subscription is not found on the server. Recreating subscription ..."); + subscriptionId = null; + subscriptionExpiration = Clock.currTime(UTC()); + createSubscription(); + } else { + throw e; + } + } + } + + private void deleteSubscription() { + if (!hasValidSubscription()) { + addLogEntry("No valid Microsoft OneDrive webhook subscription to delete", ["debug"]); + return; + } - // Update subscription expiration from the response - subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); + string url; + url = subscriptionUrl ~ "/" ~ subscriptionId; + performDelete(url); + addLogEntry("Deleted Microsoft OneDrive webhook subscription", ["debug"]); } + + private void logSubscriptionError(OneDriveException e) { + if (e.httpStatusCode == 400) { + // Log known 400 error where Microsoft cannot get a 200 OK from the webhook endpoint + // + // Sample 400 error: + // { + // "error": { + // "code": "InvalidRequest", + // "innerError": { + // "client-request-id": "", + // "date": "", + // "request-id": "" + // }, + // "message": "Subscription validation request failed. Notification endpoint must respond with 200 OK to validation request." + // } + // } + + try { + if (e.error["error"]["code"].str == "InvalidRequest") { + import std.regex; + auto msgReg = ctRegex!(r"Subscription validation request failed", "i"); + auto m = matchFirst(e.error["error"]["message"].str, msgReg); + if (m) { + addLogEntry("ERROR: Cannot create or renew subscription: Microsoft did not get 200 OK from the webhook endpoint."); + return; + } + } + } catch (JSONException) { + // fallthrough + } + } else if (e.httpStatusCode == 401) { + // Log known 401 error where authentication failed + // + // Sample 401 error: + // { + // "error": { + // "code": "ExtensionError", + // "innerError": { + // "client-request-id": "", + // "date": "", + // "request-id": "" + // }, + // "message": "Operation: Create; Exception: [Status Code: Unauthorized; Reason: Authentication failed]" + // } + // } - private void deleteSubscription() { - if (!hasValidSubscription()) { - return; + try { + if (e.error["error"]["code"].str == "ExtensionError") { + import std.regex; + auto msgReg = ctRegex!(r"Authentication failed", "i"); + auto m = matchFirst(e.error["error"]["message"].str, msgReg); + if (m) { + addLogEntry("ERROR: Cannot create or renew subscription: Authentication failed."); + return; + } + } + } catch (JSONException) { + // fallthrough + } + } else if (e.httpStatusCode == 403) { + // Log known 403 error where the number of subscriptions on item has exceeded limit + // + // Sample 403 error: + // { + // "error": { + // "code": "ExtensionError", + // "innerError": { + // "client-request-id": "", + // "date": "", + // "request-id": "" + // }, + // "message": "Operation: Create; Exception: [Status Code: Forbidden; Reason: Number of subscriptions on item has exceeded limit]" + // } + // } + try { + if (e.error["error"]["code"].str == "ExtensionError") { + import std.regex; + auto msgReg = ctRegex!(r"Number of subscriptions on item has exceeded limit", "i"); + auto m = matchFirst(e.error["error"]["message"].str, msgReg); + if (m) { + addLogEntry("ERROR: Cannot create or renew subscription: Number of subscriptions has exceeded limit."); + return; + } + } + } catch (JSONException) { + // fallthrough + } } - const(char)[] url; - url = subscriptionUrl 
~ "/" ~ subscriptionId; - del(url); - log.log("Deleted subscription"); + // Log detailed message for unknown errors + addLogEntry("ERROR: Cannot create or renew subscription."); + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); } - - private void redeemToken(const(char)[] authCode) - { - const(char)[] postData = - "client_id=" ~ clientId ~ - "&redirect_uri=" ~ redirectUrl ~ - "&code=" ~ authCode ~ - "&grant_type=authorization_code"; - acquireToken(postData); + + private void addAccessTokenHeader() { + curlEngine.http.addRequestHeader("Authorization", appConfig.accessToken); } - - private void newToken() - { - string postData = - "client_id=" ~ clientId ~ - "&redirect_uri=" ~ redirectUrl ~ - "&refresh_token=" ~ refreshToken ~ - "&grant_type=refresh_token"; - acquireToken(postData); + + private void addIncludeFeatureRequestHeader() { + addLogEntry("Adding 'Include-Feature=AddToOneDrive' API request header as 'sync_business_shared_items' config option is enabled", ["debug"]); + curlEngine.http.addRequestHeader("Prefer", "Include-Feature=AddToOneDrive"); } - - private void acquireToken(const(char)[] postData) - { + + private void acquireToken(char[] postData) { JSONValue response; try { response = post(tokenUrl, postData); } catch (OneDriveException e) { // an error was generated - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + if ((e.httpStatusCode == 400) || (e.httpStatusCode == 401)) { + // Handle an unauthorised client + handleClientUnauthorised(e.httpStatusCode, e.msg); + } else { + if (e.httpStatusCode >= 500) { + // There was a HTTP 5xx Server Side Error - retry + acquireToken(postData); + } else { + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } + } } if (response.type() == JSONType.object) { // Has the client been configured to use read_only_auth_scope - if (cfg.getValueBool("read_only_auth_scope")) { + if (appConfig.getValueBool("read_only_auth_scope")) { // read_only_auth_scope has been configured if ("scope" in response){ string effectiveScopes = response["scope"].str(); // Display the effective authentication scopes - writeln(); - writeln("Effective API Authentication Scopes: ", effectiveScopes); + addLogEntry(); + addLogEntry("Effective API Authentication Scopes: " ~ effectiveScopes, ["verbose"]); + // if we have any write scopes, we need to tell the user to update an remove online prior authentication and exit application if (canFind(effectiveScopes, "Write")) { // effective scopes contain write scopes .. so not a read-only configuration - writeln(); - writeln("ERROR: You have authentication scopes that allow write operations. You need to remove your existing application access consent"); - writeln(); - writeln("Please login to https://account.live.com/consent/Manage and remove your existing application access consent"); - writeln(); + addLogEntry(); + addLogEntry("ERROR: You have authentication scopes that allow write operations. 
You need to remove your existing application access consent"); + addLogEntry(); + addLogEntry("Please login to https://account.live.com/consent/Manage and remove your existing application access consent"); + addLogEntry(); // force exit shutdown(); + // Must force exit here, allow logging to be done + Thread.sleep(dur!("msecs")(500)); exit(-1); } } } if ("access_token" in response){ - accessToken = "bearer " ~ response["access_token"].str(); - refreshToken = response["refresh_token"].str(); - accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer()); - if (!.dryRun) { + appConfig.accessToken = "bearer " ~ strip(response["access_token"].str); + + // Do we print the current access token + if (appConfig.verbosityCount > 1) { + if (appConfig.getValueBool("debug_https")) { + if (appConfig.getValueBool("print_token")) { + // This needs to be highly restricted in output .... + addLogEntry("CAUTION - KEEP THIS SAFE: Current access token: " ~ to!string(appConfig.accessToken), ["debug"]); + } + } + } + + refreshToken = strip(response["refresh_token"].str); + appConfig.accessTokenExpiration = Clock.currTime() + dur!"seconds"(response["expires_in"].integer()); + if (!dryRun) { + // Update the refreshToken in appConfig so that we can reuse it + if (appConfig.refreshToken.empty) { + // The access token is empty + addLogEntry("Updating appConfig.refreshToken with new refreshToken as appConfig.refreshToken is empty", ["debug"]); + appConfig.refreshToken = refreshToken; + } else { + // Is the access token different? + if (appConfig.refreshToken != refreshToken) { + // Update the memory version + addLogEntry("Updating appConfig.refreshToken with updated refreshToken", ["debug"]); + appConfig.refreshToken = refreshToken; + } + } + + // try and update the refresh_token file on disk try { - // try and update the refresh_token file - std.file.write(cfg.refreshTokenFilePath, refreshToken); - log.vdebug("Setting file permissions for: ", cfg.refreshTokenFilePath); - cfg.refreshTokenFilePath.setAttributes(cfg.returnRequiredFilePermisions()); + addLogEntry("Updating refreshToken on disk", ["debug"]); + std.file.write(appConfig.refreshTokenFilePath, refreshToken); + addLogEntry("Setting file permissions for: " ~ appConfig.refreshTokenFilePath, ["debug"]); + appConfig.refreshTokenFilePath.setAttributes(appConfig.returnRequiredFilePermisions()); } catch (FileException e) { // display the error message displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } - if (printAccessToken) writeln("New access token: ", accessToken); } else { - log.error("\nInvalid authentication response from OneDrive. Please check the response uri\n"); + addLogEntry("\nInvalid authentication response from OneDrive. Please check the response uri\n"); // re-authorize - authorize(); + authorise(); } } else { - log.vdebug("Invalid JSON response from OneDrive unable to initialize application"); + addLogEntry("Invalid response from the OneDrive API. Unable to initialise OneDrive API instance."); + // Must force exit here, allow logging to be done + Thread.sleep(dur!("msecs")(500)); + exit(-1); } } - - private void checkAccessTokenExpired() - { + + private void checkAccessTokenExpired() { try { - if (Clock.currTime() >= accessTokenExpiration) { + if (Clock.currTime() >= appConfig.accessTokenExpiration) { + addLogEntry("Microsoft OneDrive Access Token has EXPIRED. 
Must generate a new Microsoft OneDrive Access Token", ["debug"]); newToken(); + } else { + addLogEntry("Existing Microsoft OneDrive Access Token Expires: " ~ to!string(appConfig.accessTokenExpiration), ["debug"]); } } catch (OneDriveException e) { if (e.httpStatusCode == 400 || e.httpStatusCode == 401) { // flag error and notify - writeln(); - log.errorAndNotify("ERROR: Refresh token invalid, use --reauth to authorize the client again."); - writeln(); + addLogEntry(); + addLogEntry("ERROR: Refresh token invalid, use --reauth to authorize the client again.", ["info", "notify"]); + addLogEntry(); // set error message e.msg ~= "\nRefresh token invalid, use --reauth to authorize the client again"; } } } - - private void addAccessTokenHeader() - { - http.addRequestHeader("Authorization", accessToken); - } - - private JSONValue get(const(char)[] url, bool skipToken = false) - { - scope(exit) http.clearRequestHeaders(); - log.vdebug("Request URL = ", url); - http.method = HTTP.Method.get; - http.url = url; - if (!skipToken) addAccessTokenHeader(); // HACK: requestUploadStatus - JSONValue response; - response = perform(); - checkHttpCode(response); - // OneDrive API Response Debugging if --https-debug is being used - if (.debugResponse){ - log.vdebug("OneDrive API Response: ", response); - } - return response; - } - - private void del(const(char)[] url) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.del; - http.url = url; + + private void performDelete(const(char)[] url) { + scope(exit) curlEngine.http.clearRequestHeaders(); + curlEngine.connect(HTTP.Method.del, url); addAccessTokenHeader(); - auto response = perform(); - checkHttpCode(response); + auto response = performHTTPOperation(); + checkHttpResponseCode(response); } - - private void download(const(char)[] url, string filename, long fileSize) - { + + private void downloadFile(const(char)[] url, string filename, long fileSize) { // Threshold for displaying download bar long thresholdFileSize = 4 * 2^^20; // 4 MiB @@ -1239,14 +1309,14 @@ final class OneDriveApi // function scopes scope(exit) { - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; + curlEngine.http.clearRequestHeaders(); + curlEngine.http.onSend = null; + curlEngine.http.onReceive = null; + curlEngine.http.onReceiveHeader = null; + curlEngine.http.onReceiveStatusLine = null; + curlEngine.http.contentLength = 0; // Reset onProgress to not display anything for next download - http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) + curlEngine.http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) { return 0; }; @@ -1257,37 +1327,40 @@ final class OneDriveApi } } - http.method = HTTP.Method.get; - http.url = url; + curlEngine.connect(HTTP.Method.get, url); addAccessTokenHeader(); - http.onReceive = (ubyte[] data) { + curlEngine.http.onReceive = (ubyte[] data) { file.rawWrite(data); return data.length; }; if (fileSize >= thresholdFileSize){ - // Download Progress Bar - size_t iteration = 20; - Progress p = new Progress(iteration); - p.title = "Downloading"; - writeln(); + // Download Progress variables + size_t expected_total_segments = 20; + ulong start_unix_time = Clock.currTime.toUnixTime(); + int h, m, s; + string etaString; bool barInit = false; real previousProgressPercent = -1.0; real percentCheck = 5.0; - long segmentCount = 1; + long segmentCount = -1; + // 
Setup progress bar to display - http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) - { + curlEngine.http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) { // For each onProgress, what is the % of dlnow to dltotal // floor - rounds down to nearest whole number real currentDLPercent = floor(double(dlnow)/dltotal*100); + string downloadLogEntry = "Downloading: " ~ filename ~ " ... "; + // Have we started downloading? if (currentDLPercent > 0){ // We have started downloading - log.vdebugNewLine("Data Received = ", dlnow); - log.vdebug("Expected Total = ", dltotal); - log.vdebug("Percent Complete = ", currentDLPercent); + addLogEntry("", ["debug"]); // Debug new line only + addLogEntry("Data Received = " ~ to!string(dlnow), ["debug"]); + addLogEntry("Expected Total = " ~ to!string(dltotal), ["debug"]); + addLogEntry("Percent Complete = " ~ to!string(currentDLPercent), ["debug"]); + // Every 5% download we need to increment the download bar // Has the user set a data rate limit? @@ -1303,67 +1376,103 @@ final class OneDriveApi // Expected Total = 52428800 // Percent Complete = 26 - if (cfg.getValueLong("rate_limit") > 0) { + if (appConfig.getValueLong("rate_limit") > 0) { // User configured rate limit // How much data should be in each segment to qualify for 5% - long dataPerSegment = to!long(floor(double(dltotal)/iteration)); + ulong dataPerSegment = to!ulong(floor(double(dltotal)/expected_total_segments)); // How much data received do we need to validate against - long thisSegmentData = dataPerSegment * segmentCount; - long nextSegmentData = dataPerSegment * (segmentCount + 1); + ulong thisSegmentData = dataPerSegment * segmentCount; + ulong nextSegmentData = dataPerSegment * (segmentCount + 1); + // Has the data that has been received in a 5% window that we need to increment the progress bar at if ((dlnow > thisSegmentData) && (dlnow < nextSegmentData) && (previousProgressPercent != currentDLPercent) || (dlnow == dltotal)) { // Downloaded data equals approx 5% - log.vdebug("Incrementing Progress Bar using calculated 5% of data received"); - // Downloading 50% |oooooooooooooooooooo | ETA 00:01:40 - // increment progress bar - p.next(); + addLogEntry("Incrementing Progress Bar using calculated 5% of data received", ["debug"]); + + // 100% check + if (currentDLPercent != 100) { + // Not 100% yet + // Calculate the output + segmentCount++; + auto eta = calc_eta(segmentCount, expected_total_segments, start_unix_time); + dur!"seconds"(eta).split!("hours", "minutes", "seconds")(h, m, s); + etaString = format!"| ETA %02d:%02d:%02d"( h, m, s); + string percentage = leftJustify(to!string(currentDLPercent) ~ "%", 5, ' '); + addLogEntry(downloadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); + } else { + // 100% done + ulong end_unix_time = Clock.currTime.toUnixTime(); + auto upload_duration = cast(int)(end_unix_time - start_unix_time); + dur!"seconds"(upload_duration).split!("hours", "minutes", "seconds")(h, m, s); + etaString = format!"| DONE in %02d:%02d:%02d"( h, m, s); + string percentage = leftJustify(to!string(currentDLPercent) ~ "%", 5, ' '); + addLogEntry(downloadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); + } + // update values - log.vdebug("Setting previousProgressPercent to ", currentDLPercent); + addLogEntry("Setting previousProgressPercent to " ~ to!string(currentDLPercent), ["debug"]); previousProgressPercent = currentDLPercent; - log.vdebug("Incrementing segmentCount"); + addLogEntry("Incrementing 
segmentCount", ["debug"]); segmentCount++; } } else { // Is currentDLPercent divisible by 5 leaving remainder 0 and does previousProgressPercent not equal currentDLPercent if ((isIdentical(fmod(currentDLPercent, percentCheck), 0.0)) && (previousProgressPercent != currentDLPercent)) { // currentDLPercent matches a new increment - log.vdebug("Incrementing Progress Bar using fmod match"); - // Downloading 50% |oooooooooooooooooooo | ETA 00:01:40 - // increment progress bar - p.next(); + addLogEntry("Incrementing Progress Bar using fmod match", ["debug"]); + + // 100% check + if (currentDLPercent != 100) { + // Not 100% yet + // Calculate the output + segmentCount++; + auto eta = calc_eta(segmentCount, expected_total_segments, start_unix_time); + dur!"seconds"(eta).split!("hours", "minutes", "seconds")(h, m, s); + etaString = format!"| ETA %02d:%02d:%02d"( h, m, s); + string percentage = leftJustify(to!string(currentDLPercent) ~ "%", 5, ' '); + addLogEntry(downloadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); + } else { + // 100% done + ulong end_unix_time = Clock.currTime.toUnixTime(); + auto upload_duration = cast(int)(end_unix_time - start_unix_time); + dur!"seconds"(upload_duration).split!("hours", "minutes", "seconds")(h, m, s); + etaString = format!"| DONE in %02d:%02d:%02d"( h, m, s); + string percentage = leftJustify(to!string(currentDLPercent) ~ "%", 5, ' '); + addLogEntry(downloadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); + } + // update values previousProgressPercent = currentDLPercent; } } } else { if ((currentDLPercent == 0) && (!barInit)) { - // Initialise the download bar at 0% - // Downloading 0% | | ETA --:--:--: - p.next(); + // Calculate the output + segmentCount++; + etaString = "| ETA --:--:--"; + string percentage = leftJustify(to!string(currentDLPercent) ~ "%", 5, ' '); + addLogEntry(downloadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); barInit = true; } } return 0; }; - // Perform download & display progress bar + // Perform download try { // try and catch any curl error - http.perform(); + curlEngine.http.perform(); // Check the HTTP Response headers - needed for correct 429 handling // check will be performed in checkHttpCode() - writeln(); // Reset onProgress to not display anything for next download done using exit scope } catch (CurlException e) { displayOneDriveErrorMessage(e.msg, getFunctionName!({})); } - // free progress bar memory - p = null; } else { // No progress bar try { // try and catch any curl error - http.perform(); + curlEngine.http.perform(); // Check the HTTP Response headers - needed for correct 429 handling // check will be performed in checkHttpCode() } catch (CurlException e) { @@ -1378,81 +1487,57 @@ final class OneDriveApi checkHttpCode(); } - private auto patch(T)(const(char)[] url, const(T)[] patchData) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.patch; - http.url = url; - addAccessTokenHeader(); - auto response = perform(patchData); - checkHttpCode(response); + private JSONValue get(string url, bool skipToken = false) { + scope(exit) curlEngine.http.clearRequestHeaders(); + addLogEntry("Request URL = " ~ url, ["debug"]); + curlEngine.connect(HTTP.Method.get, url); + if (!skipToken) addAccessTokenHeader(); // HACK: requestUploadStatus + JSONValue response; + response = performHTTPOperation(); + checkHttpResponseCode(response); + // OneDrive API Response Debugging if --https-debug is being used + if (debugResponse){ + addLogEntry("OneDrive API Response: " ~ to!string(response), 
["debug"]); + } return response; } - - private auto post(T)(const(char)[] url, const(T)[] postData) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.post; - http.url = url; + + private void newToken() { + addLogEntry("Need to generate a new access token for Microsoft OneDrive", ["debug"]); + auto postData = appender!(string)(); + postData ~= "client_id=" ~ clientId; + postData ~= "&redirect_uri=" ~ redirectUrl; + postData ~= "&refresh_token=" ~ to!string(refreshToken); + postData ~= "&grant_type=refresh_token"; + acquireToken(postData.data.dup); + } + + private auto patch(T)(const(char)[] url, const(T)[] patchData) { + scope(exit) curlEngine.http.clearRequestHeaders(); + curlEngine.connect(HTTP.Method.patch, url); addAccessTokenHeader(); - auto response = perform(postData); - checkHttpCode(response); + auto response = perform(patchData); + checkHttpResponseCode(response); return response; } - - private auto move(T)(const(char)[] url, const(T)[] postData) - { - scope(exit) http.clearRequestHeaders(); - http.method = HTTP.Method.patch; - http.url = url; + + private auto post(T)(string url, const(T)[] postData) { + scope(exit) curlEngine.http.clearRequestHeaders(); + curlEngine.connect(HTTP.Method.post, url); addAccessTokenHeader(); auto response = perform(postData); - // Check the HTTP response code, which, if a 429, will also check response headers - checkHttpCode(); - return response; - } - - private JSONValue upload(string filepath, string url) - { - checkAccessTokenExpired(); - // open file as read-only in binary mode - auto file = File(filepath, "rb"); - - // function scopes - scope(exit) { - http.clearRequestHeaders(); - http.onSend = null; - http.onReceive = null; - http.onReceiveHeader = null; - http.onReceiveStatusLine = null; - http.contentLength = 0; - // close file if open - if (file.isOpen()){ - // close open file - file.close(); - } - } - - http.method = HTTP.Method.put; - http.url = url; - addAccessTokenHeader(); - http.addRequestHeader("Content-Type", "application/octet-stream"); - http.onSend = data => file.rawRead(data).length; - http.contentLength = file.size; - auto response = perform(); - checkHttpCode(response); + checkHttpResponseCode(response); return response; } - - private JSONValue perform(const(void)[] sendData) - { + + private JSONValue perform(const(void)[] sendData) { scope(exit) { - http.onSend = null; - http.contentLength = 0; + curlEngine.http.onSend = null; + curlEngine.http.contentLength = 0; } if (sendData) { - http.contentLength = sendData.length; - http.onSend = (void[] buf) { + curlEngine.http.contentLength = sendData.length; + curlEngine.http.onSend = (void[] buf) { import std.algorithm: min; size_t minLen = min(buf.length, sendData.length); if (minLen == 0) return 0; @@ -1461,34 +1546,33 @@ final class OneDriveApi return minLen; }; } else { - http.onSend = buf => 0; + curlEngine.http.onSend = buf => 0; } - auto response = perform(); + auto response = performHTTPOperation(); return response; } - - private JSONValue perform() - { - scope(exit) http.onReceive = null; + + private JSONValue performHTTPOperation() { + scope(exit) curlEngine.http.onReceive = null; char[] content; JSONValue json; - http.onReceive = (ubyte[] data) { + curlEngine.http.onReceive = (ubyte[] data) { content ~= data; // HTTP Server Response Code Debugging if --https-debug is being used - if (.debugResponse){ - log.vdebug("onedrive.perform() => OneDrive HTTP Server Response: ", http.statusLine.code); + if (debugResponse){ + 
addLogEntry("onedrive.performHTTPOperation() => OneDrive HTTP Server Response: " ~ to!string(curlEngine.http.statusLine.code), ["debug"]); } return data.length; }; try { - http.perform(); + curlEngine.http.perform(); // Check the HTTP Response headers - needed for correct 429 handling checkHTTPResponseHeaders(); } catch (CurlException e) { // Parse and display error message received from OneDrive - log.vdebug("onedrive.perform() Generated a OneDrive CurlException"); + addLogEntry("onedrive.performHTTPOperation() Generated a OneDrive CurlException", ["debug"]); auto errorArray = splitLines(e.msg); string errorMessage = errorArray[0]; @@ -1506,25 +1590,28 @@ final class OneDriveApi bool retrySuccess = false; SysTime currentTime; + // Connectivity to Microsoft OneDrive was lost + addLogEntry("Internet connectivity to Microsoft OneDrive service has been lost .. re-trying in the background"); + // what caused the initial curl exception? - if (canFind(errorMessage, "Couldn't connect to server on handle")) log.vdebug("Unable to connect to server - HTTPS access blocked?"); - if (canFind(errorMessage, "Couldn't resolve host name on handle")) log.vdebug("Unable to resolve server - DNS access blocked?"); - if (canFind(errorMessage, "Timeout was reached on handle")) log.vdebug("A timeout was triggered - data too slow, no response ... use --debug-https to diagnose further"); + if (canFind(errorMessage, "Couldn't connect to server on handle")) addLogEntry("Unable to connect to server - HTTPS access blocked?", ["debug"]); + if (canFind(errorMessage, "Couldn't resolve host name on handle")) addLogEntry("Unable to resolve server - DNS access blocked?", ["debug"]); + if (canFind(errorMessage, "Timeout was reached on handle")) addLogEntry("A timeout was triggered - data too slow, no response ... 
use --debug-https to diagnose further", ["debug"]); while (!retrySuccess){ try { // configure libcurl to perform a fresh connection - log.vdebug("Configuring libcurl to use a fresh connection for re-try"); - http.handle.set(CurlOption.fresh_connect,1); + addLogEntry("Configuring libcurl to use a fresh connection for re-try", ["debug"]); + curlEngine.http.handle.set(CurlOption.fresh_connect,1); // try the access - http.perform(); + curlEngine.http.perform(); // Check the HTTP Response headers - needed for correct 429 handling checkHTTPResponseHeaders(); // no error from http.perform() on re-try - log.log("Internet connectivity to Microsoft OneDrive service has been restored"); + addLogEntry("Internet connectivity to Microsoft OneDrive service has been restored"); // unset the fresh connect option as this then creates performance issues if left enabled - log.vdebug("Unsetting libcurl to use a fresh connection as this causes a performance impact if left enabled"); - http.handle.set(CurlOption.fresh_connect,0); + addLogEntry("Unsetting libcurl to use a fresh connection as this causes a performance impact if left enabled", ["debug"]); + curlEngine.http.handle.set(CurlOption.fresh_connect,0); // connectivity restored retrySuccess = true; } catch (CurlException e) { @@ -1534,15 +1621,15 @@ final class OneDriveApi retryAttempts++; if (canFind(e.msg, "Couldn't connect to server on handle") || canFind(e.msg, "Couldn't resolve host name on handle") || canFind(errorMessage, "Timeout was reached on handle")) { // no access to Internet - writeln(); - log.error("ERROR: There was a timeout in accessing the Microsoft OneDrive service - Internet connectivity issue?"); + addLogEntry(); + addLogEntry("ERROR: There was a timeout in accessing the Microsoft OneDrive service - Internet connectivity issue?"); // what is the error reason to assis the user as what to check if (canFind(e.msg, "Couldn't connect to server on handle")) { - log.log(" - Check HTTPS access or Firewall Rules"); + addLogEntry(" - Check HTTPS access or Firewall Rules"); timestampAlign = 9; } if (canFind(e.msg, "Couldn't resolve host name on handle")) { - log.log(" - Check DNS resolution or Firewall Rules"); + addLogEntry(" - Check DNS resolution or Firewall Rules"); timestampAlign = 0; } @@ -1553,8 +1640,8 @@ final class OneDriveApi // display retry information currentTime.fracSecs = Duration.zero; auto timeString = currentTime.toString(); - log.vlog(" Retry attempt: ", retryAttempts); - log.vlog(" This attempt timestamp: ", timeString); + addLogEntry(" Retry attempt: " ~ to!string(retryAttempts), ["verbose"]); + addLogEntry(" This attempt timestamp: " ~ timeString, ["verbose"]); if (thisBackOffInterval > maxBackoffInterval) { thisBackOffInterval = maxBackoffInterval; } @@ -1562,9 +1649,8 @@ final class OneDriveApi // detail when the next attempt will be tried // factor in the delay for curl to generate the exception - otherwise the next timestamp appears to be 'out' even though technically correct auto nextRetry = currentTime + dur!"seconds"(thisBackOffInterval) + dur!"seconds"(timestampAlign); - log.vlog(" Next retry in approx: ", (thisBackOffInterval + timestampAlign), " seconds"); - log.vlog(" Next retry approx: ", nextRetry); - + addLogEntry(" Next retry in approx: " ~ to!string((thisBackOffInterval + timestampAlign)) ~ " seconds", ["verbose"]); + addLogEntry(" Next retry approx: " ~ to!string(nextRetry), ["verbose"]); // thread sleep Thread.sleep(dur!"seconds"(thisBackOffInterval)); } @@ -1576,24 +1662,46 @@ final class OneDriveApi } } 
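The retry loop above waits thisBackOffInterval seconds between reconnection attempts and clamps that wait at maxBackoffInterval. Purely as a standalone illustration, a capped back-off delay in D could look like the sketch below; the doubling growth is an assumption made for the example, since this hunk shows only the cap and the sleep, not how the interval itself is derived.

    import core.thread : Thread;
    import core.time : dur;
    import std.algorithm : min;

    // Illustrative sketch only: capped back-off delay between reconnection attempts.
    // The doubling policy is assumed; the patch shows only the cap and the sleep.
    void backOffWait(int retryAttempts, long maxBackoffInterval = 120) {
        long thisBackOffInterval = min(2L ^^ min(retryAttempts, 16), maxBackoffInterval);
        Thread.sleep(dur!"seconds"(thisBackOffInterval));
    }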
if (retryAttempts >= retryCount) { - log.error(" ERROR: Unable to reconnect to the Microsoft OneDrive service after ", retryCount, " attempts lasting over 1.2 years!"); + addLogEntry(" ERROR: Unable to reconnect to the Microsoft OneDrive service after " ~ to!string(retryCount) ~ " attempts lasting over 1.2 years!"); throw new OneDriveException(408, "Request Timeout - HTTP 408 or Internet down?"); } } else { - // Log that an error was returned - log.error("ERROR: OneDrive returned an error with the following message:"); - // Some other error was returned - log.error(" Error Message: ", errorMessage); - log.error(" Calling Function: ", getFunctionName!({})); + + // what error was returned? + if (canFind(errorMessage, "Problem with the SSL CA cert (path? access rights?) on handle")) { + // error setting certificate verify locations: + // CAfile: /etc/pki/tls/certs/ca-bundle.crt + // CApath: none + // + // Tell the Curl Engine to bypass SSL check - essentially SSL is passing back a bad value due to 'stdio' compile time option + // Further reading: + // https://github.com/curl/curl/issues/6090 + // https://github.com/openssl/openssl/issues/7536 + // https://stackoverflow.com/questions/45829588/brew-install-fails-curl77-error-setting-certificate-verify + // https://forum.dlang.org/post/vwvkbubufexgeuaxhqfl@forum.dlang.org + + addLogEntry("Problem with reading the SSL CA cert via libcurl - please repair your system SSL CA Certificates"); + // Must force exit here, allow logging to be done. If needed later, we could re-use setDisableSSLVerifyPeer() + Thread.sleep(dur!("msecs")(500)); + exit(-1); + } else { + // Log that an error was returned + addLogEntry("ERROR: OneDrive returned an error with the following message:"); + // Some other error was returned + addLogEntry(" Error Message: " ~ errorMessage); + addLogEntry(" Calling Function: " ~ getFunctionName!({})); - // Was this a curl initialization error? - if (canFind(errorMessage, "Failed initialization on handle")) { - // initialization error ... prevent a run-away process if we have zero disk space - ulong localActualFreeSpace = to!ulong(getAvailableDiskSpace(".")); - if (localActualFreeSpace == 0) { - // force exit - shutdown(); - exit(-1); + // Was this a curl initialization error? + if (canFind(errorMessage, "Failed initialization on handle")) { + // initialization error ... 
prevent a run-away process if we have zero disk space + ulong localActualFreeSpace = getAvailableDiskSpace("."); + if (localActualFreeSpace == 0) { + // force exit + shutdown(); + // Must force exit here, allow logging to be done + Thread.sleep(dur!("msecs")(500)); + exit(-1); + } } } } @@ -1605,30 +1713,145 @@ final class OneDriveApi json = content.parseJSON(); } catch (JSONException e) { // Log that a JSON Exception was caught, dont output the HTML response from OneDrive - log.vdebug("JSON Exception caught when performing HTTP operations - use --debug-https to diagnose further"); + addLogEntry("JSON Exception caught when performing HTTP operations - use --debug-https to diagnose further", ["debug"]); } return json; } + + private void redeemToken(char[] authCode){ + char[] postData = + "client_id=" ~ clientId ~ + "&redirect_uri=" ~ redirectUrl ~ + "&code=" ~ authCode ~ + "&grant_type=authorization_code"; + acquireToken(postData); + } + + private JSONValue upload(string filepath, string url) { + checkAccessTokenExpired(); + // open file as read-only in binary mode + auto file = File(filepath, "rb"); + + // function scopes + scope(exit) { + curlEngine.http.clearRequestHeaders(); + curlEngine.http.onSend = null; + curlEngine.http.onReceive = null; + curlEngine.http.onReceiveHeader = null; + curlEngine.http.onReceiveStatusLine = null; + curlEngine.http.contentLength = 0; + // close file if open + if (file.isOpen()){ + // close open file + file.close(); + } + } - private void checkHTTPResponseHeaders() - { + curlEngine.connect(HTTP.Method.put, url); + addAccessTokenHeader(); + curlEngine.http.addRequestHeader("Content-Type", "application/octet-stream"); + curlEngine.http.onSend = data => file.rawRead(data).length; + curlEngine.http.contentLength = file.size; + auto response = performHTTPOperation(); + checkHttpResponseCode(response); + return response; + } + + private void checkHTTPResponseHeaders() { // Get the HTTP Response headers - needed for correct 429 handling - auto responseHeaders = http.responseHeaders(); - if (.debugResponse){ - log.vdebug("http.perform() => HTTP Response Headers: ", responseHeaders); + auto responseHeaders = curlEngine.http.responseHeaders(); + if (debugResponse){ + addLogEntry("curlEngine.http.perform() => HTTP Response Headers: " ~ to!string(responseHeaders), ["debug"]); } // is retry-after in the response headers - if ("retry-after" in http.responseHeaders) { + if ("retry-after" in curlEngine.http.responseHeaders) { // Set the retry-after value - log.vdebug("http.perform() => Received a 'Retry-After' Header Response with the following value: ", http.responseHeaders["retry-after"]); - log.vdebug("http.perform() => Setting retryAfterValue to: ", http.responseHeaders["retry-after"]); - .retryAfterValue = to!ulong(http.responseHeaders["retry-after"]); + addLogEntry("curlEngine.http.perform() => Received a 'Retry-After' Header Response with the following value: " ~ to!string(curlEngine.http.responseHeaders["retry-after"]), ["debug"]); + addLogEntry("curlEngine.http.perform() => Setting retryAfterValue to: " ~ to!string(curlEngine.http.responseHeaders["retry-after"]), ["debug"]); + retryAfterValue = to!ulong(curlEngine.http.responseHeaders["retry-after"]); } } - private void checkHttpCode() - { + private void checkHttpResponseCode(JSONValue response) { + switch(curlEngine.http.statusLine.code) { + // 0 - OK ... HTTP2 version of 200 OK + case 0: + break; + // 100 - Continue + case 100: + break; + // 200 - OK + case 200: + // No Log .. 
+ break; + // 201 - Created OK + // 202 - Accepted + // 204 - Deleted OK + case 201,202,204: + // Log if --debug-https logging is used + if (debugHTTPResponseOutput) { + addLogEntry("OneDrive Response: '" ~ to!string(curlEngine.http.statusLine.code) ~ " - " ~ to!string(curlEngine.http.statusLine.reason) ~ "'", ["debug"]); + } + break; + + // 302 - resource found and available at another location, redirect + case 302: + // Log if --debug-https logging is used + if (debugHTTPResponseOutput) { + addLogEntry("OneDrive Response: '" ~ to!string(curlEngine.http.statusLine.code) ~ " - " ~ to!string(curlEngine.http.statusLine.reason) ~ "'", ["debug"]); + } + break; + + // 400 - Bad Request + case 400: + // Bad Request .. how should we act? + // make sure this is thrown so that it is caught + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + // 403 - Forbidden + case 403: + // OneDrive responded that the user is forbidden + addLogEntry("OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error", ["verbose"]); + + // Throw this as a specific exception so this is caught when performing 'siteQuery = onedrive.o365SiteSearch(nextLink);' call + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + // 412 - Precondition Failed + case 412: + // Throw this as a specific exception so this is caught when performing sync.uploadLastModifiedTime + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + // Server side (OneDrive) Errors + // 500 - Internal Server Error + // 502 - Bad Gateway + // 503 - Service Unavailable + // 504 - Gateway Timeout (Issue #320) + case 500: + // Throw this as a specific exception so this is caught + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + case 502: + // Throw this as a specific exception so this is caught + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + case 503: + // Throw this as a specific exception so this is caught + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + case 504: + // Throw this as a specific exception so this is caught + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + + // Default - all other errors that are not a 2xx or a 302 + default: + if (curlEngine.http.statusLine.code / 100 != 2 && curlEngine.http.statusLine.code != 302) { + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason, response); + } + } + } + + private void checkHttpCode() { // https://dev.onedrive.com/misc/errors.htm // https://developer.overdrive.com/docs/reference-guide @@ -1673,7 +1896,7 @@ final class OneDriveApi */ - switch(http.statusLine.code) + switch(curlEngine.http.statusLine.code) { // 0 - OK ... 
HTTP2 version of 200 OK case 0: @@ -1689,54 +1912,60 @@ final class OneDriveApi // 202 - Accepted // 204 - Deleted OK case 201,202,204: - // No actions, but log if verbose logging - //log.vlog("OneDrive Response: '", http.statusLine.code, " - ", http.statusLine.reason, "'"); + // Log if --debug-https logging is used + if (debugHTTPResponseOutput) { + addLogEntry("OneDrive Response: '" ~ to!string(curlEngine.http.statusLine.code) ~ " - " ~ to!string(curlEngine.http.statusLine.reason) ~ "'", ["debug"]); + } break; // 302 - resource found and available at another location, redirect case 302: + // Log if --debug-https logging is used + if (debugHTTPResponseOutput) { + addLogEntry("OneDrive Response: '" ~ to!string(curlEngine.http.statusLine.code) ~ " - " ~ to!string(curlEngine.http.statusLine.reason) ~ "'", ["debug"]); + } break; // 400 - Bad Request case 400: // Bad Request .. how should we act? - log.vlog("OneDrive returned a 'HTTP 400 - Bad Request' - gracefully handling error"); + addLogEntry("OneDrive returned a 'HTTP 400 - Bad Request' - gracefully handling error", ["verbose"]); break; - + // 403 - Forbidden case 403: // OneDrive responded that the user is forbidden - log.vlog("OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error"); + addLogEntry("OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error", ["verbose"]); break; // 404 - Item not found case 404: // Item was not found - do not throw an exception - log.vlog("OneDrive returned a 'HTTP 404 - Item not found' - gracefully handling error"); + addLogEntry("OneDrive returned a 'HTTP 404 - Item not found' - gracefully handling error", ["verbose"]); break; // 408 - Request Timeout case 408: // Request to connect to OneDrive service timed out - log.vlog("Request Timeout - gracefully handling error"); + addLogEntry("Request Timeout - gracefully handling error", ["verbose"]); throw new OneDriveException(408, "Request Timeout - HTTP 408 or Internet down?"); // 409 - Conflict case 409: // Conflict handling .. how should we act? This only really gets triggered if we are using --local-first & we remove items.db as the DB thinks the file is not uploaded but it is - log.vlog("OneDrive returned a 'HTTP 409 - Conflict' - gracefully handling error"); + addLogEntry("OneDrive returned a 'HTTP 409 - Conflict' - gracefully handling error", ["verbose"]); break; // 412 - Precondition Failed case 412: // A precondition provided in the request (such as an if-match header) does not match the resource's current state. - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error"); + addLogEntry("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error", ["verbose"]); break; // 415 - Unsupported Media Type case 415: // Unsupported Media Type ... 
sometimes triggered on image files, especially PNG - log.vlog("OneDrive returned a 'HTTP 415 - Unsupported Media Type' - gracefully handling error"); + addLogEntry("OneDrive returned a 'HTTP 415 - Unsupported Media Type' - gracefully handling error", ["verbose"]); break; // 429 - Too Many Requests @@ -1745,8 +1974,8 @@ final class OneDriveApi // Check the HTTP Response headers - needed for correct 429 handling checkHTTPResponseHeaders(); // https://docs.microsoft.com/en-us/sharepoint/dev/general-development/how-to-avoid-getting-throttled-or-blocked-in-sharepoint-online - log.vlog("OneDrive returned a 'HTTP 429 - Too Many Requests' - gracefully handling error"); - throw new OneDriveException(http.statusLine.code, http.statusLine.reason); + addLogEntry("OneDrive returned a 'HTTP 429 - Too Many Requests' - gracefully handling error", ["verbose"]); + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason); // Server side (OneDrive) Errors // 500 - Internal Server Error @@ -1755,133 +1984,27 @@ final class OneDriveApi // 504 - Gateway Timeout (Issue #320) case 500: // No actions - log.vlog("OneDrive returned a 'HTTP 500 Internal Server Error' - gracefully handling error"); + addLogEntry("OneDrive returned a 'HTTP 500 Internal Server Error' - gracefully handling error", ["verbose"]); break; case 502: // No actions - log.vlog("OneDrive returned a 'HTTP 502 Bad Gateway Error' - gracefully handling error"); + addLogEntry("OneDrive returned a 'HTTP 502 Bad Gateway Error' - gracefully handling error", ["verbose"]); break; case 503: // No actions - log.vlog("OneDrive returned a 'HTTP 503 Service Unavailable Error' - gracefully handling error"); + addLogEntry("OneDrive returned a 'HTTP 503 Service Unavailable Error' - gracefully handling error", ["verbose"]); break; case 504: // No actions - log.vlog("OneDrive returned a 'HTTP 504 Gateway Timeout Error' - gracefully handling error"); + addLogEntry("OneDrive returned a 'HTTP 504 Gateway Timeout Error' - gracefully handling error", ["verbose"]); break; // "else" default: - throw new OneDriveException(http.statusLine.code, http.statusLine.reason); - } - } - - private void checkHttpCode(ref const JSONValue response) - { - switch(http.statusLine.code) - { - // 0 - OK ... HTTP2 version of 200 OK - case 0: - break; - // 100 - Continue - case 100: - break; - // 200 - OK - case 200: - // No Log .. - break; - // 201 - Created OK - // 202 - Accepted - // 204 - Deleted OK - case 201,202,204: - // No actions, but log if verbose logging - //log.vlog("OneDrive Response: '", http.statusLine.code, " - ", http.statusLine.reason, "'"); - break; - - // 302 - resource found and available at another location, redirect - case 302: - break; - - // 400 - Bad Request - case 400: - // Bad Request .. how should we act? 
- // make sure this is thrown so that it is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // 403 - Forbidden - case 403: - // OneDrive responded that the user is forbidden - log.vlog("OneDrive returned a 'HTTP 403 - Forbidden' - gracefully handling error"); - // Throw this as a specific exception so this is caught when performing 'siteQuery = onedrive.o365SiteSearch(nextLink);' call - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // 412 - Precondition Failed - case 412: - // Throw this as a specific exception so this is caught when performing sync.uploadLastModifiedTime - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // Server side (OneDrive) Errors - // 500 - Internal Server Error - // 502 - Bad Gateway - // 503 - Service Unavailable - // 504 - Gateway Timeout (Issue #320) - case 500: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - case 502: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - case 503: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - case 504: - // Throw this as a specific exception so this is caught - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - - // Default - all other errors that are not a 2xx or a 302 - default: - if (http.statusLine.code / 100 != 2 && http.statusLine.code != 302) { - throw new OneDriveException(http.statusLine.code, http.statusLine.reason, response); - } + throw new OneDriveException(curlEngine.http.statusLine.code, curlEngine.http.statusLine.reason); } } -} - -unittest -{ - string configDirName = expandTilde("~/.config/onedrive"); - auto cfg = new config.Config(configDirName); - cfg.init(); - OneDriveApi onedrive = new OneDriveApi(cfg); - onedrive.init(); - std.file.write("/tmp/test", "test"); - - // simpleUpload - auto item = onedrive.simpleUpload("/tmp/test", "/test"); - try { - item = onedrive.simpleUpload("/tmp/test", "/test"); - } catch (OneDriveException e) { - assert(e.httpStatusCode == 409); - } - try { - item = onedrive.simpleUpload("/tmp/test", "/test", "123"); - } catch (OneDriveException e) { - assert(e.httpStatusCode == 412); - } - item = onedrive.simpleUpload("/tmp/test", "/test", item["eTag"].str); - - // deleteById - try { - onedrive.deleteById(item["id"].str, "123"); - } catch (OneDriveException e) { - assert(e.httpStatusCode == 412); - } - onedrive.deleteById(item["id"].str, item["eTag"].str); - onedrive.http.shutdown(); -} +} \ No newline at end of file diff --git a/src/progress.d b/src/progress.d deleted file mode 100644 index 9277ae121..000000000 --- a/src/progress.d +++ /dev/null @@ -1,156 +0,0 @@ -module progress; - -import std.stdio; -import std.range; -import std.format; -import std.datetime; -import core.sys.posix.unistd; -import core.sys.posix.sys.ioctl; - -class Progress -{ - private: - - immutable static size_t default_width = 80; - size_t max_width = 40; - size_t width = default_width; - - ulong start_time; - string caption = "Progress"; - size_t iterations; - size_t counter; - - - size_t getTerminalWidth() { - size_t column = default_width; - version (CRuntime_Musl) { - } else version(Android) { - } else { - winsize ws; - if(ioctl(STDOUT_FILENO, 
TIOCGWINSZ, &ws) != -1 && ws.ws_col > 0) { - column = ws.ws_col; - } - } - - return column; - } - - - void clear() { - write("\r"); - for(auto i = 0; i < width; i++) write(" "); - write("\r"); - } - - - int calc_eta() { - immutable auto ratio = cast(double)counter / iterations; - auto current_time = Clock.currTime.toUnixTime(); - auto duration = cast(int)(current_time - start_time); - int hours, minutes, seconds; - double elapsed = (current_time - start_time); - int eta_sec = cast(int)((elapsed / ratio) - elapsed); - - // Return an ETA or Duration? - if (eta_sec != 0){ - return eta_sec; - } else { - return duration; - } - } - - - string progressbarText(string header_text, string footer_text) { - immutable auto ratio = cast(double)counter / iterations; - string result = ""; - - double bar_length = width - header_text.length - footer_text.length; - if(bar_length > max_width && max_width > 0) { - bar_length = max_width; - } - size_t i = 0; - for(; i < ratio * bar_length; i++) result ~= "o"; - for(; i < bar_length; i++) result ~= " "; - - return header_text ~ result ~ footer_text; - } - - - void print() { - immutable auto ratio = cast(double)counter / iterations; - auto header = appender!string(); - auto footer = appender!string(); - - header.formattedWrite("%s %3d%% |", caption, cast(int)(ratio * 100)); - - if(counter <= 0 || ratio == 0.0) { - footer.formattedWrite("| ETA --:--:--:"); - } else { - int h, m, s; - dur!"seconds"(calc_eta()) - .split!("hours", "minutes", "seconds")(h, m, s); - if (counter != iterations){ - footer.formattedWrite("| ETA %02d:%02d:%02d ", h, m, s); - } else { - footer.formattedWrite("| DONE IN %02d:%02d:%02d ", h, m, s); - } - } - - write(progressbarText(header.data, footer.data)); - } - - - void update() { - width = getTerminalWidth(); - - clear(); - - print(); - stdout.flush(); - } - - - public: - - this(size_t iterations) { - if(iterations <= 0) iterations = 1; - - counter = -1; - this.iterations = iterations; - start_time = Clock.currTime.toUnixTime; - } - - @property { - string title() { return caption; } - string title(string text) { return caption = text; } - } - - @property { - size_t count() { return counter; } - size_t count(size_t val) { - if(val > iterations) val = iterations; - return counter = val; - } - } - - @property { - size_t maxWidth() { return max_width; } - size_t maxWidth(size_t w) { - return max_width = w; - } - } - - void reset() { - counter = -1; - start_time = Clock.currTime.toUnixTime; - } - - void next() { - counter++; - if(counter > iterations) counter = iterations; - - update(); - } - - -} diff --git a/src/qxor.d b/src/qxor.d index 63e8f0f5e..64de204f7 100644 --- a/src/qxor.d +++ b/src/qxor.d @@ -1,7 +1,11 @@ +// What is this module called? +module qxor; + +// What does this module require to function? 
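The QuickXor digest below keeps its existing API; the unit test removed further down in this hunk exercised it as follows. The input string and reference hash are copied verbatim from that deleted test, so this is only a usage illustration.

    import std.digest : toHexString;

    // Usage mirrored from the unit test removed by this patch.
    void quickXorExample() {
        QuickXor qxor;
        qxor.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog");
        assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE");
    }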
import std.algorithm; import std.digest; -// implementation of the QuickXorHash algorithm in D +// Implementation of the QuickXorHash algorithm in D // https://github.com/OneDrive/onedrive-api-docs/blob/live/docs/code-snippets/quickxorhash.md struct QuickXor { @@ -71,18 +75,4 @@ struct QuickXor } return tmp; } -} - -unittest -{ - assert(isDigest!QuickXor); -} - -unittest -{ - QuickXor qxor; - qxor.put(cast(ubyte[]) "The quick brown fox jumps over the lazy dog"); - assert(qxor.finish().toHexString() == "6CC4A56F2B26C492FA4BBE57C1F31C4193A972BE"); -} - -alias QuickXorDigest = WrapperDigest!(QuickXor); +} \ No newline at end of file diff --git a/src/selective.d b/src/selective.d deleted file mode 100644 index 55be94eb7..000000000 --- a/src/selective.d +++ /dev/null @@ -1,422 +0,0 @@ -import std.algorithm; -import std.array; -import std.file; -import std.path; -import std.regex; -import std.stdio; -import std.string; -import util; -import log; - -final class SelectiveSync -{ - private string[] paths; - private string[] businessSharedFoldersList; - private Regex!char mask; - private Regex!char dirmask; - private bool skipDirStrictMatch = false; - private bool skipDotfiles = false; - - // load sync_list file - void load(string filepath) - { - if (exists(filepath)) { - // open file as read only - auto file = File(filepath, "r"); - auto range = file.byLine(); - foreach (line; range) { - // Skip comments in file - if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; - paths ~= buildNormalizedPath(line); - } - file.close(); - } - } - - // Configure skipDirStrictMatch if function is called - // By default, skipDirStrictMatch = false; - void setSkipDirStrictMatch() - { - skipDirStrictMatch = true; - } - - // load business_shared_folders file - void loadSharedFolders(string filepath) - { - if (exists(filepath)) { - // open file as read only - auto file = File(filepath, "r"); - auto range = file.byLine(); - foreach (line; range) { - // Skip comments in file - if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; - businessSharedFoldersList ~= buildNormalizedPath(line); - } - file.close(); - } - } - - void setFileMask(const(char)[] mask) - { - this.mask = wild2regex(mask); - } - - void setDirMask(const(char)[] dirmask) - { - this.dirmask = wild2regex(dirmask); - } - - // Configure skipDotfiles if function is called - // By default, skipDotfiles = false; - void setSkipDotfiles() - { - skipDotfiles = true; - } - - // return value of skipDotfiles - bool getSkipDotfiles() - { - return skipDotfiles; - } - - // config file skip_dir parameter - bool isDirNameExcluded(string name) - { - // Does the directory name match skip_dir config entry? - // Returns true if the name matches a skip_dir config entry - // Returns false if no match - log.vdebug("skip_dir evaluation for: ", name); - - // Try full path match first - if (!name.matchFirst(dirmask).empty) { - log.vdebug("'!name.matchFirst(dirmask).empty' returned true = matched"); - return true; - } else { - // Do we check the base name as well? 
- if (!skipDirStrictMatch) { - log.vdebug("No Strict Matching Enforced"); - - // Test the entire path working backwards from child - string path = buildNormalizedPath(name); - string checkPath; - auto paths = pathSplitter(path); - - foreach_reverse(directory; paths) { - if (directory != "/") { - // This will add a leading '/' but that needs to be stripped to check - checkPath = "/" ~ directory ~ checkPath; - if(!checkPath.strip('/').matchFirst(dirmask).empty) { - log.vdebug("'!checkPath.matchFirst(dirmask).empty' returned true = matched"); - return true; - } - } - } - } else { - log.vdebug("Strict Matching Enforced - No Match"); - } - } - // no match - return false; - } - - // config file skip_file parameter - bool isFileNameExcluded(string name) - { - // Does the file name match skip_file config entry? - // Returns true if the name matches a skip_file config entry - // Returns false if no match - log.vdebug("skip_file evaluation for: ", name); - - // Try full path match first - if (!name.matchFirst(mask).empty) { - return true; - } else { - // check just the file name - string filename = baseName(name); - if(!filename.matchFirst(mask).empty) { - return true; - } - } - // no match - return false; - } - - // Match against sync_list only - bool isPathExcludedViaSyncList(string path) - { - // Debug output that we are performing a 'sync_list' inclusion / exclusion test - return .isPathExcluded(path, paths); - } - - // Match against skip_dir, skip_file & sync_list entries - bool isPathExcludedMatchAll(string path) - { - return .isPathExcluded(path, paths) || .isPathMatched(path, mask) || .isPathMatched(path, dirmask); - } - - // is the path a dotfile? - bool isDotFile(string path) - { - // always allow the root - if (path == ".") return false; - - path = buildNormalizedPath(path); - auto paths = pathSplitter(path); - foreach(base; paths) { - if (startsWith(base, ".")){ - return true; - } - } - return false; - } - - // is business shared folder matched - bool isSharedFolderMatched(string name) - { - // if there are no shared folder always return false - if (businessSharedFoldersList.empty) return false; - - if (!name.matchFirst(businessSharedFoldersList).empty) { - return true; - } else { - // try a direct comparison just in case - foreach (userFolder; businessSharedFoldersList) { - if (userFolder == name) { - // direct match - log.vdebug("'matchFirst' failed to match, however direct comparison was matched: ", name); - return true; - } - } - return false; - } - } - - // is business shared folder included - bool isPathIncluded(string path, string[] allowedPaths) - { - // always allow the root - if (path == ".") return true; - // if there are no allowed paths always return true - if (allowedPaths.empty) return true; - - path = buildNormalizedPath(path); - foreach (allowed; allowedPaths) { - auto comm = commonPrefix(path, allowed); - if (comm.length == path.length) { - // the given path is contained in an allowed path - return true; - } - if (comm.length == allowed.length && path[comm.length] == '/') { - // the given path is a subitem of an allowed path - return true; - } - } - return false; - } -} - -// test if the given path is not included in the allowed paths -// if there are no allowed paths always return false -private bool isPathExcluded(string path, string[] allowedPaths) -{ - // function variables - bool exclude = false; - bool exludeDirectMatch = false; // will get updated to true, if there is a pattern match to sync_list entry - bool excludeMatched = false; // will get updated to true, 
if there is a pattern match to sync_list entry - bool finalResult = true; // will get updated to false, if pattern match to sync_list entry - int offset; - string wildcard = "*"; - - // always allow the root - if (path == ".") return false; - // if there are no allowed paths always return false - if (allowedPaths.empty) return false; - path = buildNormalizedPath(path); - log.vdebug("Evaluation against 'sync_list' for this path: ", path); - log.vdebug("[S]exclude = ", exclude); - log.vdebug("[S]exludeDirectMatch = ", exludeDirectMatch); - log.vdebug("[S]excludeMatched = ", excludeMatched); - - // unless path is an exact match, entire sync_list entries need to be processed to ensure - // negative matches are also correctly detected - foreach (allowedPath; allowedPaths) { - // is this an inclusion path or finer grained exclusion? - switch (allowedPath[0]) { - case '-': - // sync_list path starts with '-', this user wants to exclude this path - exclude = true; - // If the sync_list entry starts with '-/' offset needs to be 2, else 1 - if (startsWith(allowedPath, "-/")){ - // Offset needs to be 2 - offset = 2; - } else { - // Offset needs to be 1 - offset = 1; - } - break; - case '!': - // sync_list path starts with '!', this user wants to exclude this path - exclude = true; - // If the sync_list entry starts with '!/' offset needs to be 2, else 1 - if (startsWith(allowedPath, "!/")){ - // Offset needs to be 2 - offset = 2; - } else { - // Offset needs to be 1 - offset = 1; - } - break; - case '/': - // sync_list path starts with '/', this user wants to include this path - // but a '/' at the start causes matching issues, so use the offset for comparison - exclude = false; - offset = 1; - break; - - default: - // no negative pattern, default is to not exclude - exclude = false; - offset = 0; - } - - // What are we comparing against? - log.vdebug("Evaluation against 'sync_list' entry: ", allowedPath); - - // Generate the common prefix from the path vs the allowed path - auto comm = commonPrefix(path, allowedPath[offset..$]); - - // Is path is an exact match of the allowed path? 
- if (comm.length == path.length) { - // we have a potential exact match - // strip any potential '/*' from the allowed path, to avoid a potential lesser common match - string strippedAllowedPath = strip(allowedPath[offset..$], "/*"); - - if (path == strippedAllowedPath) { - // we have an exact path match - log.vdebug("exact path match"); - if (!exclude) { - log.vdebug("Evaluation against 'sync_list' result: direct match"); - finalResult = false; - // direct match, break and go sync - break; - } else { - log.vdebug("Evaluation against 'sync_list' result: direct match - path to be excluded"); - // do not set excludeMatched = true here, otherwise parental path also gets excluded - // flag exludeDirectMatch so that a 'wildcard match' will not override this exclude - exludeDirectMatch = true; - // final result - finalResult = true; - } - } else { - // no exact path match, but something common does match - log.vdebug("something 'common' matches the input path"); - auto splitAllowedPaths = pathSplitter(strippedAllowedPath); - string pathToEvaluate = ""; - foreach(base; splitAllowedPaths) { - pathToEvaluate ~= base; - if (path == pathToEvaluate) { - // The input path matches what we want to evaluate against as a direct match - if (!exclude) { - log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item"); - finalResult = false; - // direct match, break and go sync - break; - } else { - log.vdebug("Evaluation against 'sync_list' result: direct match for parental path item but to be excluded"); - finalResult = true; - // do not set excludeMatched = true here, otherwise parental path also gets excluded - } - } - pathToEvaluate ~= dirSeparator; - } - } - } - - // Is path is a subitem/sub-folder of the allowed path? - if (comm.length == allowedPath[offset..$].length) { - // The given path is potentially a subitem of an allowed path - // We want to capture sub-folders / files of allowed paths here, but not explicitly match other items - // if there is no wildcard - auto subItemPathCheck = allowedPath[offset..$] ~ "/"; - if (canFind(path, subItemPathCheck)) { - // The 'path' includes the allowed path, and is 'most likely' a sub-path item - if (!exclude) { - log.vdebug("Evaluation against 'sync_list' result: parental path match"); - finalResult = false; - // parental path matches, break and go sync - break; - } else { - log.vdebug("Evaluation against 'sync_list' result: parental path match but must be excluded"); - finalResult = true; - excludeMatched = true; - } - } - } - - // Does the allowed path contain a wildcard? 
(*) - if (canFind(allowedPath[offset..$], wildcard)) { - // allowed path contains a wildcard - // manually replace '*' for '.*' to be compatible with regex - string regexCompatiblePath = replace(allowedPath[offset..$], "*", ".*"); - auto allowedMask = regex(regexCompatiblePath); - if (matchAll(path, allowedMask)) { - // regex wildcard evaluation matches - // if we have a prior pattern match for an exclude, excludeMatched = true - if (!exclude && !excludeMatched && !exludeDirectMatch) { - // nothing triggered an exclusion before evaluation against wildcard match attempt - log.vdebug("Evaluation against 'sync_list' result: wildcard pattern match"); - finalResult = false; - } else { - log.vdebug("Evaluation against 'sync_list' result: wildcard pattern matched but must be excluded"); - finalResult = true; - excludeMatched = true; - } - } - } - } - // Interim results - log.vdebug("[F]exclude = ", exclude); - log.vdebug("[F]exludeDirectMatch = ", exludeDirectMatch); - log.vdebug("[F]excludeMatched = ", excludeMatched); - - // If exclude or excludeMatched is true, then finalResult has to be true - if ((exclude) || (excludeMatched) || (exludeDirectMatch)) { - finalResult = true; - } - - // results - if (finalResult) { - log.vdebug("Evaluation against 'sync_list' final result: EXCLUDED"); - } else { - log.vdebug("Evaluation against 'sync_list' final result: included for sync"); - } - return finalResult; -} - -// test if the given path is matched by the regex expression. -// recursively test up the tree. -private bool isPathMatched(string path, Regex!char mask) { - path = buildNormalizedPath(path); - auto paths = pathSplitter(path); - - string prefix = ""; - foreach(base; paths) { - prefix ~= base; - if (!path.matchFirst(mask).empty) { - // the given path matches something which we should skip - return true; - } - prefix ~= dirSeparator; - } - return false; -} - -// unit tests -unittest -{ - assert(isPathExcluded("Documents2", ["Documents"])); - assert(!isPathExcluded("Documents", ["Documents"])); - assert(!isPathExcluded("Documents/a.txt", ["Documents"])); - assert(isPathExcluded("Hello/World", ["Hello/John"])); - assert(!isPathExcluded(".", ["Documents"])); -} diff --git a/src/sqlite.d b/src/sqlite.d index 5e1839ece..cd2053dc0 100644 --- a/src/sqlite.d +++ b/src/sqlite.d @@ -1,27 +1,29 @@ +// What is this module called? module sqlite; + +// What does this module require to function? import std.stdio; import etc.c.sqlite3; import std.string: fromStringz, toStringz; import core.stdc.stdlib; import std.conv; -static import log; + +// What other modules that we have created do we need to import? 
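Among the checks deleted with selective.d above is isPathIncluded(), a common-prefix test applied to allowed (business shared folder) paths; client-side filtering now lives in src/clientSideFiltering.d, per the file list at the top of this patch. Purely as a restatement of the removed logic, the same check in isolation reads:

    import std.algorithm : commonPrefix;
    import std.path : buildNormalizedPath;

    // Restatement of the removed isPathIncluded(): a path is kept when it is
    // contained in an allowed path, or is a sub-item of one.
    bool isIncluded(string path, string[] allowedPaths) {
        if (path == ".") return true;              // always allow the root
        if (allowedPaths.length == 0) return true; // no restriction configured
        path = buildNormalizedPath(path);
        foreach (allowed; allowedPaths) {
            auto comm = commonPrefix(path, allowed);
            if (comm.length == path.length) return true;
            if (comm.length == allowed.length && path[comm.length] == '/') return true;
        }
        return false;
    }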
+import log; extern (C) immutable(char)* sqlite3_errstr(int); // missing from the std library -static this() -{ +static this() { if (sqlite3_libversion_number() < 3006019) { throw new SqliteException("sqlite 3.6.19 or newer is required"); } } -private string ifromStringz(const(char)* cstr) -{ +private string ifromStringz(const(char)* cstr) { return fromStringz(cstr).dup; } -class SqliteException: Exception -{ +class SqliteException: Exception { @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__, Throwable next = null) { super(msg, file, line, next); @@ -33,68 +35,67 @@ class SqliteException: Exception } } -struct Database -{ +struct Database { private sqlite3* pDb; - this(const(char)[] filename) - { + this(const(char)[] filename) { open(filename); } - ~this() - { + ~this() { close(); } - int db_checkpoint() - { + int db_checkpoint() { return sqlite3_wal_checkpoint(pDb, null); } - void dump_open_statements() - { - log.log("Dumpint open statements: \n"); + void dump_open_statements() { + addLogEntry("Dumping open statements:", ["debug"]); auto p = sqlite3_next_stmt(pDb, null); while (p != null) { - log.log (" - " ~ ifromStringz(sqlite3_sql(p)) ~ "\n"); + addLogEntry(" - " ~ to!string(ifromStringz(sqlite3_sql(p)))); p = sqlite3_next_stmt(pDb, p); } } - void open(const(char)[] filename) - { + void open(const(char)[] filename) { // https://www.sqlite.org/c3ref/open.html int rc = sqlite3_open(toStringz(filename), &pDb); if (rc == SQLITE_CANTOPEN) { // Database cannot be opened - log.error("\nThe database cannot be opened. Please check the permissions of ~/.config/onedrive/items.sqlite3\n"); + addLogEntry(); + addLogEntry("The database cannot be opened. Please check the permissions of " ~ to!string(filename)); + addLogEntry(); close(); exit(-1); } if (rc != SQLITE_OK) { - log.error("\nA database access error occurred: " ~ getErrorMessage() ~ "\n"); + addLogEntry(); + addLogEntry("A database access error occurred: " ~ getErrorMessage()); + addLogEntry(); close(); exit(-1); } sqlite3_extended_result_codes(pDb, 1); // always use extended result codes } - void exec(const(char)[] sql) - { + void exec(const(char)[] sql) { // https://www.sqlite.org/c3ref/exec.html int rc = sqlite3_exec(pDb, toStringz(sql), null, null, null); if (rc != SQLITE_OK) { - log.error("\nA database execution error occurred: "~ getErrorMessage() ~ "\n"); - log.error("Please retry your command with --resync to fix any local database corruption issues.\n"); + addLogEntry(); + addLogEntry("A database execution error occurred: "~ getErrorMessage()); + addLogEntry(); + addLogEntry("Please retry your command with --resync to fix any local database corruption issues."); + addLogEntry(); close(); exit(-1); } } - int getVersion() - { + int getVersion() { int userVersion; extern (C) int callback(void* user_version, int count, char** column_text, char** column_name) { import core.stdc.stdlib: atoi; @@ -107,20 +108,23 @@ struct Database } return userVersion; } + + int getThreadsafeValue() { + // Get the threadsafe value + auto threadsafeValue = sqlite3_threadsafe(); + return threadsafeValue; + } - string getErrorMessage() - { + string getErrorMessage() { return ifromStringz(sqlite3_errmsg(pDb)); } - void setVersion(int userVersion) - { + void setVersion(int userVersion) { import std.conv: to; exec("PRAGMA user_version=" ~ to!string(userVersion)); } - Statement prepare(const(char)[] zSql) - { + Statement prepare(const(char)[] zSql) { Statement s; // https://www.sqlite.org/c3ref/prepare.html int rc = 
sqlite3_prepare_v2(pDb, zSql.ptr, cast(int) zSql.length, &s.pStmt, null); @@ -130,46 +134,39 @@ struct Database return s; } - void close() - { + void close() { // https://www.sqlite.org/c3ref/close.html sqlite3_close_v2(pDb); pDb = null; } } -struct Statement -{ - struct Result - { +struct Statement { + struct Result { private sqlite3_stmt* pStmt; private const(char)[][] row; - private this(sqlite3_stmt* pStmt) - { + private this(sqlite3_stmt* pStmt) { this.pStmt = pStmt; step(); // initialize the range } - @property bool empty() - { + @property bool empty() { return row.length == 0; } - @property auto front() - { + @property auto front() { return row; } alias step popFront; - void step() - { + void step() { // https://www.sqlite.org/c3ref/step.html int rc = sqlite3_step(pStmt); if (rc == SQLITE_BUSY) { // Database is locked by another onedrive process - log.error("The database is currently locked by another process - cannot sync"); + addLogEntry("The database is currently locked by another process - cannot sync"); return; } if (rc == SQLITE_DONE) { @@ -185,8 +182,11 @@ struct Statement } } else { string errorMessage = ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt))); - log.error("\nA database statement execution error occurred: "~ errorMessage ~ "\n"); - log.error("Please retry your command with --resync to fix any local database corruption issues.\n"); + addLogEntry(); + addLogEntry("A database statement execution error occurred: "~ errorMessage); + addLogEntry(); + addLogEntry("Please retry your command with --resync to fix any local database corruption issues."); + addLogEntry(); exit(-1); } } @@ -194,14 +194,12 @@ struct Statement private sqlite3_stmt* pStmt; - ~this() - { + ~this() { // https://www.sqlite.org/c3ref/finalize.html sqlite3_finalize(pStmt); } - void bind(int index, const(char)[] value) - { + void bind(int index, const(char)[] value) { reset(); // https://www.sqlite.org/c3ref/bind_blob.html int rc = sqlite3_bind_text(pStmt, index, value.ptr, cast(int) value.length, SQLITE_STATIC); @@ -210,47 +208,16 @@ struct Statement } } - Result exec() - { + Result exec() { reset(); return Result(pStmt); } - private void reset() - { + private void reset() { // https://www.sqlite.org/c3ref/reset.html int rc = sqlite3_reset(pStmt); if (rc != SQLITE_OK) { throw new SqliteException(ifromStringz(sqlite3_errmsg(sqlite3_db_handle(pStmt)))); } } -} - -unittest -{ - auto db = Database(":memory:"); - db.exec("CREATE TABLE test( - id TEXT PRIMARY KEY, - value TEXT - )"); - - assert(db.getVersion() == 0); - db.setVersion(1); - assert(db.getVersion() == 1); - - auto s = db.prepare("INSERT INTO test VALUES (?, ?)"); - s.bind(1, "key1"); - s.bind(2, "value"); - s.exec(); - s.bind(1, "key2"); - s.bind(2, null); - s.exec(); - - s = db.prepare("SELECT * FROM test ORDER BY id ASC"); - auto r = s.exec(); - assert(r.front[0] == "key1"); - r.popFront(); - assert(r.front[1] == null); - r.popFront(); - assert(r.empty); -} +} \ No newline at end of file diff --git a/src/sync.d b/src/sync.d index 346d8c00c..05c9a5a8b 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1,3035 +1,2046 @@ +// What is this module called? +module syncEngine; + +// What does this module require to function? 
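The Database and Statement wrappers above keep the same public surface after this change; the unit test removed at the end of the sqlite.d hunk exercised them with an in-memory database, a prepared INSERT with bound parameters, and a SELECT iterated through Result. The sketch below simply mirrors that deleted test.

    // Usage drawn from the unit test removed by this patch.
    void sqliteWrapperExample() {
        auto db = Database(":memory:");
        db.exec("CREATE TABLE test(
            id    TEXT PRIMARY KEY,
            value TEXT
        )");
        auto s = db.prepare("INSERT INTO test VALUES (?, ?)");
        s.bind(1, "key1");
        s.bind(2, "value");
        s.exec();
        s = db.prepare("SELECT * FROM test ORDER BY id ASC");
        auto r = s.exec();
        assert(r.front[0] == "key1");
    }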
+import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; +import core.thread; +import core.time; import std.algorithm; -import std.array: array; -import std.datetime; -import std.exception: enforce; -import std.file, std.json, std.path; -import std.regex; -import std.stdio, std.string, std.uni, std.uri; +import std.array; +import std.concurrency; +import std.container.rbtree; import std.conv; +import std.datetime; import std.encoding; -import core.time, core.thread; -import core.stdc.stdlib; -import config, itemdb, onedrive, selective, upload, util; -static import log; - -// threshold after which files will be uploaded using an upload session -private long thresholdFileSize = 4 * 2^^20; // 4 MiB - -// flag to set whether local files should be deleted from OneDrive -private bool noRemoteDelete = false; - -// flag to set whether the local file should be deleted once it is successfully uploaded to OneDrive -private bool localDeleteAfterUpload = false; - -// flag to set if we are running as uploadOnly -private bool uploadOnly = false; - -// Do we configure to disable the upload validation routine -private bool disableUploadValidation = false; - -// Do we configure to disable the download validation routine -private bool disableDownloadValidation = false; - -// Do we perform a local cleanup of files that are 'extra' on the local file system, when using --download-only -private bool cleanupLocalFiles = false; - -private bool isItemFolder(const ref JSONValue item) -{ - return ("folder" in item) != null; -} - -private bool isItemFile(const ref JSONValue item) -{ - return ("file" in item) != null; -} - -private bool isItemDeleted(const ref JSONValue item) -{ - return ("deleted" in item) != null; -} - -private bool isItemRoot(const ref JSONValue item) -{ - return ("root" in item) != null; -} - -private bool isItemRemote(const ref JSONValue item) -{ - return ("remoteItem" in item) != null; -} - -private bool hasParentReference(const ref JSONValue item) -{ - return ("parentReference" in item) != null; -} - -private bool hasParentReferenceId(const ref JSONValue item) -{ - return ("id" in item["parentReference"]) != null; -} - -private bool hasParentReferencePath(const ref JSONValue item) -{ - return ("path" in item["parentReference"]) != null; -} +import std.exception; +import std.file; +import std.json; +import std.parallelism; +import std.path; +import std.range; +import std.regex; +import std.stdio; +import std.string; +import std.uni; +import std.uri; +import std.utf; +import std.math; -private bool isMalware(const ref JSONValue item) -{ - return ("malware" in item) != null; -} +// What other modules that we have created do we need to import? 
+import config; +import log; +import util; +import onedrive; +import itemdb; +import clientSideFiltering; -private bool hasFileSize(const ref JSONValue item) -{ - return ("size" in item) != null; +class jsonResponseException: Exception { + @safe pure this(string inputMessage) { + string msg = format(inputMessage); + super(msg); + } } -private bool hasId(const ref JSONValue item) -{ - return ("id" in item) != null; +class posixException: Exception { + @safe pure this(string localTargetName, string remoteTargetName) { + string msg = format("POSIX 'case-insensitive match' between '%s' (local) and '%s' (online) which violates the Microsoft OneDrive API namespace convention", localTargetName, remoteTargetName); + super(msg); + } } -private bool hasHashes(const ref JSONValue item) -{ - return ("hashes" in item["file"]) != null; +class accountDetailsException: Exception { + @safe pure this() { + string msg = format("Unable to query OneDrive API to obtain required account details"); + super(msg); + } } -private bool hasQuickXorHash(const ref JSONValue item) -{ - return ("quickXorHash" in item["file"]["hashes"]) != null; +class SyncException: Exception { + @nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__) { + super(msg, file, line); + } } -private bool hasSHA256Hash(const ref JSONValue item) -{ - return ("sha256Hash" in item["file"]["hashes"]) != null; +void forceExit() { + // Allow logging to flush and complete + Thread.sleep(dur!("msecs")(500)); + // Force Exit + exit(EXIT_FAILURE); } -private bool isDotFile(const(string) path) -{ - // always allow the root - if (path == ".") return false; - auto paths = pathSplitter(buildNormalizedPath(path)); - foreach(base; paths) { - if (startsWith(base, ".")){ - return true; +class SyncEngine { + // Class Variables + ApplicationConfig appConfig; + OneDriveApi oneDriveApiInstance; + ItemDatabase itemDB; + ClientSideFiltering selectiveSync; + + // Array of directory databaseItem.id to skip while applying the changes. 
+ // These are the 'parent path' id's that are being excluded, so if the parent id is in here, the child needs to be skipped as well + RedBlackTree!string skippedItems = redBlackTree!string(); + // Array of databaseItem.id to delete after the changes have been downloaded + string[2][] idsToDelete; + // Array of JSON items which are files or directories that are not 'root', skipped or to be deleted, that need to be processed + JSONValue[] jsonItemsToProcess; + // Array of JSON items which are files that are not 'root', skipped or to be deleted, that need to be downloaded + JSONValue[] fileJSONItemsToDownload; + // Array of paths that failed to download + string[] fileDownloadFailures; + // Array of all OneDrive driveId's that have been seen + string[] driveIDsArray; + // List of items we fake created when using --dry-run + string[2][] idsFaked; + // List of paths we fake deleted when using --dry-run + string[] pathFakeDeletedArray; + // Array of database Parent Item ID, Item ID & Local Path where the content has changed and needs to be uploaded + string[3][] databaseItemsWhereContentHasChanged; + // Array of local file paths that need to be uploaded as new itemts to OneDrive + string[] newLocalFilesToUploadToOneDrive; + // Array of local file paths that failed to be uploaded to OneDrive + string[] fileUploadFailures; + // List of path names changed online, but not changed locally when using --dry-run + string[] pathsRenamed; + // List of paths that were a POSIX case-insensitive match, thus could not be created online + string[] posixViolationPaths; + // List of local paths, that, when using the OneDrive Business Shared Folders feature, then diabling it, folder still exists locally and online + // This list of local paths need to be skipped + string[] businessSharedFoldersOnlineToSkip; + // List of interrupted uploads session files that need to be resumed + string[] interruptedUploadsSessionFiles; + // List of validated interrupted uploads session JSON items to resume + JSONValue[] jsonItemsToResumeUpload; + + // Flag that there were upload or download failures listed + bool syncFailures = false; + // Is sync_list configured + bool syncListConfigured = false; + // Was --dry-run used? + bool dryRun = false; + // Was --upload-only used? + bool uploadOnly = false; + // Was --remove-source-files used? + // Flag to set whether the local file should be deleted once it is successfully uploaded to OneDrive + bool localDeleteAfterUpload = false; + + // Do we configure to disable the download validation routine due to --disable-download-validation + // We will always validate our downloads + // However, when downloading files from SharePoint, the OneDrive API will not advise the correct file size + // which means that the application thinks the file download has failed as the size is different / hash is different + // See: https://github.com/abraunegg/onedrive/discussions/1667 + bool disableDownloadValidation = false; + + // Do we configure to disable the upload validation routine due to --disable-upload-validation + // We will always validate our uploads + // However, when uploading a file that can contain metadata SharePoint will associate some + // metadata from the library the file is uploaded to directly in the file which breaks this validation. 
+ // See: https://github.com/abraunegg/onedrive/issues/205 + // See: https://github.com/OneDrive/onedrive-api-docs/issues/935 + bool disableUploadValidation = false; + + // Do we perform a local cleanup of files that are 'extra' on the local file system, when using --download-only + bool cleanupLocalFiles = false; + // Are we performing a --single-directory sync ? + bool singleDirectoryScope = false; + string singleDirectoryScopeDriveId; + string singleDirectoryScopeItemId; + // Is National Cloud Deployments configured ? + bool nationalCloudDeployment = false; + // Do we configure not to perform a remote file delete if --upload-only & --no-remote-delete configured + bool noRemoteDelete = false; + // Is bypass_data_preservation set via config file + // Local data loss MAY occur in this scenario + bool bypassDataPreservation = false; + // Maximum file size upload + // https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us + // July 2020, maximum file size for all accounts is 100GB + // January 2021, maximum file size for all accounts is 250GB + ulong maxUploadFileSize = 268435456000; // 250GB + // Threshold after which files will be uploaded using an upload session + ulong sessionThresholdFileSize = 4 * 2^^20; // 4 MiB + // File size limit for file operations that the user has configured + ulong fileSizeLimit; + // Total data to upload + ulong totalDataToUpload; + // How many items have been processed for the active operation + ulong processedCount; + // Are we creating a simulated /delta response? This is critically important in terms of how we 'update' the database + bool generateSimulatedDeltaResponse = false; + // Store the latest DeltaLink + string latestDeltaLink; + + // Configure this class instance + this(ApplicationConfig appConfig, ItemDatabase itemDB, ClientSideFiltering selectiveSync) { + // Configure the class varaible to consume the application configuration + this.appConfig = appConfig; + // Configure the class varaible to consume the database configuration + this.itemDB = itemDB; + // Configure the class variable to consume the selective sync (skip_dir, skip_file and sync_list) configuration + this.selectiveSync = selectiveSync; + + // Configure the dryRun flag to capture if --dry-run was used + // Application startup already flagged we are also in a --dry-run state, so no need to output anything else here + this.dryRun = appConfig.getValueBool("dry_run"); + + // Configure file size limit + if (appConfig.getValueLong("skip_size") != 0) { + fileSizeLimit = appConfig.getValueLong("skip_size") * 2^^20; + fileSizeLimit = (fileSizeLimit == 0) ? ulong.max : fileSizeLimit; } - } - return false; -} - -// construct an Item struct from a JSON driveItem -private Item makeDatabaseItem(const ref JSONValue driveItem) -{ - Item item = { - id: driveItem["id"].str, - name: "name" in driveItem ? driveItem["name"].str : null, // name may be missing for deleted files in OneDrive Biz - eTag: "eTag" in driveItem ? driveItem["eTag"].str : null, // eTag is not returned for the root in OneDrive Biz - cTag: "cTag" in driveItem ? 
driveItem["cTag"].str : null, // cTag is missing in old files (and all folders in OneDrive Biz) - }; - - // OneDrive API Change: https://github.com/OneDrive/onedrive-api-docs/issues/834 - // OneDrive no longer returns lastModifiedDateTime if the item is deleted by OneDrive - if(isItemDeleted(driveItem)){ - // Set mtime to SysTime(0) - item.mtime = SysTime(0); - } else { - // Item is not in a deleted state - // Resolve 'Key not found: fileSystemInfo' when then item is a remote item - // https://github.com/abraunegg/onedrive/issues/11 - if (isItemRemote(driveItem)) { - // remoteItem is a OneDrive object that exists on a 'different' OneDrive drive id, when compared to account default - // Normally, the 'remoteItem' field will contain 'fileSystemInfo' however, if the user uses the 'Add Shortcut ..' option in OneDrive WebUI - // to create a 'link', this object, whilst remote, does not have 'fileSystemInfo' in the expected place, thus leading to a application crash - // See: https://github.com/abraunegg/onedrive/issues/1533 - if ("fileSystemInfo" in driveItem["remoteItem"]) { - // 'fileSystemInfo' is in 'remoteItem' which will be the majority of cases - item.mtime = SysTime.fromISOExtString(driveItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str); - } else { - // is a remote item, but 'fileSystemInfo' is missing from 'remoteItem' - item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str); - } - } else { - // item exists on account default drive id - item.mtime = SysTime.fromISOExtString(driveItem["fileSystemInfo"]["lastModifiedDateTime"].str); + + // Is there a sync_list file present? + if (exists(appConfig.syncListFilePath)) this.syncListConfigured = true; + + // Configure the uploadOnly flag to capture if --upload-only was used + if (appConfig.getValueBool("upload_only")) { + addLogEntry("Configuring uploadOnly flag to TRUE as --upload-only passed in or configured", ["debug"]); + this.uploadOnly = true; } - } - if (isItemFile(driveItem)) { - item.type = ItemType.file; - } else if (isItemFolder(driveItem)) { - item.type = ItemType.dir; - } else if (isItemRemote(driveItem)) { - item.type = ItemType.remote; - } else { - // do not throw exception, item will be removed in applyDifferences() - } - - // root and remote items do not have parentReference - if (!isItemRoot(driveItem) && ("parentReference" in driveItem) != null) { - item.driveId = driveItem["parentReference"]["driveId"].str; - if (hasParentReferenceId(driveItem)) { - item.parentId = driveItem["parentReference"]["id"].str; + // Configure the localDeleteAfterUpload flag + if (appConfig.getValueBool("remove_source_files")) { + addLogEntry("Configuring localDeleteAfterUpload flag to TRUE as --remove-source-files passed in or configured", ["debug"]); + this.localDeleteAfterUpload = true; } - } - - // extract the file hash - if (isItemFile(driveItem) && ("hashes" in driveItem["file"])) { - // Get quickXorHash - if ("quickXorHash" in driveItem["file"]["hashes"]) { - item.quickXorHash = driveItem["file"]["hashes"]["quickXorHash"].str; - } else { - log.vdebug("quickXorHash is missing from ", driveItem["id"].str); + + // Configure the disableDownloadValidation flag + if (appConfig.getValueBool("disable_download_validation")) { + addLogEntry("Configuring disableDownloadValidation flag to TRUE as --disable-download-validation passed in or configured", ["debug"]); + this.disableDownloadValidation = true; + } + + // Configure the disableUploadValidation flag + if 
(appConfig.getValueBool("disable_upload_validation")) { + addLogEntry("Configuring disableUploadValidation flag to TRUE as --disable-upload-validation passed in or configured", ["debug"]); + this.disableUploadValidation = true; + } + + // Do we configure to clean up local files if using --download-only ? + if ((appConfig.getValueBool("download_only")) && (appConfig.getValueBool("cleanup_local_files"))) { + // --download-only and --cleanup-local-files were passed in + addLogEntry("WARNING: Application has been configured to cleanup local files that are not present online."); + addLogEntry("WARNING: Local data loss MAY occur in this scenario if you are expecting data to remain archived locally."); + // Set the flag + this.cleanupLocalFiles = true; + } + + // Do we configure to NOT perform a remote delete if --upload-only & --no-remote-delete configured ? + if ((appConfig.getValueBool("upload_only")) && (appConfig.getValueBool("no_remote_delete"))) { + // --upload-only and --no-remote-delete were passed in + addLogEntry("WARNING: Application has been configured NOT to cleanup remote files that are deleted locally."); + // Set the flag + this.noRemoteDelete = true; + } + + // Are we forcing to use /children scan instead of /delta to simulate National Cloud Deployment use of /children? + if (appConfig.getValueBool("force_children_scan")) { + addLogEntry("Forcing client to use /children API call rather than /delta API to retrieve objects from the OneDrive API"); + this.nationalCloudDeployment = true; + } + + // Are we forcing the client to bypass any data preservation techniques to NOT rename any local files if there is a conflict? + // The enabling of this function could lead to data loss + if (appConfig.getValueBool("bypass_data_preservation")) { + addLogEntry("WARNING: Application has been configured to bypass local data preservation in the event of file conflict."); + addLogEntry("WARNING: Local data loss MAY occur in this scenario."); + this.bypassDataPreservation = true; + } + + // Did the user configure a specific rate limit for the application? + if (appConfig.getValueLong("rate_limit") > 0) { + // User configured rate limit + addLogEntry("User Configured Rate Limit: " ~ to!string(appConfig.getValueLong("rate_limit"))); + + // If user provided rate limit is < 131072, flag that this is too low, setting to the recommended minimum of 131072 + if (appConfig.getValueLong("rate_limit") < 131072) { + // user provided limit too low + addLogEntry("WARNING: User configured rate limit too low for normal application processing and preventing application timeouts. 
Overriding to recommended minimum of 131072 (128KB/s)"); + appConfig.setValueLong("rate_limit", 131072); + } } - // sha256Hash - if ("sha256Hash" in driveItem["file"]["hashes"]) { - item.sha256Hash = driveItem["file"]["hashes"]["sha256Hash"].str; + + // Did the user downgrade all HTTP operations to force HTTP 1.1 + if (appConfig.getValueBool("force_http_11")) { + // User is forcing downgrade to curl to use HTTP 1.1 for all operations + addLogEntry("Downgrading all HTTP operations to HTTP/1.1 due to user configuration", ["verbose"]); } else { - log.vdebug("sha256Hash is missing from ", driveItem["id"].str); + // Use curl defaults + addLogEntry("Using Curl defaults for HTTP operational protocol version (potentially HTTP/2)", ["debug"]); } - } - - if (isItemRemote(driveItem)) { - item.remoteDriveId = driveItem["remoteItem"]["parentReference"]["driveId"].str; - item.remoteId = driveItem["remoteItem"]["id"].str; - } - - // National Cloud Deployments do not support /delta as a query - // Thus we need to track in the database that this item is in sync - // As we are making an item, set the syncStatus to Y - // ONLY when using a National Cloud Deployment, all the existing DB entries will get set to N - // so when processing /children, it can be identified what the 'deleted' difference is - item.syncStatus = "Y"; - - return item; -} - -private bool testFileHash(const(string) path, const ref Item item) -{ - // Generate QuickXORHash first before others - if (item.quickXorHash) { - if (item.quickXorHash == computeQuickXorHash(path)) return true; - } else if (item.sha256Hash) { - if (item.sha256Hash == computeSHA256Hash(path)) return true; } - return false; -} - -class SyncException: Exception -{ - @nogc @safe pure nothrow this(string msg, string file = __FILE__, size_t line = __LINE__) - { - super(msg, file, line); - } -} - -final class SyncEngine -{ - private Config cfg; - private OneDriveApi onedrive; - private ItemDatabase itemdb; - private UploadSession session; - private SelectiveSync selectiveSync; - // list of items to skip while applying the changes - private string[] skippedItems; - // list of items to delete after the changes has been downloaded - private string[2][] idsToDelete; - // list of items we fake created when using --dry-run - private string[2][] idsFaked; - // list of directory names changed online, but not changed locally when using --dry-run - private string[] pathsRenamed; - // default drive id - private string defaultDriveId; - // default root id - private string defaultRootId; - // type of OneDrive account - private string accountType; - // free space remaining at init() - private long remainingFreeSpace; - // file size limit for a new file - private long newSizeLimit; - // is file malware flag - private bool malwareDetected = false; - // download filesystem issue flag - private bool downloadFailed = false; - // upload failure - OneDrive or filesystem issue (reading data) - private bool uploadFailed = false; - // initialization has been done - private bool initDone = false; - // sync engine dryRun flag - private bool dryRun = false; - // quota details available - private bool quotaAvailable = true; - // quota details restricted - private bool quotaRestricted = false; - // sync business shared folders flag - private bool syncBusinessFolders = false; - // single directory scope flag - private bool singleDirectoryScope = false; - // is sync_list configured - private bool syncListConfigured = false; - // sync_list new folder added, trigger delta scan override - private bool 
oneDriveFullScanTrigger = false; - // is bypass_data_preservation set via config file - // Local data loss MAY occur in this scenario - private bool bypassDataPreservation = false; - // is National Cloud Deployments configured - private bool nationalCloudDeployment = false; - // has performance processing timings been requested - private bool displayProcessingTime = false; - // array of all OneDrive driveId's for use with OneDrive Business Folders - private string[] driveIDsArray; - this(Config cfg, OneDriveApi onedrive, ItemDatabase itemdb, SelectiveSync selectiveSync) - { - assert(onedrive && itemdb && selectiveSync); - this.cfg = cfg; - this.onedrive = onedrive; - this.itemdb = itemdb; - this.selectiveSync = selectiveSync; - // session = UploadSession(onedrive, cfg.uploadStateFilePath); - this.dryRun = cfg.getValueBool("dry_run"); - this.newSizeLimit = cfg.getValueLong("skip_size") * 2^^20; - this.newSizeLimit = (this.newSizeLimit == 0) ? long.max : this.newSizeLimit; - } + // Initialise the Sync Engine class + bool initialise() { - void reset() - { - initDone=false; - } - - void init() - { - // Set accountType, defaultDriveId, defaultRootId & remainingFreeSpace once and reuse where possible - JSONValue oneDriveDetails; - JSONValue oneDriveRootDetails; - - if (initDone) { - return; - } - - session = UploadSession(onedrive, cfg.uploadStateFilePath); - - // Need to catch 400 or 5xx server side errors at initialization - // Get Default Drive - try { - oneDriveDetails = onedrive.getDefaultDrive(); - } catch (OneDriveException e) { - log.vdebug("oneDriveDetails = onedrive.getDefaultDrive() generated a OneDriveException"); - if (e.httpStatusCode == 400) { - // OneDrive responded with 400 error: Bad Request - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - - // Check this - if (cfg.getValueString("drive_id").length) { - writeln(); - log.error("ERROR: Check your 'drive_id' entry in your configuration file as it may be incorrect"); - writeln(); - } - // Must exit here - onedrive.shutdown(); - exit(-1); + // create a new instance of the OneDrive API + oneDriveApiInstance = new OneDriveApi(appConfig); + if (oneDriveApiInstance.initialise()) { + try { + // Get the relevant default account & drive details + getDefaultDriveDetails(); + } catch (accountDetailsException exception) { + // details could not be queried + addLogEntry(exception.msg); + // Shutdown API instance + oneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(oneDriveApiInstance); + // Must force exit here, allow logging to be done + forceExit(); } - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling init();"); - init(); - // return back to original call - return; + + try { + // Get the relevant default account & drive details + getDefaultRootDetails(); + } catch (accountDetailsException exception) { + // details could not be queried + addLogEntry(exception.msg); + // Shutdown API instance + oneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(oneDriveApiInstance); + // Must force exit here, allow logging to be done + forceExit(); } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + + try { + // Display details + displaySyncEngineDetails(); + } catch (accountDetailsException exception) { + // details could not be queried + addLogEntry(exception.msg); + // Shutdown API instance + oneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(oneDriveApiInstance); + // Must force exit here, allow logging to be done + forceExit(); } + } else { + // API could not be initialised + addLogEntry("OneDrive API could not be initialised with previously used details"); + // Shutdown API instance + oneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(oneDriveApiInstance); + // Must force exit here, allow logging to be done + forceExit(); } - // Get Default Root + // API was initialised + addLogEntry("Sync Engine Initialised with new Onedrive API instance", ["verbose"]); + + // Shutdown this API instance, as we will create API instances as required, when required + oneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(oneDriveApiInstance); + return true; + } + + // Get Default Drive Details for this Account + void getDefaultDriveDetails() { + + // Function variables + JSONValue defaultOneDriveDriveDetails; + + // Get Default Drive Details for this Account try { - oneDriveRootDetails = onedrive.getDefaultRoot(); - } catch (OneDriveException e) { - log.vdebug("oneDriveRootDetails = onedrive.getDefaultRoot() generated a OneDriveException"); - if (e.httpStatusCode == 400) { - // OneDrive responded with 400 error: Bad Request - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Check this - if (cfg.getValueString("drive_id").length) { - writeln(); - log.error("ERROR: Check your 'drive_id' entry in your configuration file as it may be incorrect"); - writeln(); - } - // Must exit here - onedrive.shutdown(); - exit(-1); - } - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling init();"); - init(); - // return back to original call - return; - } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + addLogEntry("Getting Account Default Drive Details", ["debug"]); + defaultOneDriveDriveDetails = oneDriveApiInstance.getDefaultDriveDetails(); + } catch (OneDriveException exception) { + addLogEntry("defaultOneDriveDriveDetails = oneDriveApiInstance.getDefaultDriveDetails() generated a OneDriveException", ["debug"]); + string thisFunctionName = getFunctionName!({}); + + if ((exception.httpStatusCode == 400) || (exception.httpStatusCode == 401)) { + // Handle the 400 | 401 error + handleClientUnauthorised(exception.httpStatusCode, exception.msg); } - } - - if ((oneDriveDetails.type() == JSONType.object) && (oneDriveRootDetails.type() == JSONType.object) && (hasId(oneDriveDetails)) && (hasId(oneDriveRootDetails))) { - // JSON elements are valid - // Debug OneDrive Account details response - log.vdebug("OneDrive Account Details: ", oneDriveDetails); - log.vdebug("OneDrive Account Root Details: ", oneDriveRootDetails); - - // Successfully got details from OneDrive without a server side error such as 'HTTP/1.1 500 Internal Server Error' or 'HTTP/1.1 504 Gateway Timeout' - accountType = oneDriveDetails["driveType"].str; - defaultDriveId = oneDriveDetails["id"].str; - defaultRootId = oneDriveRootDetails["id"].str; - - // get the remaining size from OneDrive API - if ("remaining" in oneDriveDetails["quota"]){ - // use the value provided - remainingFreeSpace = oneDriveDetails["quota"]["remaining"].integer; + + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(oneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) ||(exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to query Account Default Drive Details - retrying applicable request in 30 seconds"); + addLogEntry("defaultOneDriveDriveDetails = oneDriveApiInstance.getDefaultDriveDetails() previously threw an error - retrying", ["debug"]); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429 and 504 - but loop back calling this function + addLogEntry("Retrying Function: getDefaultDriveDetails()", ["debug"]); + getDefaultDriveDetails(); } else { - // set at zero - remainingFreeSpace = 0; + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); } + } + + // If the JSON response is a correct JSON object, and has an 'id' we can set these details + if ((defaultOneDriveDriveDetails.type() == JSONType.object) && (hasId(defaultOneDriveDriveDetails))) { + addLogEntry("OneDrive Account Default Drive Details: " ~ to!string(defaultOneDriveDriveDetails), ["debug"]); + appConfig.accountType = defaultOneDriveDriveDetails["driveType"].str; + appConfig.defaultDriveId = defaultOneDriveDriveDetails["id"].str; - // Make sure that defaultDriveId is in our driveIDs array to use when checking if item is in database - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, defaultDriveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= defaultDriveId; + // Get the initial remaining size from OneDrive API response JSON + // This will be updated as we upload data to OneDrive + if (hasQuota(defaultOneDriveDriveDetails)) { + if ("remaining" in defaultOneDriveDriveDetails["quota"]){ + // use the value provided + appConfig.remainingFreeSpace = defaultOneDriveDriveDetails["quota"]["remaining"].integer; + } } // In some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero - if (remainingFreeSpace <= 0) { + if (appConfig.remainingFreeSpace <= 0) { // free space is <= 0 .. why ? - if ("remaining" in oneDriveDetails["quota"]){ - // json response contained a 'remaining' value - if (accountType == "personal"){ + if ("remaining" in defaultOneDriveDriveDetails["quota"]) { + if (appConfig.accountType == "personal") { // zero space available - log.error("ERROR: OneDrive account currently has zero space available. Please free up some space online."); - quotaAvailable = false; + addLogEntry("ERROR: OneDrive account currently has zero space available. Please free up some space online."); + appConfig.quotaAvailable = false; } else { // zero space available is being reported, maybe being restricted? - log.error("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); - quotaRestricted = true; + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); + appConfig.quotaRestricted = true; } } else { // json response was missing a 'remaining' value - if (accountType == "personal"){ - log.error("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online."); - quotaAvailable = false; + if (appConfig.accountType == "personal") { + addLogEntry("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. 
Please free up some space online.");
+						appConfig.quotaAvailable = false;
 					} else {
 						// quota details not available
-						log.error("ERROR: OneDrive quota information is being restricted. Please fix by speaking to your OneDrive / Office 365 Administrator.");
-						quotaRestricted = true;
-					}
+						addLogEntry("ERROR: OneDrive quota information is being restricted. Please fix by speaking to your OneDrive / Office 365 Administrator.");
+						appConfig.quotaRestricted = true;
+					}
 				}
 			}
+			// What did we set based on the data from the JSON
+			addLogEntry("appConfig.accountType = " ~ appConfig.accountType, ["debug"]);
+			addLogEntry("appConfig.defaultDriveId = " ~ appConfig.defaultDriveId, ["debug"]);
+			addLogEntry("appConfig.remainingFreeSpace = " ~ to!string(appConfig.remainingFreeSpace), ["debug"]);
+			addLogEntry("appConfig.quotaAvailable = " ~ to!string(appConfig.quotaAvailable), ["debug"]);
+			addLogEntry("appConfig.quotaRestricted = " ~ to!string(appConfig.quotaRestricted), ["debug"]);
 			
-			// Display accountType, defaultDriveId, defaultRootId & remainingFreeSpace for verbose logging purposes
-			log.vlog("Application version: ", strip(import("version")));
-			log.vlog("Account Type: ", accountType);
-			log.vlog("Default Drive ID: ", defaultDriveId);
-			log.vlog("Default Root ID: ", defaultRootId);
+			// Make sure that appConfig.defaultDriveId is in our driveIDs array to use when checking if item is in database
+			// Keep the driveIDsArray with unique entries only
+			if (!canFind(driveIDsArray, appConfig.defaultDriveId)) {
+				// Add this drive id to the array to search with
+				driveIDsArray ~= appConfig.defaultDriveId;
+			}
+		} else {
+			// Handle the invalid JSON response
+			throw new accountDetailsException();
+		}
+	}
+	
+	// Get Default Root Details for this Account
+	void getDefaultRootDetails() {
+		
+		// Function variables
+		JSONValue defaultOneDriveRootDetails;
+		
+		// Get Default Root Details for this Account
+		try {
+			addLogEntry("Getting Account Default Root Details", ["debug"]);
+			defaultOneDriveRootDetails = oneDriveApiInstance.getDefaultRootDetails();
+		} catch (OneDriveException exception) {
+			addLogEntry("defaultOneDriveRootDetails = oneDriveApiInstance.getDefaultRootDetails() generated a OneDriveException", ["debug"]);
+			string thisFunctionName = getFunctionName!({});
+			
+			if ((exception.httpStatusCode == 400) || (exception.httpStatusCode == 401)) {
+				// Handle the 400 | 401 error
+				handleClientUnauthorised(exception.httpStatusCode, exception.msg);
+			}
 			
-			// What do we display here
-			if (remainingFreeSpace > 0) {
-				// Display the actual value
-				log.vlog("Remaining Free Space: ", remainingFreeSpace);
-			} else {
-				// zero or non-zero value or restricted
-				if (!quotaRestricted){
-					log.vlog("Remaining Free Space: 0");
-				} else {
-					log.vlog("Remaining Free Space: Not Available");
+			// HTTP request returned status code 408,429,503,504
+			if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
+				// Handle the 429
+				if (exception.httpStatusCode == 429) {
+					// HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
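+					// Back off as advised by the Retry-After header (handled below) before this request is re-attempted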
+					handleOneDriveThrottleRequest(oneDriveApiInstance);
+					addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]);
+				}
+				// re-try the specific changes queries
+				if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
+					// 503 - Service Unavailable
+					// 504 - Gateway Timeout
+					// Transient error - try again in 30 seconds
+					auto errorArray = splitLines(exception.msg);
+					addLogEntry(to!string(errorArray[0]) ~ " when attempting to query Account Default Root Details - retrying applicable request in 30 seconds");
+					addLogEntry("defaultOneDriveRootDetails = oneDriveApiInstance.getDefaultRootDetails() previously threw an error - retrying", ["debug"]);
+					
+					// The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
+					addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]);
+					Thread.sleep(dur!"seconds"(30));
 				}
+				// re-try original request - retried for 429, 503, 504 - but loop back calling this function
+				addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]);
+				getDefaultRootDetails();
+			} else {
+				// Default operation if not 408,429,503,504 errors
+				// display what the error is
+				displayOneDriveErrorMessage(exception.msg, getFunctionName!({}));
 			}
+		}
 		
-		// If account type is documentLibrary - then most likely this is a SharePoint repository
-		// and files 'may' be modified after upload. See: https://github.com/abraunegg/onedrive/issues/205
-		if(accountType == "documentLibrary") {
-			// set this flag for SharePoint regardless of --disable-upload-validation being used
-			setDisableUploadValidation();
+		// If the JSON response is a correct JSON object, and has an 'id' we can set these details
+		if ((defaultOneDriveRootDetails.type() == JSONType.object) && (hasId(defaultOneDriveRootDetails))) {
+			addLogEntry("OneDrive Account Default Root Details: " ~ to!string(defaultOneDriveRootDetails), ["debug"]);
+			appConfig.defaultRootId = defaultOneDriveRootDetails["id"].str;
+			addLogEntry("appConfig.defaultRootId = " ~ appConfig.defaultRootId, ["debug"]);
+			
+			// Save the item to the database, so the account root drive is always going to be present in the DB
+			saveItem(defaultOneDriveRootDetails);
+		} else {
+			// Handle the invalid JSON response
+			throw new accountDetailsException();
+		}
+	}
+	
+	// Reset syncFailures to false
+	void resetSyncFailures() {
+		// Reset syncFailures to false if these are both empty
+		if (syncFailures) {
+			if ((fileDownloadFailures.empty) && (fileUploadFailures.empty)) {
+				addLogEntry("Resetting syncFailures = false");
+				syncFailures = false;
+			} else {
+				addLogEntry("File activity arrays not empty - not resetting syncFailures");
 			}
+		}
+	}
+	
+	// Perform a sync of the OneDrive Account
+	// - Query /delta
+	//		- If singleDirectoryScope or nationalCloudDeployment is used we need to generate a /delta like response
+	// - Process changes (add, changes, moves, deletes)
+	// - Process any items to add (download data to local)
+	// - Detail any files that we failed to download
+	// - Process any deletes (remove local data)
+	void syncOneDriveAccountToLocalDisk() {
+			
+		// performFullScanTrueUp value
+		addLogEntry("Perform a Full Scan True-Up: " ~ to!string(appConfig.fullScanTrueUpRequired), ["debug"]); 
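+		// When this flag is 'true', the stored deltaLink is discarded later in fetchOneDriveDeltaAPIResponse() so that the entire online state is re-walked, ensuring a consistent local state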
- // Check the local database to ensure the OneDrive Root details are in the database - checkDatabaseForOneDriveRoot(); + // Fetch the API response of /delta to track changes on OneDrive + fetchOneDriveDeltaAPIResponse(null, null, null); + // Process any download activities or cleanup actions + processDownloadActivities(); - // Check if there is an interrupted upload session - if (session.restore()) { - log.log("Continuing the upload session ..."); - string uploadSessionLocalFilePath = session.getUploadSessionLocalFilePath(); - auto item = session.upload(); + // If singleDirectoryScope is false, we are not targeting a single directory + // but if true, the target 'could' be a shared folder - so dont try and scan it again + if (!singleDirectoryScope) { + // OneDrive Shared Folder Handling + if (appConfig.accountType == "personal") { + // Personal Account Type + // https://github.com/OneDrive/onedrive-api-docs/issues/764 - // is 'item' a valid JSON response and not null - if (item.type() == JSONType.object) { - // Upload did not fail, JSON response contains data - // Are we in an --upload-only & --remove-source-files scenario? - // Use actual config values as we are doing an upload session recovery - if ((cfg.getValueBool("upload_only")) && (cfg.getValueBool("remove_source_files"))) { - // Log that we are deleting a local item - log.log("Removing local file as --upload-only & --remove-source-files configured"); - // are we in a --dry-run scenario? - if (!dryRun) { - // No --dry-run ... process local file delete - if (!uploadSessionLocalFilePath.empty) { - // only perform the delete if we have a valid file path - if (exists(uploadSessionLocalFilePath)) { - // file exists - log.vdebug("Removing local file: ", uploadSessionLocalFilePath); - safeRemove(uploadSessionLocalFilePath); - } + // Get the Remote Items from the Database + Item[] remoteItems = itemDB.selectRemoteItems(); + foreach (remoteItem; remoteItems) { + // Check if this path is specifically excluded by 'skip_dir', but only if 'skip_dir' is not empty + if (appConfig.getValueString("skip_dir") != "") { + // The path that needs to be checked needs to include the '/' + // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched + if (selectiveSync.isDirNameExcluded(remoteItem.name)) { + // This directory name is excluded + addLogEntry("Skipping item - excluded by skip_dir config: " ~ remoteItem.name, ["verbose"]); + continue; + } + } + + // Directory name is not excluded or skip_dir is not populated + if (!appConfig.surpressLoggingOutput) { + addLogEntry("Syncing this OneDrive Personal Shared Folder: " ~ remoteItem.name); + } + // Check this OneDrive Personal Shared Folder for changes + fetchOneDriveDeltaAPIResponse(remoteItem.remoteDriveId, remoteItem.remoteId, remoteItem.name); + // Process any download activities or cleanup actions for this OneDrive Personal Shared Folder + processDownloadActivities(); + } + } else { + // Is this a Business Account with Sync Business Shared Items enabled? + if ((appConfig.accountType == "business") && ( appConfig.getValueBool("sync_business_shared_items"))) { + + // Business Account Shared Items Handling + // - OneDrive Business Shared Folder + // - OneDrive Business Shared Files ?? 
+ // - SharePoint Links + + // Get the Remote Items from the Database + Item[] remoteItems = itemDB.selectRemoteItems(); + + foreach (remoteItem; remoteItems) { + // Check if this path is specifically excluded by 'skip_dir', but only if 'skip_dir' is not empty + if (appConfig.getValueString("skip_dir") != "") { + // The path that needs to be checked needs to include the '/' + // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched + if (selectiveSync.isDirNameExcluded(remoteItem.name)) { + // This directory name is excluded + addLogEntry("Skipping item - excluded by skip_dir config: " ~ remoteItem.name, ["verbose"]); + continue; } } - // as file is removed, we have nothing to add to the local database - log.vdebug("Skipping adding to database as --upload-only & --remove-source-files configured"); - } else { - // save the item - saveItem(item); + + // Directory name is not excluded or skip_dir is not populated + if (!appConfig.surpressLoggingOutput) { + addLogEntry("Syncing this OneDrive Business Shared Folder: " ~ remoteItem.name); + } + + // Debug log output + addLogEntry("Fetching /delta API response for:", ["debug"]); + addLogEntry(" remoteItem.remoteDriveId: " ~ remoteItem.remoteDriveId, ["debug"]); + addLogEntry(" remoteItem.remoteId: " ~ remoteItem.remoteId, ["debug"]); + + // Check this OneDrive Personal Shared Folder for changes + fetchOneDriveDeltaAPIResponse(remoteItem.remoteDriveId, remoteItem.remoteId, remoteItem.name); + + // Process any download activities or cleanup actions for this OneDrive Personal Shared Folder + processDownloadActivities(); } - } else { - // JSON response was not valid, upload failed - log.error("ERROR: File failed to upload. Increase logging verbosity to determine why."); } } - initDone = true; - } else { - // init failure - initDone = false; - // log why - log.error("ERROR: Unable to query OneDrive to initialize application"); - // Debug OneDrive Account details response - log.vdebug("OneDrive Account Details: ", oneDriveDetails); - log.vdebug("OneDrive Account Root Details: ", oneDriveRootDetails); - // Must exit here - onedrive.shutdown(); - exit(-1); } } - - // Configure uploadOnly if function is called - // By default, uploadOnly = false; - void setUploadOnly() - { - uploadOnly = true; - } - - // Configure noRemoteDelete if function is called - // By default, noRemoteDelete = false; - // Meaning we will process local deletes to delete item on OneDrive - void setNoRemoteDelete() - { - noRemoteDelete = true; - } - - // Configure localDeleteAfterUpload if function is called - // By default, localDeleteAfterUpload = false; - // Meaning we will not delete any local file after upload is successful - void setLocalDeleteAfterUpload() - { - localDeleteAfterUpload = true; - } - - // set the flag that we are going to sync business shared folders - void setSyncBusinessFolders() - { - syncBusinessFolders = true; - } - // Configure singleDirectoryScope if function is called + // Configure singleDirectoryScope = true if this function is called // By default, singleDirectoryScope = false - void setSingleDirectoryScope() - { + void setSingleDirectoryScope(string normalisedSingleDirectoryPath) { + + // Function variables + Item searchItem; + JSONValue onlinePathData; + + // Set the main flag singleDirectoryScope = true; - } - - // Configure disableUploadValidation if function is called - // By default, disableUploadValidation = false; - // Meaning we will always validate our uploads - // However, when uploading 
a file that can contain metadata SharePoint will associate some - // metadata from the library the file is uploaded to directly in the file - // which breaks this validation. See https://github.com/abraunegg/onedrive/issues/205 - void setDisableUploadValidation() - { - disableUploadValidation = true; - log.vdebug("documentLibrary account type - flagging to disable upload validation checks due to Microsoft SharePoint file modification enrichments"); - } - - // Configure disableDownloadValidation if function is called - // By default, disableDownloadValidation = false; - // Meaning we will always validate our downloads - // However, when downloading files from SharePoint, the OneDrive API will not advise the correct file size - // which means that the application thinks the file download has failed as the size is different / hash is different - // See: https://github.com/abraunegg/onedrive/discussions/1667 - void setDisableDownloadValidation() - { - disableDownloadValidation = true; - log.vdebug("Flagging to disable download validation checks due to user request"); - } - - // Issue #658 Handling - // If an existing folder is moved into a sync_list valid path (where it previously was out of scope due to sync_list), - // then set this flag to true, so that on the second 'true-up' sync, we force a rescan of the OneDrive path to capture any 'files' - void setOneDriveFullScanTrigger() - { - oneDriveFullScanTrigger = true; - log.vdebug("Setting oneDriveFullScanTrigger = true due to new folder creation request in a location that is now in-scope which may have previously out of scope"); - } - - // unset method - void unsetOneDriveFullScanTrigger() - { - oneDriveFullScanTrigger = false; - log.vdebug("Setting oneDriveFullScanTrigger = false"); - } - - // set syncListConfigured to true - void setSyncListConfigured() - { - syncListConfigured = true; - log.vdebug("Setting syncListConfigured = true"); - } - - // set bypassDataPreservation to true - void setBypassDataPreservation() - { - bypassDataPreservation = true; - log.vdebug("Setting bypassDataPreservation = true"); - } - - // set nationalCloudDeployment to true - void setNationalCloudDeployment() - { - nationalCloudDeployment = true; - log.vdebug("Setting nationalCloudDeployment = true"); - } - - // set performance timing flag - void setPerformanceProcessingOutput() - { - displayProcessingTime = true; - log.vdebug("Setting displayProcessingTime = true"); - } - - // get performance timing flag - bool getPerformanceProcessingOutput() - { - return displayProcessingTime; - } - // set cleanupLocalFiles to true - void setCleanupLocalFiles() - { - cleanupLocalFiles = true; - log.vdebug("Setting cleanupLocalFiles = true"); - } - - // return the OneDrive Account Type - auto getAccountType() - { - // return account type in use - return accountType; - } - - // download all new changes from OneDrive - void applyDifferences(bool performFullItemScan) - { - // Set defaults for the root folder - // Use the global's as initialised via init() rather than performing unnecessary additional HTTPS calls - string driveId = defaultDriveId; - string rootId = defaultRootId; - applyDifferences(driveId, rootId, performFullItemScan); - - // Check OneDrive Personal Shared Folders - if (accountType == "personal"){ - // https://github.com/OneDrive/onedrive-api-docs/issues/764 - Item[] items = itemdb.selectRemoteItems(); - foreach (item; items) { - // Only check path if config is != "" - if (cfg.getValueString("skip_dir") != "") { - // The path that needs to be checked needs to 
include the '/' - // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched - if (selectiveSync.isDirNameExcluded(item.name)) { - // This directory name is excluded - log.vlog("Skipping item - excluded by skip_dir config: ", item.name); - continue; - } - } - // Directory name is not excluded or skip_dir is not populated - log.vdebug("------------------------------------------------------------------"); - if (!cfg.getValueBool("monitor")) { - log.log("Syncing this OneDrive Personal Shared Folder: ", item.name); - } else { - log.vlog("Syncing this OneDrive Personal Shared Folder: ", item.name); - } - // Check this OneDrive Personal Shared Folders - applyDifferences(item.remoteDriveId, item.remoteId, performFullItemScan); - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, item.remoteDriveId)) { - // Add this OneDrive Personal Shared Folder driveId array - driveIDsArray ~= item.remoteDriveId; - } - } + // What are we doing? + addLogEntry("The OneDrive Client was asked to search for this directory online and create it if it's not located: " ~ normalisedSingleDirectoryPath); + + // Query the OneDrive API for the specified path online + // In a --single-directory scenario, we need to travervse the entire path that we are wanting to sync + // and then check the path element does it exist online, if it does, is it a POSIX match, or if it does not, create the path + // Once we have searched online, we have the right drive id and item id so that we can downgrade the sync status, then build up + // any object items from that location + // This is because, in a --single-directory scenario, any folder in the entire path tree could be a 'case-insensitive match' + + try { + onlinePathData = queryOneDriveForSpecificPathAndCreateIfMissing(normalisedSingleDirectoryPath, true); + } catch (posixException e) { + displayPosixErrorMessage(e.msg); + addLogEntry("ERROR: Requested directory to search for and potentially create has a 'case-insensitive match' to an existing directory on OneDrive online."); } - // Check OneDrive Business Shared Folders, if configured to do so - if (syncBusinessFolders){ - // query OneDrive Business Shared Folders shared with me - log.vlog("Attempting to sync OneDrive Business Shared Folders"); - JSONValue graphQuery; - try { - graphQuery = onedrive.getSharedWithMe(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - graphQuery = onedrive.getSharedWithMe();"); - graphQuery = onedrive.getSharedWithMe(); + // Was a valid JSON response provided? + if (onlinePathData.type() == JSONType.object) { + // Valid JSON item was returned + searchItem = makeItem(onlinePathData); + addLogEntry("searchItem: " ~ to!string(searchItem), ["debug"]); + + // Is this item a potential Shared Folder? 
+ // Is this JSON a remote object + if (isItemRemote(onlinePathData)) { + // The path we are seeking is remote to our account drive id + searchItem.driveId = onlinePathData["remoteItem"]["parentReference"]["driveId"].str; + searchItem.id = onlinePathData["remoteItem"]["id"].str; + } + + // Set these items so that these can be used as required + singleDirectoryScopeDriveId = searchItem.driveId; + singleDirectoryScopeItemId = searchItem.id; + } else { + addLogEntry(); + addLogEntry("The requested --single-directory path to sync has generated an error. Please correct this error and try again."); + addLogEntry(); + Thread.sleep(dur!("msecs")(500)); + exit(EXIT_FAILURE); + } + } + + // Query OneDrive API for /delta changes and iterate through items online + void fetchOneDriveDeltaAPIResponse(string driveIdToQuery = null, string itemIdToQuery = null, string sharedFolderName = null) { + + string deltaLink = null; + string currentDeltaLink = null; + string deltaLinkAvailable; + JSONValue deltaChanges; + ulong responseBundleCount; + ulong jsonItemsReceived = 0; + + // Reset jsonItemsToProcess & processedCount + jsonItemsToProcess = []; + processedCount = 0; + + // Was a driveId provided as an input + //if (driveIdToQuery == "") { + if (strip(driveIdToQuery).empty) { + // No provided driveId to query, use the account default + addLogEntry("driveIdToQuery was empty, setting to appConfig.defaultDriveId", ["debug"]); + driveIdToQuery = appConfig.defaultDriveId; + addLogEntry("driveIdToQuery: " ~ driveIdToQuery, ["debug"]); + } + + // Was an itemId provided as an input + //if (itemIdToQuery == "") { + if (strip(itemIdToQuery).empty) { + // No provided itemId to query, use the account default + addLogEntry("itemIdToQuery was empty, setting to appConfig.defaultRootId", ["debug"]); + itemIdToQuery = appConfig.defaultRootId; + addLogEntry("itemIdToQuery: " ~ itemIdToQuery, ["debug"]); + } + + // What OneDrive API query do we use? + // - Are we running against a National Cloud Deployments that does not support /delta ? + // National Cloud Deployments do not support /delta as a query + // https://docs.microsoft.com/en-us/graph/deployments#supported-features + // + // - Are we performing a --single-directory sync, which will exclude many items online, focusing in on a specific online directory + // + // - Are we performing a --download-only --cleanup-local-files action? + // - If we are, and we use a normal /delta query, we get all the local 'deleted' objects as well. + // - If the user deletes a folder online, then replaces it online, we download the deletion events and process the new 'upload' via the web iterface .. + // the net effect of this, is that the valid local files we want to keep, are actually deleted ...... not desirable + if ((singleDirectoryScope) || (nationalCloudDeployment) || (cleanupLocalFiles)) { + // Generate a simulated /delta response so that we correctly capture the current online state, less any 'online' delete and replace activity + generateSimulatedDeltaResponse = true; + } + + // What /delta query do we use? + if (!generateSimulatedDeltaResponse) { + // This should be the majority default pathway application use + // Get the current delta link from the database for this DriveID and RootID + deltaLinkAvailable = itemDB.getDeltaLink(driveIdToQuery, itemIdToQuery); + if (!deltaLinkAvailable.empty) { + addLogEntry("Using database stored deltaLink", ["debug"]); + currentDeltaLink = deltaLinkAvailable; + } + + // Do we need to perform a Full Scan True Up? 
Is 'appConfig.fullScanTrueUpRequired' set to 'true'? + if (appConfig.fullScanTrueUpRequired) { + addLogEntry("Performing a full scan of online data to ensure consistent local state"); + addLogEntry("Setting currentDeltaLink = null", ["debug"]); + currentDeltaLink = null; + } + + // Dynamic output for non-verbose and verbose run so that the user knows something is being retreived from the OneDrive API + if (appConfig.verbosityCount == 0) { + if (!appConfig.surpressLoggingOutput) { + addLogEntry("Fetching items from the OneDrive API for Drive ID: " ~ driveIdToQuery, ["logFileOnly"]); + + // Use the dots to show the application is 'doing something' + addLogEntry("Fetching items from the OneDrive API for Drive ID: " ~ driveIdToQuery ~ " .", ["consoleOnlyNoNewLine"]); } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); - } - } - - if (graphQuery.type() == JSONType.object) { - string sharedFolderName; - foreach (searchResult; graphQuery["value"].array) { - // Configure additional logging items for this array element - string sharedByName; - string sharedByEmail; - // Extra details for verbose logging - if ("sharedBy" in searchResult["remoteItem"]["shared"]) { - if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str; - } - if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str; - } + } else { + addLogEntry("Fetching /delta response from the OneDrive API for Drive ID: " ~ driveIdToQuery, ["verbose"]); + } + + // Create a new API Instance for querying /delta and initialise it + // Reuse the socket to speed up + bool keepAlive = true; + OneDriveApi getDeltaQueryOneDriveApiInstance; + getDeltaQueryOneDriveApiInstance = new OneDriveApi(appConfig); + getDeltaQueryOneDriveApiInstance.initialise(keepAlive); + + for (;;) { + responseBundleCount++; + // Get the /delta changes via the OneDrive API + // getDeltaChangesByItemId has the re-try logic for transient errors + deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, currentDeltaLink, getDeltaQueryOneDriveApiInstance); + + // If the initial deltaChanges response is an invalid JSON object, keep trying .. + if (deltaChanges.type() != JSONType.object) { + while (deltaChanges.type() != JSONType.object) { + // Handle the invalid JSON response adn retry + addLogEntry("ERROR: Query of the OneDrive API via deltaChanges = getDeltaChangesByItemId() returned an invalid JSON response", ["debug"]); + deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, currentDeltaLink, getDeltaQueryOneDriveApiInstance); } + } - // is the shared item with us a 'folder' ? - if (isItemFolder(searchResult)) { - // item returned is a shared folder, not a shared file - sharedFolderName = searchResult["name"].str; - // Output Shared Folder Name early - log.vdebug("Shared Folder Name: ", sharedFolderName); - // Compare this to values in business_shared_folders - if(selectiveSync.isSharedFolderMatched(sharedFolderName)){ - // Folder name matches what we are looking for - // Flags for matching - bool itemInDatabase = false; - bool itemLocalDirExists = false; - bool itemPathIsLocal = false; - - // "what if" there are 2 or more folders shared with me have the "same" name? 
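
// Editorial sketch related to the "keep trying" loop added above: the patch retries
// getDeltaChangesByItemId() until a valid JSON object comes back, with no upper bound.
// The variant below shows the same idea with a retry cap and a short pause between
// attempts. All names here are local to this sketch and hypothetical.
import core.thread : Thread;
import core.time : dur;
import std.json : JSONValue, JSONType;

JSONValue fetchDeltaWithRetry(JSONValue delegate() queryDelta, int maxAttempts = 5) {
	JSONValue response = queryDelta();
	int attempts = 1;
	// Retry while the response is not a JSON object, up to maxAttempts
	while ((response.type() != JSONType.object) && (attempts < maxAttempts)) {
		Thread.sleep(dur!("msecs")(500));
		response = queryDelta();
		attempts++;
	}
	return response;
}
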
- // The folder name will be the same, but driveId will be different - // This will then cause these 'shared folders' to cross populate data, which may not be desirable - log.vdebug("Shared Folder Name: MATCHED to any entry in 'business_shared_folders'"); - log.vdebug("Parent Drive Id: ", searchResult["remoteItem"]["parentReference"]["driveId"].str); - log.vdebug("Shared Item Id: ", searchResult["remoteItem"]["id"].str); - Item databaseItem; - - // for each driveid in the existing driveIDsArray - foreach (searchDriveId; driveIDsArray) { - log.vdebug("searching database for: ", searchDriveId, " ", sharedFolderName); - if (itemdb.idInLocalDatabase(searchDriveId, searchResult["remoteItem"]["id"].str)){ - // Shared folder is present - log.vdebug("Found shared folder name in database"); - itemInDatabase = true; - // Query the DB for the details of this item - itemdb.selectByPath(sharedFolderName, searchDriveId, databaseItem); - log.vdebug("databaseItem: ", databaseItem); - // Does the databaseItem.driveId == defaultDriveId? - if (databaseItem.driveId == defaultDriveId) { - itemPathIsLocal = true; - } - } else { - log.vdebug("Shared folder name not found in database"); - // "what if" there is 'already' a local folder with this name - // Check if in the database - // If NOT in the database, but resides on disk, this could be a new local folder created after last sync but before this one - // However we sync 'shared folders' before checking for local changes - string localpath = expandTilde(cfg.getValueString("sync_dir")) ~ "/" ~ sharedFolderName; - if (exists(localpath)) { - // local path exists - log.vdebug("Found shared folder name in local OneDrive sync_dir"); - itemLocalDirExists = true; - } - } - } - - // Shared Folder Evaluation Debugging - log.vdebug("item in database: ", itemInDatabase); - log.vdebug("path exists on disk: ", itemLocalDirExists); - log.vdebug("database drive id matches defaultDriveId: ", itemPathIsLocal); - log.vdebug("database data matches search data: ", ((databaseItem.driveId == searchResult["remoteItem"]["parentReference"]["driveId"].str) && (databaseItem.id == searchResult["remoteItem"]["id"].str))); - - if ( ((!itemInDatabase) || (!itemLocalDirExists)) || (((databaseItem.driveId == searchResult["remoteItem"]["parentReference"]["driveId"].str) && (databaseItem.id == searchResult["remoteItem"]["id"].str)) && (!itemPathIsLocal)) ) { - // This shared folder does not exist in the database - if (!cfg.getValueBool("monitor")) { - log.log("Syncing this OneDrive Business Shared Folder: ", sharedFolderName); - } else { - log.vlog("Syncing this OneDrive Business Shared Folder: ", sharedFolderName); - } - Item businessSharedFolder = makeItem(searchResult); - - // Log who shared this to assist with sync data correlation - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName); - } - } - - // Do the actual sync - applyDifferences(businessSharedFolder.remoteDriveId, businessSharedFolder.remoteId, performFullItemScan); - // add this parent drive id to the array to search for, ready for next use - string newDriveID = searchResult["remoteItem"]["parentReference"]["driveId"].str; - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, newDriveID)) { - // Add this drive id to the array to search with - driveIDsArray ~= newDriveID; - } - } else { - // 
Shared Folder Name Conflict ... - log.log("WARNING: Skipping shared folder due to existing name conflict: ", sharedFolderName); - log.log("WARNING: Skipping changes of Path ID: ", searchResult["remoteItem"]["id"].str); - log.log("WARNING: To sync this shared folder, this shared folder needs to be renamed"); - - // Log who shared this to assist with conflict resolution - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("WARNING: Conflict Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.vlog("WARNING: Conflict Shared By: ", sharedByName); - } - } - } - } else { - log.vdebug("Shared Folder Name: NO MATCH to any entry in 'business_shared_folders'"); - } - } else { - // not a folder, is this a file? - if (isItemFile(searchResult)) { - // shared item is a file - string sharedFileName = searchResult["name"].str; - // log that this is not supported - log.vlog("WARNING: Not syncing this OneDrive Business Shared File: ", sharedFileName); - - // Log who shared this to assist with sync data correlation - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("OneDrive Business Shared File - Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.vlog("OneDrive Business Shared File - Shared By: ", sharedByName); - } - } - } else { - // something else entirely - log.log("WARNING: Not syncing this OneDrive Business Shared item: ", searchResult["name"].str); - } + ulong nrChanges = count(deltaChanges["value"].array); + int changeCount = 0; + + if (appConfig.verbosityCount == 0) { + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.surpressLoggingOutput) { + addLogEntry(".", ["consoleOnlyNoNewLine"]); } + } else { + addLogEntry("Processing API Response Bundle: " ~ to!string(responseBundleCount) ~ " - Quantity of 'changes|items' in this bundle to process: " ~ to!string(nrChanges), ["verbose"]); } - } else { - // Log that an invalid JSON object was returned - log.error("ERROR: onedrive.getSharedWithMe call returned an invalid JSON Object"); - } - } - } - - // download all new changes from a specified folder on OneDrive - void applyDifferencesSingleDirectory(const(string) path) - { - // Ensure we check the 'right' location for this directory on OneDrive - // It could come from the following places: - // 1. My OneDrive Root - // 2. My OneDrive Root as an Office 365 Shared Library - // 3. A OneDrive Business Shared Folder - // If 1 & 2, the configured default items are what we need - // If 3, we need to query OneDrive - - string driveId = defaultDriveId; - string rootId = defaultRootId; - string folderId; - string itemId; - JSONValue onedrivePathDetails; - - // Check OneDrive Business Shared Folders, if configured to do so - if (syncBusinessFolders){ - log.vlog("Attempting to sync OneDrive Business Shared Folders"); - // query OneDrive Business Shared Folders shared with me - JSONValue graphQuery; - try { - graphQuery = onedrive.getSharedWithMe(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); + + jsonItemsReceived = jsonItemsReceived + nrChanges; + + // We have a valid deltaChanges JSON array. This means we have at least 200+ JSON items to process. 
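
// For context, the loop surrounding this point follows the Microsoft Graph /delta
// paging contract: each response bundle carries either @odata.nextLink (more pages
// to fetch) or @odata.deltaLink (final page; the token to persist for the next sync).
// A compact sketch of that contract, with 'fetchPage' as a hypothetical helper:
import std.json : JSONValue;

string walkDeltaPages(JSONValue delegate(string link) fetchPage, string startLink) {
	string link = startLink;
	while (true) {
		JSONValue page = fetchPage(link);
		// ... process page["value"].array in order here ...
		if ("@odata.deltaLink" in page) {
			// Final page: store this token and replay it on the next sync cycle
			return page["@odata.deltaLink"].str;
		}
		// More pages remain
		link = page["@odata.nextLink"].str;
	}
}
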
+ // The API response however cannot be run in parallel as the OneDrive API sends the JSON items in the order in which they must be processed + foreach (onedriveJSONItem; deltaChanges["value"].array) { + // increment change count for this item + changeCount++; + // Process the OneDrive object item JSON + processDeltaJSONItem(onedriveJSONItem, nrChanges, changeCount, responseBundleCount, singleDirectoryScope); } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - graphQuery = onedrive.getSharedWithMe();"); - graphQuery = onedrive.getSharedWithMe(); + + // The response may contain either @odata.deltaLink or @odata.nextLink + if ("@odata.deltaLink" in deltaChanges) { + // Log action + addLogEntry("Setting next currentDeltaLink to (@odata.deltaLink): " ~ deltaChanges["@odata.deltaLink"].str, ["debug"]); + // Update currentDeltaLink + currentDeltaLink = deltaChanges["@odata.deltaLink"].str; + // Store this for later use post processing jsonItemsToProcess items + latestDeltaLink = deltaChanges["@odata.deltaLink"].str; } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + + // Update deltaLink to next changeSet bundle + if ("@odata.nextLink" in deltaChanges) { + // Log action + addLogEntry("Setting next currentDeltaLink & deltaLinkAvailable to (@odata.nextLink): " ~ deltaChanges["@odata.nextLink"].str, ["debug"]); + // Update currentDeltaLink + currentDeltaLink = deltaChanges["@odata.nextLink"].str; + // Update deltaLinkAvailable to next changeSet bundle to quantify how many changes we have to process + deltaLinkAvailable = deltaChanges["@odata.nextLink"].str; + // Store this for later use post processing jsonItemsToProcess items + latestDeltaLink = deltaChanges["@odata.nextLink"].str; } + else break; } - if (graphQuery.type() == JSONType.object) { - // valid response from OneDrive - string sharedFolderName; - foreach (searchResult; graphQuery["value"].array) { - // set sharedFolderName - sharedFolderName = searchResult["name"].str; - // Configure additional logging items for this array element - string sharedByName; - string sharedByEmail; - - // Extra details for verbose logging - if ("sharedBy" in searchResult["remoteItem"]["shared"]) { - if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str; - } - if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str; - } - } - - // Compare this to values in business_shared_folders - if(selectiveSync.isSharedFolderMatched(sharedFolderName)){ - // Matched sharedFolderName to item in business_shared_folders - log.vdebug("Matched sharedFolderName in business_shared_folders: ", sharedFolderName); - // But is this shared folder what we are looking for as part of --single-directory? 
- // User could be using 'directory' or 'directory/directory1/directory2/directory3/' - // Can we find 'sharedFolderName' in the given 'path' - if (canFind(path, sharedFolderName)) { - // Found 'sharedFolderName' in the given 'path' - log.vdebug("Matched 'sharedFolderName' in the given 'path'"); - // What was the matched folder JSON - log.vdebug("Matched sharedFolderName in business_shared_folders JSON: ", searchResult); - // Path we want to sync is on a OneDrive Business Shared Folder - // Set the correct driveId - driveId = searchResult["remoteItem"]["parentReference"]["driveId"].str; - // Set this items id - itemId = searchResult["remoteItem"]["id"].str; - log.vdebug("Updated the driveId to a new value: ", driveId); - log.vdebug("Updated the itemId to a new value: ", itemId); - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, driveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= driveId; - } - - // Log who shared this to assist with sync data correlation - if ((sharedByName != "") && (sharedByEmail != "")) { - log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.vlog("OneDrive Business Shared Folder - Shared By: ", sharedByName); - } - } - } - } + // To finish off the JSON processing items, this is needed to reflect this in the log + addLogEntry("------------------------------------------------------------------", ["debug"]); + + // Shutdown the API + getDeltaQueryOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(getDeltaQueryOneDriveApiInstance); + + // Log that we have finished querying the /delta API + if (appConfig.verbosityCount == 0) { + if (!appConfig.surpressLoggingOutput) { + // Close out the '....' 
being printed to the console + addLogEntry("\n", ["consoleOnlyNoNewLine"]); } } else { - // Log that an invalid JSON object was returned - log.error("ERROR: onedrive.getSharedWithMe call returned an invalid JSON Object"); + addLogEntry("Finished processing /delta JSON response from the OneDrive API", ["verbose"]); } - } - - // Test if the path we are going to sync from actually exists on OneDrive - log.vlog("Getting path details from OneDrive ..."); - try { - // Need to use different calls here - one call for majority, another if this is a OneDrive Business Shared Folder - if (!syncBusinessFolders){ - // Not a OneDrive Business Shared Folder - log.vdebug("Calling onedrive.getPathDetailsByDriveId(driveId, path) with: ", driveId, ", ", path); - onedrivePathDetails = onedrive.getPathDetailsByDriveId(driveId, path); - } else { - // OneDrive Business Shared Folder - Use another API call using the folders correct driveId and itemId - log.vdebug("Calling onedrive.getPathDetailsByDriveIdAndItemId(driveId, itemId) with: ", driveId, ", ", itemId); - onedrivePathDetails = onedrive.getPathDetailsByDriveIdAndItemId(driveId, itemId); + + // If this was set, now unset it, as this will have been completed, so that for a true up, we dont do a double full scan + if (appConfig.fullScanTrueUpRequired) { + addLogEntry("Unsetting fullScanTrueUpRequired as this has been performed", ["debug"]); + appConfig.fullScanTrueUpRequired = false; } - } catch (OneDriveException e) { - log.vdebug("onedrivePathDetails = onedrive.getPathDetails(path) generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found - if (syncBusinessFolders){ - // 404 was returned when trying to use a specific driveId and itemId .. which 'should' work .... but didnt - // Try the query with the path as a backup failsafe - log.vdebug("Calling onedrive.getPathDetailsByDriveId(driveId, path) as backup with: ", driveId, ", ", path); - try { - // try calling using the path - onedrivePathDetails = onedrive.getPathDetailsByDriveId(driveId, path); - } catch (OneDriveException e) { - - if (e.httpStatusCode == 404) { - log.error("ERROR: The requested single directory to sync was not found on OneDrive - Check folder permissions and sharing status with folder owner"); - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferencesSingleDirectory(path);"); - applyDifferencesSingleDirectory(path); - // return back to original call - return; - } - - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; - } - } - } else { - // Not a OneDrive Business Shared folder operation - log.error("ERROR: The requested single directory to sync was not found on OneDrive"); - return; - } + } else { + // Why are are generating a /delta response + addLogEntry("Why are we generating a /delta response:", ["debug"]); + addLogEntry(" singleDirectoryScope: " ~ to!string(singleDirectoryScope), ["debug"]); + addLogEntry(" nationalCloudDeployment: " ~ to!string(nationalCloudDeployment), ["debug"]); + addLogEntry(" cleanupLocalFiles: " ~ to!string(cleanupLocalFiles), ["debug"]); + + // What 'path' are we going to start generating the response for + string pathToQuery; + + // If --single-directory has been called, use the value that has been set + if (singleDirectoryScope) { + pathToQuery = appConfig.getValueString("single_directory"); } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferencesSingleDirectory(path);"); - applyDifferencesSingleDirectory(path); - // return back to original call - return; + // We could also be syncing a Shared Folder of some description + if (!sharedFolderName.empty) { + pathToQuery = sharedFolderName; } - - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + + // Generate the simulated /delta response + // + // The generated /delta response however contains zero deleted JSON items, so the only way that we can track this, is if the object was in sync + // we have the object in the database, thus, what we need to do is for every DB object in the tree of items, flag 'syncStatus' as 'N', then when we process + // the returned JSON items from the API, we flag the item as back in sync, then we can cleanup any out-of-sync items + // + // The flagging of the local database items to 'N' is handled within the generateDeltaResponse() function + // + // When these JSON items are then processed, if the item exists online, and is in the DB, and that the values match, the DB item is flipped back to 'Y' + // This then allows the application to look for any remaining 'N' values, and delete these as no longer needed locally + deltaChanges = generateDeltaResponse(pathToQuery); + + ulong nrChanges = count(deltaChanges["value"].array); + int changeCount = 0; + addLogEntry("API Response Bundle: " ~ to!string(responseBundleCount) ~ " - Quantity of 'changes|items' in this bundle to process: " ~ to!string(nrChanges), ["debug"]); + jsonItemsReceived = jsonItemsReceived + nrChanges; + + // The API response however cannot be run in parallel as the 
OneDrive API sends the JSON items in the order in which they must be processed + foreach (onedriveJSONItem; deltaChanges["value"].array) { + // increment change count for this item + changeCount++; + // Process the OneDrive object item JSON + processDeltaJSONItem(onedriveJSONItem, nrChanges, changeCount, responseBundleCount, singleDirectoryScope); + } + + // To finish off the JSON processing items, this is needed to reflect this in the log + addLogEntry("------------------------------------------------------------------", ["debug"]); + + // Log that we have finished generating our self generated /delta response + if (!appConfig.surpressLoggingOutput) { + addLogEntry("Finished processing self generated /delta JSON response from the OneDrive API"); } } - // OK - the path on OneDrive should exist, get the driveId and rootId for this folder - // Was the response a valid JSON Object? - if (onedrivePathDetails.type() == JSONType.object) { - // OneDrive Personal Shared Folder handling - // Is this item a remote item? - if(isItemRemote(onedrivePathDetails)){ - // 2 step approach: - // 1. Ensure changes for the root remote path are captured - // 2. Download changes specific to the remote path + // Cleanup deltaChanges as this is no longer needed + object.destroy(deltaChanges); + + // We have JSON items received from the OneDrive API + addLogEntry("Number of JSON Objects received from OneDrive API: " ~ to!string(jsonItemsReceived), ["debug"]); + addLogEntry("Number of JSON Objects already processed (root and deleted items): " ~ to!string((jsonItemsReceived - jsonItemsToProcess.length)), ["debug"]); + + // We should have now at least processed all the JSON items as returned by the /delta call + // Additionally, we should have a new array, that now contains all the JSON items we need to process that are non 'root' or deleted items + addLogEntry("Number of JSON items to process is: " ~ to!string(jsonItemsToProcess.length), ["debug"]); + + // Are there items to process? + if (jsonItemsToProcess.length > 0) { + // Lets deal with the JSON items in a batch process + ulong batchSize = 500; + ulong batchCount = (jsonItemsToProcess.length + batchSize - 1) / batchSize; + ulong batchesProcessed = 0; + + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.surpressLoggingOutput) { + // Logfile entry + addLogEntry("Processing " ~ to!string(jsonItemsToProcess.length) ~ " applicable changes and items received from Microsoft OneDrive", ["logFileOnly"]); + // Console only output + addLogEntry("Processing " ~ to!string(jsonItemsToProcess.length) ~ " applicable changes and items received from Microsoft OneDrive ", ["consoleOnlyNoNewLine"]); - // root remote - applyDifferences(defaultDriveId, onedrivePathDetails["id"].str, false); + if (appConfig.verbosityCount != 0) { + // Close out the console only processing line above, if we are doing verbose or above logging + addLogEntry("\n", ["consoleOnlyNoNewLine"]); + } + } - // remote changes - driveId = onedrivePathDetails["remoteItem"]["parentReference"]["driveId"].str; // Should give something like 66d53be8a5056eca - folderId = onedrivePathDetails["remoteItem"]["id"].str; // Should give something like BC7D88EC1F539DCF!107 + // For each batch, process the JSON items that need to be now processed. 
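
// Worked example of the batch bookkeeping used just below: with a batch size of 500,
// the ceiling division (n + batchSize - 1) / batchSize matches the number of slices
// that std.range.chunks yields, e.g. 1234 items -> (1234 + 499) / 500 = 3 batches of
// 500, 500 and 234 items. Purely illustrative; the names are local to this sketch.
import std.range : chunks;

void illustrateBatching() {
	int[] items = new int[](1234);
	size_t batchSize = 500;
	size_t batchCount = (items.length + batchSize - 1) / batchSize; // == 3
	foreach (batch; items.chunks(batchSize)) {
		// The first two batches hold 500 items each, the final batch holds 234
	}
}
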
+ // 'root' and deleted objects have already been handled + foreach (batchOfJSONItems; jsonItemsToProcess.chunks(batchSize)) { + // Chunk the total items to process into 500 lot items + batchesProcessed++; - // Apply any differences found on OneDrive for this path (download data) - applyDifferences(driveId, folderId, false); - } else { - // use the item id as folderId - folderId = onedrivePathDetails["id"].str; // Should give something like 12345ABCDE1234A1!101 - // Apply any differences found on OneDrive for this path (download data) - // Use driveId rather than defaultDriveId as this will be updated if path was matched to another parent driveId - applyDifferences(driveId, folderId, false); + if (appConfig.verbosityCount == 0) { + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.surpressLoggingOutput) { + addLogEntry(".", ["consoleOnlyNoNewLine"]); + } + } else { + addLogEntry("Processing OneDrive JSON item batch [" ~ to!string(batchesProcessed) ~ "/" ~ to!string(batchCount) ~ "] to ensure consistent local state", ["verbose"]); + } + + // Process the batch + processJSONItemsInBatch(batchOfJSONItems, batchesProcessed, batchCount); + + // To finish off the JSON processing items, this is needed to reflect this in the log + addLogEntry("------------------------------------------------------------------", ["debug"]); } - } else { - // Log that an invalid JSON object was returned - log.vdebug("onedrive.getPathDetails call returned an invalid JSON Object"); - } - } - - // make sure the OneDrive root is in our database - auto checkDatabaseForOneDriveRoot() - { - log.vlog("Fetching details for OneDrive Root"); - JSONValue rootPathDetails = onedrive.getDefaultRoot(); // Returns a JSON Value - - // validate object is a JSON value - if (rootPathDetails.type() == JSONType.object) { - // valid JSON object - Item rootPathItem = makeItem(rootPathDetails); - // configure driveId and rootId for the OneDrive Root - // Set defaults for the root folder - string driveId = rootPathDetails["parentReference"]["driveId"].str; // Should give something like 12345abcde1234a1 - string rootId = rootPathDetails["id"].str; // Should give something like 12345ABCDE1234A1!101 - - // Query the database - if (!itemdb.selectById(driveId, rootId, rootPathItem)) { - log.vlog("OneDrive Root does not exist in the database. We need to add it."); - applyDifference(rootPathDetails, driveId, true); - log.vlog("Added OneDrive Root to the local database"); - } else { - log.vlog("OneDrive Root exists in the database"); + + if (appConfig.verbosityCount == 0) { + // close off '.' 
output + if (!appConfig.surpressLoggingOutput) { + addLogEntry("\n", ["consoleOnlyNoNewLine"]); + } } + + // Free up memory and items processed as it is pointless now having this data around + jsonItemsToProcess = []; + + // Debug output - what was processed + addLogEntry("Number of JSON items to process is: " ~ to!string(jsonItemsToProcess.length), ["debug"]); + addLogEntry("Number of JSON items processed was: " ~ to!string(processedCount), ["debug"]); } else { - // Log that an invalid JSON object was returned - log.error("ERROR: Unable to query OneDrive for account details"); - log.vdebug("onedrive.getDefaultRoot call returned an invalid JSON Object"); - // Must exit here as we cant configure our required variables - onedrive.shutdown(); - exit(-1); + if (!appConfig.surpressLoggingOutput) { + addLogEntry("No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive"); + } } + + // Update the deltaLink in the database so that we can reuse this now that jsonItemsToProcess has been processed + if (!latestDeltaLink.empty) { + addLogEntry("Updating completed deltaLink in DB to: " ~ latestDeltaLink, ["debug"]); + itemDB.setDeltaLink(driveIdToQuery, itemIdToQuery, latestDeltaLink); + } + + // Keep the driveIDsArray with unique entries only + if (!canFind(driveIDsArray, driveIdToQuery)) { + // Add this driveId to the array of driveId's we know about + driveIDsArray ~= driveIdToQuery; + } } - // create a directory on OneDrive without syncing - auto createDirectoryNoSync(const(string) path) - { - // Attempt to create the requested path within OneDrive without performing a sync - log.vlog("Attempting to create the requested path within OneDrive"); - - // Handle the remote folder creation and updating of the local database without performing a sync - uploadCreateDir(path); - } - - // delete a directory on OneDrive without syncing - auto deleteDirectoryNoSync(const(string) path) - { - // Use the global's as initialised via init() rather than performing unnecessary additional HTTPS calls - const(char)[] rootId = defaultRootId; + // Process the /delta API JSON response items + void processDeltaJSONItem(JSONValue onedriveJSONItem, ulong nrChanges, int changeCount, ulong responseBundleCount, bool singleDirectoryScope) { - // Attempt to delete the requested path within OneDrive without performing a sync - log.vlog("Attempting to delete the requested path within OneDrive"); + // Variables for this foreach loop + string thisItemId; + bool itemIsRoot = false; + bool handleItemAsRootObject = false; + bool itemIsDeletedOnline = false; + bool itemHasParentReferenceId = false; + bool itemHasParentReferencePath = false; + bool itemIdMatchesDefaultRootId = false; + bool itemNameExplicitMatchRoot = false; + string objectParentDriveId; - // test if the path we are going to exists on OneDrive - try { - onedrive.getPathDetails(path); - } catch (OneDriveException e) { - log.vdebug("onedrive.getPathDetails(path) generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found on OneDrive - no need to delete it - log.vlog("The requested directory to delete was not found on OneDrive - skipping removing the remote directory as it doesn't exist"); - return; - } + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Processing OneDrive Item " ~ to!string(changeCount) ~ " of " ~ to!string(nrChanges) ~ " from API Response Bundle " ~ to!string(responseBundleCount), 
["debug"]); + addLogEntry("Raw JSON OneDrive Item: " ~ to!string(onedriveJSONItem), ["debug"]); + + // What is this item's id + thisItemId = onedriveJSONItem["id"].str; + // Is this a deleted item - only calculate this once + itemIsDeletedOnline = isItemDeleted(onedriveJSONItem); + + if(!itemIsDeletedOnline){ + // This is not a deleted item + addLogEntry("This item is not a OneDrive deletion change", ["debug"]); - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling deleteDirectoryNoSync(path);"); - deleteDirectoryNoSync(path); - // return back to original call - return; - } + // Only calculate this once + itemIsRoot = isItemRoot(onedriveJSONItem); + itemHasParentReferenceId = hasParentReferenceId(onedriveJSONItem); + itemIdMatchesDefaultRootId = (thisItemId == appConfig.defaultRootId); + itemNameExplicitMatchRoot = (onedriveJSONItem["name"].str == "root"); + objectParentDriveId = onedriveJSONItem["parentReference"]["driveId"].str; - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + // Test is this is the OneDrive Users Root? + // Debug output of change evaluation items + addLogEntry("defaultRootId = " ~ appConfig.defaultRootId, ["debug"]); + addLogEntry("'search id' = " ~ thisItemId, ["debug"]); + addLogEntry("id == defaultRootId = " ~ to!string(itemIdMatchesDefaultRootId), ["debug"]); + addLogEntry("isItemRoot(onedriveJSONItem) = " ~ to!string(itemIsRoot), ["debug"]); + addLogEntry("onedriveJSONItem['name'].str == 'root' = " ~ to!string(itemNameExplicitMatchRoot), ["debug"]); + addLogEntry("itemHasParentReferenceId = " ~ to!string(itemHasParentReferenceId), ["debug"]); + + if ( (itemIdMatchesDefaultRootId || singleDirectoryScope) && itemIsRoot && itemNameExplicitMatchRoot) { + // This IS a OneDrive Root item or should be classified as such in the case of 'singleDirectoryScope' + addLogEntry("JSON item will flagged as a 'root' item", ["debug"]); + handleItemAsRootObject = true; } } - Item item; - // Need to check all driveid's we know about, not just the defaultDriveId - bool itemInDB = false; - foreach (searchDriveId; driveIDsArray) { - if (itemdb.selectByPath(path, searchDriveId, item)) { - // item was found in the DB - itemInDB = true; - break; - } - } - // Was the item found in the DB - if (!itemInDB) { - // this is odd .. this directory is not in the local database - just go delete it - log.vlog("The requested directory to delete was not found in the local database - pushing delete request direct to OneDrive"); - uploadDeleteItem(item, path); + // How do we handle this JSON item from the OneDrive API? 
+ // Is this a confirmed 'root' item, has no Parent ID, or is a Deleted Item + if (handleItemAsRootObject || !itemHasParentReferenceId || itemIsDeletedOnline){ + // Is a root item, has no id in parentReference or is a OneDrive deleted item + addLogEntry("objectParentDriveId = " ~ objectParentDriveId, ["debug"]); + addLogEntry("handleItemAsRootObject = " ~ to!string(handleItemAsRootObject), ["debug"]); + addLogEntry("itemHasParentReferenceId = " ~ to!string(itemHasParentReferenceId), ["debug"]); + addLogEntry("itemIsDeletedOnline = " ~ to!string(itemIsDeletedOnline), ["debug"]); + addLogEntry("Handling change immediately as 'root item', or has no parent reference id or is a deleted item", ["debug"]); + + // OK ... do something with this JSON post here .... + processRootAndDeletedJSONItems(onedriveJSONItem, objectParentDriveId, handleItemAsRootObject, itemIsDeletedOnline, itemHasParentReferenceId); } else { - // the folder was in the local database - // Handle the deletion and saving any update to the local database - log.vlog("The requested directory to delete was found in the local database. Processing the deletion normally"); - deleteByPath(path); - } - } - - // rename a directory on OneDrive without syncing - auto renameDirectoryNoSync(string source, string destination) - { - try { - // test if the local path exists on OneDrive - onedrive.getPathDetails(source); - } catch (OneDriveException e) { - log.vdebug("onedrive.getPathDetails(source); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found - log.vlog("The requested directory to rename was not found on OneDrive"); - return; + // Do we need to update this RAW JSON from OneDrive? + if ( (objectParentDriveId != appConfig.defaultDriveId) && (appConfig.accountType == "business") && (appConfig.getValueBool("sync_business_shared_items")) ) { + // Potentially need to update this JSON data + addLogEntry("Potentially need to update this source JSON .... need to check the database", ["debug"]); + + // Check the DB for 'remote' objects, searching 'remoteDriveId' and 'remoteId' items for this remoteItem.driveId and remoteItem.id + Item remoteDBItem; + itemDB.selectByRemoteId(objectParentDriveId, thisItemId, remoteDBItem); + + // Is the data that was returned from the database what we are looking for? + if ((remoteDBItem.remoteDriveId == objectParentDriveId) && (remoteDBItem.remoteId == thisItemId)) { + // Yes, this is the record we are looking for + addLogEntry("DB Item response for remoteDBItem: " ~ to!string(remoteDBItem), ["debug"]); + + // Must compare remoteDBItem.name with remoteItem.name + if (remoteDBItem.name != onedriveJSONItem["name"].str) { + // Update JSON Item + string actualOnlineName = onedriveJSONItem["name"].str; + addLogEntry("Updating source JSON 'name' to that which is the actual local directory", ["debug"]); + addLogEntry("onedriveJSONItem['name'] was: " ~ onedriveJSONItem["name"].str, ["debug"]); + addLogEntry("Updating onedriveJSONItem['name'] to: " ~ remoteDBItem.name, ["debug"]); + onedriveJSONItem["name"] = remoteDBItem.name; + addLogEntry("onedriveJSONItem['name'] now: " ~ onedriveJSONItem["name"].str, ["debug"]); + // Add the original name to the JSON + onedriveJSONItem["actualOnlineName"] = actualOnlineName; + } + } } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling renameDirectoryNoSync(source, destination);"); - renameDirectoryNoSync(source, destination); - // return back to original call - return; + + // If we are not self-generating a /delta response, check this initial /delta JSON bundle item against the basic checks + // of applicability against 'skip_file', 'skip_dir' and 'sync_list' + // We only do this if we did not generate a /delta response, as generateDeltaResponse() performs the checkJSONAgainstClientSideFiltering() + // against elements as it is building the /delta compatible response + // If we blindly just 'check again' all JSON responses then there is potentially double JSON processing going on if we used generateDeltaResponse() + bool discardDeltaJSONItem = false; + if (!generateSimulatedDeltaResponse) { + // Check applicability against 'skip_file', 'skip_dir' and 'sync_list' + discardDeltaJSONItem = checkJSONAgainstClientSideFiltering(onedriveJSONItem); } - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + // Add this JSON item for further processing if this is not being discarded + if (!discardDeltaJSONItem) { + addLogEntry("Adding this Raw JSON OneDrive Item to jsonItemsToProcess array for further processing", ["debug"]); + jsonItemsToProcess ~= onedriveJSONItem; } } - // The OneDrive API returned a 200 OK status, so the folder exists - // Rename the requested directory on OneDrive without performing a sync - moveByPath(source, destination); } - // download the new changes of a specific item - // id is the root of the drive or a shared folder - private void applyDifferences(string driveId, const(char)[] id, bool performFullItemScan) - { - log.vlog("Applying changes of Path ID: " ~ id); - // function variables - char[] idToQuery; - JSONValue changes; - JSONValue changesAvailable; - JSONValue idDetails; - JSONValue currentDriveQuota; - string syncFolderName; - string syncFolderPath; - string syncFolderChildPath; - string deltaLink; - string deltaLinkAvailable; - bool nationalCloudChildrenScan = false; + // Process 'root' and 'deleted' OneDrive JSON items + void processRootAndDeletedJSONItems(JSONValue onedriveJSONItem, string driveId, bool handleItemAsRootObject, bool itemIsDeletedOnline, bool itemHasParentReferenceId) { - // Tracking processing performance - SysTime startFunctionProcessingTime; - SysTime endFunctionProcessingTime; - SysTime startBundleProcessingTime; - SysTime endBundleProcessingTime; - ulong cumulativeOneDriveItemCount = 0; - - if (displayProcessingTime) { - writeln("============================================================"); - writeln("Querying OneDrive API for relevant 'changes|items' stored online for this account"); - startFunctionProcessingTime = Clock.currTime(); - writeln("Start Function Processing Time: ", startFunctionProcessingTime); - } - - // Update the quota details for this driveId, as this could have changed since we started the application - the user could have added / deleted data online, or purchased additional storage - // Quota details are ONLY available for the main default driveId, as the OneDrive API does not provide quota details for shared folders - try { - currentDriveQuota = onedrive.getDriveQuota(driveId); - } catch 
(OneDriveException e) { - log.vdebug("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException"); - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferences(driveId, id, performFullItemScan);"); - applyDifferences(driveId, id, performFullItemScan); - // return back to original call - return; - } - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; - } - } + // Use the JSON elements rather can computing a DB struct via makeItem() + string thisItemId = onedriveJSONItem["id"].str; + string thisItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str; + + // Check if the item has been seen before + Item existingDatabaseItem; + bool existingDBEntry = itemDB.selectById(thisItemDriveId, thisItemId, existingDatabaseItem); - // validate that currentDriveQuota is a JSON value - if (currentDriveQuota.type() == JSONType.object) { - // Response from API contains valid data - // If 'personal' accounts, if driveId == defaultDriveId, then we will have data - // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - // If 'business' accounts, if driveId == defaultDriveId, then we will have data - // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be 0 values - if ("quota" in currentDriveQuota){ - if (driveId == defaultDriveId) { - // We potentially have updated quota remaining details available - // However in some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero - if ("remaining" in currentDriveQuota["quota"]){ - // We have valid quota details returned for the drive id - remainingFreeSpace = currentDriveQuota["quota"]["remaining"].integer; - if (remainingFreeSpace <= 0) { - if (accountType == "personal"){ - // zero space available - log.error("ERROR: OneDrive account currently has zero space available. Please free up some space online."); - quotaAvailable = false; - } else { - // zero space available is being reported, maybe being restricted? - log.error("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); - quotaRestricted = true; - } - } else { - // Display the updated value - log.vlog("Updated Remaining Free Space: ", remainingFreeSpace); - } - } - } else { - // quota details returned, but for a drive id that is not ours - if ("remaining" in currentDriveQuota["quota"]){ - // remaining is in the quota JSON response - if (currentDriveQuota["quota"]["remaining"].integer <= 0) { - // value returned is 0 or less than 0 - log.vlog("OneDrive quota information is set at zero, as this is not our drive id, ignoring"); - } - } + // Is the item deleted online? + if(!itemIsDeletedOnline) { + + // Is the item a confirmed root object? + + // The JSON item should be considered a 'root' item if: + // 1. Contains a ["root"] element + // 2. Has no ["parentReference"]["id"] ... 
#323 & #324 highlighted that this is false as some 'root' shared objects now can have an 'id' element .. OneDrive API change + // 2. Has no ["parentReference"]["path"] + // 3. Was detected by an input flag as to be handled as a root item regardless of actual status + + if ((handleItemAsRootObject) || (!itemHasParentReferenceId)) { + addLogEntry("Handing JSON object as OneDrive 'root' object", ["debug"]); + if (!existingDBEntry) { + // we have not seen this item before + saveItem(onedriveJSONItem); } + } + } else { + // Change is to delete an item + addLogEntry("Handing a OneDrive Deleted Item", ["debug"]); + if (existingDBEntry) { + // Flag to delete + addLogEntry("Flagging to delete item locally: " ~ to!string(onedriveJSONItem), ["debug"]); + idsToDelete ~= [thisItemDriveId, thisItemId]; } else { - // No quota details returned - if (driveId == defaultDriveId) { - // no quota details returned for current drive id - log.error("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online."); - } else { - // quota details not available - log.vdebug("OneDrive quota information is being restricted as this is not our drive id."); - } + // Flag to ignore + addLogEntry("Flagging item to skip: " ~ to!string(onedriveJSONItem), ["debug"]); + skippedItems.insert(thisItemId); } } - - // Query OneDrive API for the name of this folder id - try { - idDetails = onedrive.getPathDetailsById(driveId, id); - } catch (OneDriveException e) { - log.vdebug("idDetails = onedrive.getPathDetailsById(driveId, id) generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // id was not found - possibly a remote (shared) folder - log.vlog("No details returned for given Path ID"); - return; - } + } + + // Process each of the elements contained in jsonItemsToProcess[] + void processJSONItemsInBatch(JSONValue[] array, ulong batchGroup, ulong batchCount) { + + ulong batchElementCount = array.length; + + foreach (i, onedriveJSONItem; array.enumerate) { + // Use the JSON elements rather can computing a DB struct via makeItem() + ulong elementCount = i +1; - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
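
// Recap of the dispatch implemented in processDeltaJSONItem() above: an incoming
// /delta JSON item is handled immediately when it is a 'root' object, has no
// parentReference id, or is deleted online; everything else is queued in
// jsonItemsToProcess for batch processing. A summary sketch, with hypothetical names
// that simply mirror the flags used in this patch:
enum DeltaItemDisposition { handleImmediately, queueForBatch }

DeltaItemDisposition triageDeltaItem(bool handleItemAsRootObject, bool itemHasParentReferenceId, bool itemIsDeletedOnline) {
	if (handleItemAsRootObject || !itemHasParentReferenceId || itemIsDeletedOnline) {
		// The processRootAndDeletedJSONItems() path
		return DeltaItemDisposition.handleImmediately;
	}
	// The jsonItemsToProcess ~= onedriveJSONItem path (subject to client side filtering)
	return DeltaItemDisposition.queueForBatch;
}
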
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling applyDifferences(driveId, id, performFullItemScan);"); - applyDifferences(driveId, id, performFullItemScan); - // return back to original call - return; - } + // To show this is the processing for this particular item, start off with this breaker line + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Processing OneDrive JSON item " ~ to!string(elementCount) ~ " of " ~ to!string(batchElementCount) ~ " as part of JSON Item Batch " ~ to!string(batchGroup) ~ " of " ~ to!string(batchCount), ["debug"]); + addLogEntry("Raw JSON OneDrive Item: " ~ to!string(onedriveJSONItem), ["debug"]); - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; - } - } - - // validate that idDetails is a JSON value - if (idDetails.type() == JSONType.object) { - // Get the name of this 'Path ID' - if (("id" in idDetails) != null) { - // valid response from onedrive.getPathDetailsById(driveId, id) - a JSON item object present - if ((idDetails["id"].str == id) && (!isItemFile(idDetails))){ - // Is a Folder or Remote Folder - syncFolderName = idDetails["name"].str; - } - - // Debug output of path details as queried from OneDrive - log.vdebug("OneDrive Path Details: ", idDetails); - - // OneDrive Personal Folder Item Reference (24/4/2019) - // "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#drives('66d53be8a5056eca')/items/$entity", - // "cTag": "adDo2NkQ1M0JFOEE1MDU2RUNBITEwMS42MzY5MTY5NjQ1ODcwNzAwMDA", - // "eTag": "aNjZENTNCRThBNTA1NkVDQSExMDEuMQ", - // "fileSystemInfo": { - // "createdDateTime": "2018-06-06T20:45:24.436Z", - // "lastModifiedDateTime": "2019-04-24T07:09:31.29Z" - // }, - // "folder": { - // "childCount": 3, - // "view": { - // "sortBy": "takenOrCreatedDateTime", - // "sortOrder": "ascending", - // "viewType": "thumbnails" - // } - // }, - // "id": "66D53BE8A5056ECA!101", - // "name": "root", - // "parentReference": { - // "driveId": "66d53be8a5056eca", - // "driveType": "personal" - // }, - // "root": {}, - // "size": 0 - - // OneDrive Personal Remote / Shared Folder Item Reference (4/9/2019) - // "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#drives('driveId')/items/$entity", - // "cTag": "cTag", - // "eTag": "eTag", - // "id": "itemId", - // "name": "shared", - // "parentReference": { - // "driveId": "driveId", - // "driveType": "personal", - // "id": "parentItemId", - // "path": "/drive/root:" - // }, - // "remoteItem": { - // "fileSystemInfo": { - // "createdDateTime": "2019-01-14T18:54:43.2666667Z", - // "lastModifiedDateTime": "2019-04-24T03:47:22.53Z" - // }, - // "folder": { - // "childCount": 0, - // "view": { - // "sortBy": "takenOrCreatedDateTime", - // "sortOrder": "ascending", - // "viewType": "thumbnails" - // } - // }, - // "id": "remoteItemId", - // "parentReference": { - // "driveId": "remoteDriveId", - // "driveType": "personal" - // "id": "id", - // "name": "name", - // "path": "/drives//items/:/" - // }, - // "size": 0, - // "webUrl": "webUrl" - // } - - // OneDrive Business Folder & Shared Folder Item Reference (24/4/2019) - // "@odata.context": "https://graph.microsoft.com/v1.0/$metadata#drives('driveId')/items/$entity", - // "@odata.etag": 
"\"{eTag},1\"", - // "cTag": "\"c:{cTag},0\"", - // "eTag": "\"{eTag},1\"", - // "fileSystemInfo": { - // "createdDateTime": "2019-04-17T04:00:43Z", - // "lastModifiedDateTime": "2019-04-17T04:00:43Z" - // }, - // "folder": { - // "childCount": 2 - // }, - // "id": "itemId", - // "name": "shared_folder", - // "parentReference": { - // "driveId": "parentDriveId", - // "driveType": "business", - // "id": "parentId", - // "path": "/drives/driveId/root:" - // }, - // "size": 0 - - // To evaluate a change received from OneDrive, this must be set correctly - if (hasParentReferencePath(idDetails)) { - // Path from OneDrive has a parentReference we can use - log.vdebug("Item details returned contains parent reference path - potentially shared folder object"); - syncFolderPath = idDetails["parentReference"]["path"].str; - syncFolderChildPath = syncFolderPath ~ "/" ~ idDetails["name"].str ~ "/"; - } else { - // No parentReference, set these to blank - log.vdebug("Item details returned no parent reference path"); - syncFolderPath = ""; - syncFolderChildPath = ""; - } - - // Debug Output - log.vdebug("Sync Folder Name: ", syncFolderName); - log.vdebug("Sync Folder Parent Path: ", syncFolderPath); - log.vdebug("Sync Folder Child Path: ", syncFolderChildPath); - } - } else { - // Log that an invalid JSON object was returned - log.vdebug("onedrive.getPathDetailsById call returned an invalid JSON Object"); - } - - // Issue #658 - // If we are using a sync_list file, using deltaLink will actually 'miss' changes (moves & deletes) on OneDrive as using sync_list discards changes - // Use the performFullItemScan boolean to control whether we perform a full object scan of use the delta link for the root folder - // When using --synchronize the normal process order is: - // 1. Scan OneDrive for changes - // 2. Scan local folder for changes - // 3. Scan OneDrive for changes - // When using sync_list and performing a full scan, what this means is a full scan is performed twice, which leads to massive processing & time overheads - // Control this via performFullItemScan - - // Get the current delta link - deltaLinkAvailable = itemdb.getDeltaLink(driveId, id); - // if sync_list is not configured, syncListConfigured should be false - log.vdebug("syncListConfigured = ", syncListConfigured); - // oneDriveFullScanTrigger should be false unless set by actions on OneDrive and only if sync_list or skip_dir is used - log.vdebug("oneDriveFullScanTrigger = ", oneDriveFullScanTrigger); - // should only be set if 10th scan in monitor mode or as final true up sync in stand alone mode - log.vdebug("performFullItemScan = ", performFullItemScan); - - // do we override performFullItemScan if it is currently false and oneDriveFullScanTrigger is true? 
- if ((!performFullItemScan) && (oneDriveFullScanTrigger)) { - // forcing a full scan earlier than potentially normal - // oneDriveFullScanTrigger = true due to new folder creation request in a location that is now in-scope which was previously out of scope - performFullItemScan = true; - log.vdebug("overriding performFullItemScan as oneDriveFullScanTrigger was set"); - } - - // depending on the scan type (--monitor or --synchronize) performFullItemScan is set depending on the number of sync passes performed (--monitor) or ALWAYS if just --synchronize is used - if (!performFullItemScan){ - // performFullItemScan == false - // use delta link - log.vdebug("performFullItemScan is false, using the deltaLink as per database entry"); - if (deltaLinkAvailable == ""){ - deltaLink = ""; - log.vdebug("deltaLink was requested to be used, but contains no data - resulting API query will be treated as a full scan of OneDrive"); - } else { - deltaLink = deltaLinkAvailable; - log.vdebug("deltaLink contains valid data - resulting API query will be treated as a delta scan of OneDrive"); - } - } else { - // performFullItemScan == true - // do not use delta-link - deltaLink = ""; - log.vdebug("performFullItemScan is true, not using the database deltaLink so that we query all objects on OneDrive to compare against all local objects"); - } - - for (;;) { - - if (displayProcessingTime) { - writeln("------------------------------------------------------------"); - startBundleProcessingTime = Clock.currTime(); - writeln("Start 'change|item' API Response Bundle Processing Time: ", startBundleProcessingTime); - } + string thisItemId = onedriveJSONItem["id"].str; + string thisItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str; + string thisItemParentId = onedriveJSONItem["parentReference"]["id"].str; + string thisItemName = onedriveJSONItem["name"].str; - // Due to differences in OneDrive API's between personal and business we need to get changes only from defaultRootId - // If we used the 'id' passed in & when using --single-directory with a business account we get: - // 'HTTP request returned status code 501 (Not Implemented): view.delta can only be called on the root.' - // To view changes correctly, we need to use the correct path id for the request - if (driveId == defaultDriveId) { - // The drive id matches our users default drive id - log.vdebug("Configuring 'idToQuery' as defaultRootId duplicate"); - idToQuery = defaultRootId.dup; - } else { - // The drive id does not match our users default drive id - // Potentially the 'path id' we are requesting the details of is a Shared Folder (remote item) - // Use the 'id' that was passed in (folderId) - log.vdebug("Configuring 'idToQuery' as 'id' duplicate"); - idToQuery = id.dup; - } - // what path id are we going to query? - log.vdebug("Path object to query configured as 'idToQuery' = ", idToQuery); - long deltaChanges = 0; + // Create an empty item struct for an existing DB item + Item existingDatabaseItem; - // What query do we use? 
- // National Cloud Deployments do not support /delta as a query - // https://docs.microsoft.com/en-us/graph/deployments#supported-features - // Are we running against a National Cloud Deployments that does not support /delta - if (nationalCloudDeployment) { - // National Cloud Deployment that does not support /delta query - // Have to query /children and build our own /delta response - nationalCloudChildrenScan = true; - log.vdebug("Using /children call to query drive for items to populate 'changes' and 'changesAvailable'"); - // In a OneDrive Business Shared Folder scenario + nationalCloudDeployment, if ALL items are downgraded, then this leads to local file deletion - // Downgrade ONLY files associated with this driveId and idToQuery - log.vdebug("Downgrading all children for this driveId (" ~ driveId ~ ") and idToQuery (" ~ idToQuery ~ ") to an out-of-sync state"); - - // Before we get any data, flag any object in the database as out-of-sync for this driveID & ID - auto drivePathChildren = itemdb.selectChildren(driveId, idToQuery); - if (count(drivePathChildren) > 0) { - // Children to process and flag as out-of-sync - foreach (drivePathChild; drivePathChildren) { - // Flag any object in the database as out-of-sync for this driveID & ID - log.vdebug("Downgrading item as out-of-sync: ", drivePathChild.id); - itemdb.downgradeSyncStatusFlag(drivePathChild.driveId, drivePathChild.id); - } - } + // Do we NOT want this item? + bool unwanted = false; // meaning by default we will WANT this item + // Is this parent is in the database + bool parentInDatabase = false; + // What is the path of the new item + string newItemPath; + + // Configure the remoteItem - so if it is used, it can be utilised later + Item remoteItem; + + // Check the database for an existing entry for this JSON item + bool existingDBEntry = itemDB.selectById(thisItemDriveId, thisItemId, existingDatabaseItem); + + // Calculate if the Parent Item is in the database so that it can be re-used + parentInDatabase = itemDB.idInLocalDatabase(thisItemDriveId, thisItemParentId); + + // Calculate the path of this JSON item, but we can only do this if the parent is in the database + if (parentInDatabase) { + // Calculate this items path + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]); + } else { + // Parent not in the database + // Is the parent a 'folder' from another user? ie - is this a 'shared folder' that has been shared with us? + addLogEntry("Parent ID is not in DB .. 
", ["debug"]); - // Build own 'changes' response to simulate a /delta response - try { - // we have to 'build' our own JSON response that looks like /delta - changes = generateDeltaResponse(driveId, idToQuery); - if (changes.type() == JSONType.object) { - log.vdebug("Query 'changes = generateDeltaResponse(driveId, idToQuery)' performed successfully"); - } - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: changes = generateDeltaResponse(driveId, idToQuery)"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); - return; - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive items"); - } + // Why? + if (thisItemDriveId == appConfig.defaultDriveId) { + // Flagging as unwanted + addLogEntry("Flagging as unwanted: thisItemDriveId (" ~ thisItemDriveId ~ "), thisItemParentId (" ~ thisItemParentId ~ ") not in local database", ["debug"]); - // HTTP request returned status code 500 (Internal Server Error) - if (e.httpStatusCode == 500) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + if (thisItemParentId in skippedItems) { + addLogEntry("Reason: thisItemParentId listed within skippedItems", ["debug"]); } - - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // If an error is returned when querying 'changes' and we recall the original function, we go into a never ending loop where the sync never ends - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query OneDrive drive items - retrying applicable request"); - log.vdebug("changes = generateDeltaResponse(driveId, idToQuery) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - log.vdebug("Retrying Query - using original deltaLink after delay"); - } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: changes = generateDeltaResponse(driveId, idToQuery)"); - changes = generateDeltaResponse(driveId, idToQuery); - log.vdebug("Query 'changes = generateDeltaResponse(driveId, idToQuery)' performed successfully on re-try"); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: changes = generateDeltaResponse(driveId, idToQuery) on re-try after delay"); - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + unwanted = true; + } else { + // Edge case as the parent (from another users OneDrive account) will never be in the database - potentially a shared object? + addLogEntry("The reported parentId is not in the database. This potentially is a shared folder as 'remoteItem.driveId' != 'appConfig.defaultDriveId'. Relevant Details: remoteItem.driveId (" ~ remoteItem.driveId ~ "), remoteItem.parentId (" ~ remoteItem.parentId ~ ")", ["debug"]); + addLogEntry("Potential Shared Object JSON: " ~ to!string(onedriveJSONItem), ["debug"]); + + // Format the OneDrive change into a consumable object for the database + remoteItem = makeItem(onedriveJSONItem); + + if (appConfig.accountType == "personal") { + // Personal Account Handling + addLogEntry("Handling a Personal Shared Item JSON object", ["debug"]); + + if (hasSharedElement(onedriveJSONItem)) { + // Has the Shared JSON structure + addLogEntry("Personal Shared Item JSON object has the 'shared' JSON structure", ["debug"]); + + // Create a DB Tie Record for this parent object + addLogEntry("Creating a DB Tie for this Personal Shared Folder", ["debug"]); + + // DB Tie + Item parentItem; + parentItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str; + parentItem.id = onedriveJSONItem["parentReference"]["id"].str; + parentItem.name = "root"; + parentItem.type = ItemType.dir; + parentItem.mtime = remoteItem.mtime; + parentItem.parentId = null; + + // Add this DB Tie parent record to the local database + addLogEntry("Insert local database with remoteItem parent details: " ~ to!string(parentItem), ["debug"]); + itemDB.upsert(parentItem); } + + // Ensure that this item has no parent + addLogEntry("Setting remoteItem.parentId to be null", ["debug"]); + remoteItem.parentId = null; + // Add this record to the local database + addLogEntry("Update/Insert local database with remoteItem details with remoteItem.parentId as null: " ~ to!string(remoteItem), ["debug"]); + itemDB.upsert(remoteItem); } else { - // Default operation if not 404, 410, 429, 500 or 504 errors - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + // Business or SharePoint Account Handling + addLogEntry("Handling a Business or SharePoint Shared Item JSON object", ["debug"]); + + if (appConfig.accountType == "business") { + // Create a DB Tie Record for this parent object + addLogEntry("Creating a DB Tie for this Business Shared Folder", ["debug"]); + + // DB Tie + Item parentItem; + parentItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str; + parentItem.id = onedriveJSONItem["parentReference"]["id"].str; + parentItem.name = "root"; + parentItem.type = 
ItemType.dir; + parentItem.mtime = remoteItem.mtime; + parentItem.parentId = null; + + // Add this DB Tie parent record to the local database + addLogEntry("Insert local database with remoteItem parent details: " ~ to!string(parentItem), ["debug"]); + itemDB.upsert(parentItem); + + // Ensure that this item has no parent + addLogEntry("Setting remoteItem.parentId to be null", ["debug"]); + remoteItem.parentId = null; + + // Check the DB for 'remote' objects, searching 'remoteDriveId' and 'remoteId' items for this remoteItem.driveId and remoteItem.id + Item remoteDBItem; + itemDB.selectByRemoteId(remoteItem.driveId, remoteItem.id, remoteDBItem); + + // Must compare remoteDBItem.name with remoteItem.name + if ((!remoteDBItem.name.empty) && (remoteDBItem.name != remoteItem.name)) { + // Update DB Item + addLogEntry("The shared item stored in OneDrive, has a different name to the actual name on the remote drive", ["debug"]); + addLogEntry("Updating remoteItem.name JSON data with the actual name being used on account drive and local folder", ["debug"]); + addLogEntry("remoteItem.name was: " ~ remoteItem.name, ["debug"]); + addLogEntry("Updating remoteItem.name to: " ~ remoteDBItem.name, ["debug"]); + remoteItem.name = remoteDBItem.name; + addLogEntry("Setting remoteItem.remoteName to: " ~ onedriveJSONItem["name"].str, ["debug"]); + + // Update JSON Item + remoteItem.remoteName = onedriveJSONItem["name"].str; + addLogEntry("Updating source JSON 'name' to that which is the actual local directory", ["debug"]); + addLogEntry("onedriveJSONItem['name'] was: " ~ onedriveJSONItem["name"].str, ["debug"]); + addLogEntry("Updating onedriveJSONItem['name'] to: " ~ remoteDBItem.name, ["debug"]); + onedriveJSONItem["name"] = remoteDBItem.name; + addLogEntry("onedriveJSONItem['name'] now: " ~ onedriveJSONItem["name"].str, ["debug"]); + + // Update newItemPath value + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ remoteDBItem.name; + addLogEntry("New Item updated calculated full path is: " ~ newItemPath, ["debug"]); + } + + // Add this record to the local database + addLogEntry("Update/Insert local database with remoteItem details: " ~ to!string(remoteItem), ["debug"]); + itemDB.upsert(remoteItem); + } else { + // Sharepoint account type + addLogEntry("Handling a SharePoint Shared Item JSON object - NOT IMPLEMENTED ........ 
", ["debug"]); + + } } } - } else { - log.vdebug("Using /delta call to query drive for items to populate 'changes' and 'changesAvailable'"); - // query for changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - try { - // Fetch the changes relative to the path id we want to query - log.vdebug("Attempting query 'changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)'"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - log.vdebug("Previous deltaLink: ", deltaLink); - // changes with or without deltaLink - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - if (changes.type() == JSONType.object) { - log.vdebug("Query 'changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)' performed successfully"); - log.vdebug("OneDrive API /delta response: ", changes); - } - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)"); - - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); - return; - } - - // HTTP request returned status code 410 (The requested resource is no longer available at the server) - if (e.httpStatusCode == 410) { - log.vdebug("Delta link expired for 'onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)', setting 'deltaLink = null'"); - deltaLink = null; - continue; - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query changes from OneDrive using deltaLink"); - } - - // HTTP request returned status code 500 (Internal Server Error) - if (e.httpStatusCode == 500) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + } + + // Check the skippedItems array for the parent id of this JSONItem if this is something we need to skip + if (!unwanted) { + if (thisItemParentId in skippedItems) { + // Flag this JSON item as unwanted + addLogEntry("Flagging as unwanted: find(thisItemParentId).length != 0", ["debug"]); + unwanted = true; - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // If an error is returned when querying 'changes' and we recall the original function, we go into a never ending loop where the sync never ends - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request"); - log.vdebug("changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - log.vdebug("Retrying Query - using original deltaLink after delay"); - } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)"); - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - log.vdebug("Query 'changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)' performed successfully on re-try"); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink) on re-try after delay"); - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request"); - log.vdebug("changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink) previously threw an error - retrying with empty deltaLink"); - try { - // try query with empty deltaLink value - deltaLink = null; - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - log.vdebug("Query 'changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink)' performed successfully on re-try"); - } catch (OneDriveException e) { - // Tried 3 times, give up - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + // Is this item id in the database? + if (existingDBEntry) { + // item exists in database, most likely moved out of scope for current client configuration + addLogEntry("This item was previously synced / seen by the client", ["debug"]); + + if (("name" in onedriveJSONItem["parentReference"]) != null) { + + // How is this out of scope? + // is sync_list configured + if (syncListConfigured) { + // sync_list configured and in use + if (selectiveSync.isPathExcludedViaSyncList(onedriveJSONItem["parentReference"]["name"].str)) { + // Previously synced item is now out of scope as it has been moved out of what is included in sync_list + addLogEntry("This previously synced item is now excluded from being synced due to sync_list exclusion", ["debug"]); } - } else { - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; } + // flag to delete local file as it now is no longer in sync with OneDrive + addLogEntry("Flagging to delete item locally: ", ["debug"]); + idsToDelete ~= [thisItemDriveId, thisItemId]; } + } + } + } + + // Check the item type - if it not an item type that we support, we cant process the JSON item + if (!unwanted) { + if (isItemFile(onedriveJSONItem)) { + addLogEntry("The item we are syncing is a file", ["debug"]); + } else if (isItemFolder(onedriveJSONItem)) { + addLogEntry("The item we are syncing is a folder", ["debug"]); + } else if (isItemRemote(onedriveJSONItem)) { + addLogEntry("The item we are syncing is a remote item", ["debug"]); + } else { + // Why was this unwanted? 
+ if (newItemPath.empty) {
+ // Compute this item path & need the full path for this file
+ newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName;
+ addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]);
+ }
+ // Microsoft OneNote container objects present as neither folder nor file, but they do have a file size
+ if ((!isItemFile(onedriveJSONItem)) && (!isItemFolder(onedriveJSONItem)) && (hasFileSize(onedriveJSONItem))) {
+ // Log that this was skipped as this was a Microsoft OneNote item and unsupported
+ addLogEntry("The Microsoft OneNote Notebook '" ~ newItemPath ~ "' is not supported by this client", ["verbose"]);
 } else {
- // Default operation if not 404, 410, 429, 500 or 504 errors
- // Issue #1174 handling where stored deltaLink is invalid
- if ((e.httpStatusCode == 400) && (deltaLink != "")) {
- // Set deltaLink to an empty entry so invalid URL is not reused
- string emptyDeltaLink = "";
- itemdb.setDeltaLink(driveId, idToQuery, emptyDeltaLink);
- }
- // display what the error is
- displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
- return;
+ // Log that this item was skipped as unsupported
+ addLogEntry("The OneDrive item '" ~ newItemPath ~ "' is not supported by this client", ["verbose"]);
 }
+ unwanted = true;
+ addLogEntry("Flagging as unwanted: item type is not supported", ["debug"]);
 }
-
- // query for changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable);
- try {
- // Fetch the changes relative to the path id we want to query
- log.vdebug("Attempting query 'changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)'");
- log.vdebug("driveId: ", driveId);
- log.vdebug("idToQuery: ", idToQuery);
- log.vdebug("deltaLinkAvailable: ", deltaLinkAvailable);
- // changes based on deltaLink
- changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable);
- if (changesAvailable.type() == JSONType.object) {
- log.vdebug("Query 'changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)' performed successfully");
- // are there any delta changes?
- if (("value" in changesAvailable) != null) {
- deltaChanges = count(changesAvailable["value"].array);
- log.vdebug("changesAvailable query reports that there are " , deltaChanges , " changes that need processing on OneDrive");
+ }
+
+ // Check if this is excluded by config option: skip_dir
+ if (!unwanted) {
+ // Only check path if config is != ""
+ if (!appConfig.getValueString("skip_dir").empty) {
+ // Is the item a folder? 
+ if (isItemFolder(onedriveJSONItem)) { + // work out the 'snippet' path where this folder would be created + string simplePathToCheck = ""; + string complexPathToCheck = ""; + string matchDisplay = ""; + + if (hasParentReference(onedriveJSONItem)) { + // we need to workout the FULL path for this item + // simple path + if (("name" in onedriveJSONItem["parentReference"]) != null) { + simplePathToCheck = onedriveJSONItem["parentReference"]["name"].str ~ "/" ~ onedriveJSONItem["name"].str; + } else { + simplePathToCheck = onedriveJSONItem["name"].str; + } + addLogEntry("skip_dir path to check (simple): " ~ simplePathToCheck, ["debug"]); + + // complex path + if (parentInDatabase) { + // build up complexPathToCheck + complexPathToCheck = buildNormalizedPath(newItemPath); + } else { + addLogEntry("Parent details not in database - unable to compute complex path to check", ["debug"]); + } + if (!complexPathToCheck.empty) { + addLogEntry("skip_dir path to check (complex): " ~ complexPathToCheck, ["debug"]); + } + } else { + simplePathToCheck = onedriveJSONItem["name"].str; + } + + // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder + // then isDirNameExcluded matching will not work + // Clean up 'root:' if present + if (startsWith(simplePathToCheck, "root:")){ + addLogEntry("Updating simplePathToCheck to remove 'root:'", ["debug"]); + simplePathToCheck = strip(simplePathToCheck, "root:"); + } + if (startsWith(complexPathToCheck, "root:")){ + addLogEntry("Updating complexPathToCheck to remove 'root:'", ["debug"]); + complexPathToCheck = strip(complexPathToCheck, "root:"); + } + + // OK .. what checks are we doing? + if ((!simplePathToCheck.empty) && (complexPathToCheck.empty)) { + // just a simple check + addLogEntry("Performing a simple check only", ["debug"]); + unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); + } else { + // simple and complex + addLogEntry("Performing a simple then complex path match if required", ["debug"]); + + // simple first + addLogEntry("Performing a simple check first", ["debug"]); + unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); + matchDisplay = simplePathToCheck; + if (!unwanted) { + // simple didnt match, perform a complex check + addLogEntry("Simple match was false, attempting complex match", ["debug"]); + unwanted = selectiveSync.isDirNameExcluded(complexPathToCheck); + matchDisplay = complexPathToCheck; + } + } + // result + addLogEntry("skip_dir exclude result (directory based): " ~ to!string(unwanted), ["debug"]); + if (unwanted) { + // This path should be skipped + addLogEntry("Skipping item - excluded by skip_dir config: " ~ matchDisplay, ["verbose"]); } } - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)"); - - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); - return; - } - - // HTTP request returned status code 410 (The requested resource is no longer available at the server) - if (e.httpStatusCode == 410) { - log.vdebug("Delta link expired for 'onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)', 
setting 'deltaLinkAvailable = null'"); - deltaLinkAvailable = null; - continue; - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query changes from OneDrive using deltaLinkAvailable"); - } - - // HTTP request returned status code 500 (Internal Server Error) - if (e.httpStatusCode == 500) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + // Is the item a file? + // We need to check to see if this files path is excluded as well + if (isItemFile(onedriveJSONItem)) { - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // If an error is returned when querying 'changes' and we recall the original function, we go into a never ending loop where the sync never ends - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request"); - log.vdebug("changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. - log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - log.vdebug("Retrying Query - using original deltaLinkAvailable after delay"); + string pathToCheck; + // does the newItemPath start with '/'? + if (!startsWith(newItemPath, "/")){ + // path does not start with '/', but we need to check skip_dir entries with and without '/' + // so always make sure we are checking a path with '/' + pathToCheck = '/' ~ dirName(newItemPath); + } else { + pathToCheck = dirName(newItemPath); } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)"); - changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable); - log.vdebug("Query 'changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)' performed successfully on re-try"); - if (changesAvailable.type() == JSONType.object) { - // are there any delta changes? 
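The removed block here is one instance of the general /delta paging pattern: process each page's `value` array, follow `@odata.nextLink` while more pages remain, and persist `@odata.deltaLink` for the next incremental sync cycle (resetting the link when the API reports HTTP 410). A compact sketch of that loop follows, with `fetchDelta` and `processItem` as hypothetical callbacks and no error handling. The same loop shape is what lets the client resume from a saved deltaLink instead of rescanning the whole drive.

```d
import std.json;
import std.stdio;

// Walk a Microsoft Graph style delta feed: follow '@odata.nextLink' until the last
// page, then keep '@odata.deltaLink' for the next incremental sync cycle.
string processDeltaFeed(JSONValue delegate(string deltaLink) fetchDelta,
                        void delegate(JSONValue item) processItem,
                        string savedDeltaLink) {
    string deltaLink = savedDeltaLink;
    for (;;) {
        JSONValue page = fetchDelta(deltaLink);
        if (page.type() != JSONType.object) break;        // defensive: invalid response
        if ("value" in page) {
            foreach (item; page["value"].array) {
                processItem(item);                        // hand each change to the caller
            }
        }
        if ("@odata.nextLink" in page) {
            deltaLink = page["@odata.nextLink"].str;      // more pages to fetch
            continue;
        }
        if ("@odata.deltaLink" in page) {
            deltaLink = page["@odata.deltaLink"].str;     // remember for the next sync
        }
        break;
    }
    return deltaLink;
}

void main() {
    // Single fake page that immediately hands back a deltaLink
    JSONValue fetchDelta(string link) {
        return parseJSON(`{"value": [], "@odata.deltaLink": "NEXT-DELTA-LINK"}`);
    }
    void processItem(JSONValue item) {
        writeln(item);
    }
    writeln(processDeltaFeed(&fetchDelta, &processItem, ""));
}
```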
- if (("value" in changesAvailable) != null) { - deltaChanges = count(changesAvailable["value"].array); - log.vdebug("changesAvailable query reports that there are " , deltaChanges , " changes that need processing on OneDrive"); - } - } - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable) on re-try after delay"); - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query for changes - retrying applicable request"); - log.vdebug("changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable) previously threw an error - retrying with empty deltaLinkAvailable"); - // Increase delay and wait again before retry - log.vdebug("Thread sleeping for 90 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(90)); - log.vdebug("Retrying Query - using a null deltaLinkAvailable after delay"); - try { - // try query with empty deltaLinkAvailable value - deltaLinkAvailable = null; - changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable); - log.vdebug("Query 'changesAvailable = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLinkAvailable)' performed successfully on re-try"); - if (changesAvailable.type() == JSONType.object) { - // are there any delta changes? - if (("value" in changesAvailable) != null) { - deltaChanges = count(changesAvailable["value"].array); - log.vdebug("changesAvailable query reports that there are " , deltaChanges , " changes that need processing on OneDrive when using a null deltaLink value"); - } - } - } catch (OneDriveException e) { - // Tried 3 times, give up - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - - // OK .. 
if this was a 504, and running with --download-only & --cleanup-local-files - // need to exit to preserve local data, otherwise potential files will be deleted that should not be deleted - // leading to undesirable potential data loss scenarios - if ((e.httpStatusCode == 504) && (cleanupLocalFiles)) { - // log why we are exiting - log.log("Exiting application due to OneDrive API Gateway Timeout & --download-only & --cleanup-local-files configured to preserve local data"); - // Must exit here - onedrive.shutdown(); - exit(-1); - } - return; - } - } else { - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } + + // perform the check + unwanted = selectiveSync.isDirNameExcluded(pathToCheck); + // result + addLogEntry("skip_dir exclude result (file based): " ~ to!string(unwanted), ["debug"]); + if (unwanted) { + // this files path should be skipped + addLogEntry("Skipping item - file path is excluded by skip_dir config: " ~ newItemPath, ["verbose"]); } - } else { - // Default operation if not 404, 410, 429, 500 or 504 errors - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; } } } - // In some OneDrive Business scenarios, the shared folder /delta response lacks the 'root' drive details - // When this occurs, this creates the following error: A database statement execution error occurred: foreign key constraint failed - // Ensure we query independently the root details for this shared folder and ensure that it is added before we process the /delta response - - // However, if we are using a National Cloud Deployment, these deployments do not support /delta, so we generate a /delta response via generateDeltaResponse() - // This specifically adds the root drive details to the self generated /delta response - if ((!nationalCloudDeployment) && (driveId!= defaultDriveId) && (syncBusinessFolders)) { - // fetch this driveId root details to ensure we add this to the database for this remote drive - JSONValue rootData; - - try { - rootData = onedrive.getDriveIdRoot(driveId); - } catch (OneDriveException e) { - log.vdebug("rootData = onedrive.getDriveIdRoot(driveId) generated a OneDriveException"); - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - if (e.httpStatusCode == 429) { - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - retrying applicable request"); - handleOneDriveThrottleRequest(); + // Check if this is excluded by config option: skip_file + if (!unwanted) { + // Is the JSON item a file? + if (isItemFile(onedriveJSONItem)) { + // skip_file can contain 4 types of entries: + // - wildcard - *.txt + // - text + wildcard - name*.txt + // - full path + combination of any above two - /path/name*.txt + // - full path to file - /path/to/file.txt + + // is the parent id in the database? 
+ if (parentInDatabase) { + // Compute this item path & need the full path for this file + if (newItemPath.empty) { + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]); } - if (e.httpStatusCode == 504) { - log.vdebug("Retrying original request that generated the HTTP 504 (Gateway Timeout) - retrying applicable request"); - Thread.sleep(dur!"seconds"(30)); + + // The path that needs to be checked needs to include the '/' + // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched + // However, as 'path' used throughout, use a temp variable with this modification so that we use the temp variable for exclusion checks + string exclusionTestPath = ""; + if (!startsWith(newItemPath, "/")){ + // Add '/' to the path + exclusionTestPath = '/' ~ newItemPath; } - // Retry original request by calling function again to avoid replicating any further error handling - rootData = onedrive.getDriveIdRoot(driveId); + addLogEntry("skip_file item to check: " ~ exclusionTestPath, ["debug"]); + unwanted = selectiveSync.isFileNameExcluded(exclusionTestPath); + addLogEntry("Result: " ~ to!string(unwanted), ["debug"]); + if (unwanted) addLogEntry("Skipping item - excluded by skip_file config: " ~ thisItemName, ["verbose"]); } else { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + // parent id is not in the database + unwanted = true; + addLogEntry("Skipping file - parent path not present in local database", ["verbose"]); } } - - // apply this root drive data - applyDifference(rootData, driveId, true); } - // Process /delta response from OneDrive - // is changes a valid JSON response - if (changes.type() == JSONType.object) { - // Are there any changes to process? - if ((("value" in changes) != null) && ((deltaChanges > 0) || (oneDriveFullScanTrigger) || (nationalCloudChildrenScan) || (syncBusinessFolders) )) { - auto nrChanges = count(changes["value"].array); - auto changeCount = 0; - - // Display the number of changes or OneDrive objects we are processing - // OneDrive ships 'changes' in ~200 bundles. We display that we are processing X number of objects - // Do not display anything unless we are doing a verbose debug as due to #658 we are essentially doing a --resync each time when using sync_list - - // performance logging output - if (displayProcessingTime) { - writeln("Number of 'change|item' in this API Response Bundle from OneDrive to process: ", nrChanges); + // Check if this is included or excluded by use of sync_list + if (!unwanted) { + // No need to try and process something against a sync_list if it has been configured + if (syncListConfigured) { + // Compute the item path if empty - as to check sync_list we need an actual path to check + if (newItemPath.empty) { + // Calculate this items path + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]); } - // is nrChanges >= min_notify_changes (default of min_notify_changes = 5) - if (nrChanges >= cfg.getValueLong("min_notify_changes")) { - // nrChanges is >= than min_notify_changes - // verbose log, no 'notify' .. 
it is over the top - if (!syncListConfigured) { - // sync_list is not being used - lets use the right messaging here - if (oneDriveFullScanTrigger) { - // full scan was triggered out of cycle - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being triggered by actions on OneDrive"); - // unset now the full scan trigger if set - unsetOneDriveFullScanTrigger(); - } else { - // no sync_list in use, oneDriveFullScanTrigger not set via sync_list or skip_dir - if (performFullItemScan){ - // performFullItemScan was set - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being requested"); - } else { - // default processing message - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state"); - } - } + // What path are we checking? + addLogEntry("sync_list item to check: " ~ newItemPath, ["debug"]); + + // Unfortunatly there is no avoiding this call to check if the path is excluded|included via sync_list + if (selectiveSync.isPathExcludedViaSyncList(newItemPath)) { + // selective sync advised to skip, however is this a file and are we configured to upload / download files in the root? + if ((isItemFile(onedriveJSONItem)) && (appConfig.getValueBool("sync_root_files")) && (rootName(newItemPath) == "") ) { + // This is a file + // We are configured to sync all files in the root + // This is a file in the logical root + unwanted = false; } else { - // sync_list is being used - why are we going through the entire OneDrive contents? - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to sync_list being used"); - } - } else { - // There are valid changes but less than the min_notify_changes configured threshold - // We will only output the number of changes being processed to debug log if this is set to assist with debugging - // As this is debug logging, messaging can be the same, regardless of sync_list being used or not - - // is performFullItemScan set due to a full scan required? - // is oneDriveFullScanTrigger set due to a potentially out-of-scope item now being in-scope - if ((performFullItemScan) || (oneDriveFullScanTrigger)) { - // oneDriveFullScanTrigger should be false unless set by actions on OneDrive and only if sync_list or skip_dir is used - log.vdebug("performFullItemScan or oneDriveFullScanTrigger = true"); - // full scan was requested or triggered - // use the right message - if (oneDriveFullScanTrigger) { - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being triggered by actions on OneDrive"); - // unset now the full scan trigger if set - unsetOneDriveFullScanTrigger(); - } else { - log.vlog("Processing ", nrChanges, " OneDrive items to ensure consistent local state due to a full scan being requested"); + // path is unwanted + unwanted = true; + addLogEntry("Skipping item - excluded by sync_list config: " ~ newItemPath, ["verbose"]); + // flagging to skip this item now, but does this exist in the DB thus needs to be removed / deleted? 
+ if (existingDBEntry) { + // flag to delete + addLogEntry("Flagging item for local delete as item exists in database: " ~ newItemPath, ["verbose"]); + idsToDelete ~= [thisItemDriveId, thisItemId]; } - } else { - // standard message - log.vlog("Number of items from OneDrive to process: ", nrChanges); } } - - // Add nrChanges to cumulativeOneDriveItemCount so we can detail how may items in total were processed - cumulativeOneDriveItemCount = cumulativeOneDriveItemCount + nrChanges; - - foreach (item; changes["value"].array) { - bool isRoot = false; - string thisItemParentPath; - string thisItemFullPath; - changeCount++; - - // Change as reported by OneDrive - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Processing change ", changeCount, " of ", nrChanges); - log.vdebug("OneDrive Change: ", item); - - // Deleted items returned from onedrive.viewChangesByItemId or onedrive.viewChangesByDriveId (/delta) do not have a 'name' attribute - // Thus we cannot name check for 'root' below on deleted items - if(!isItemDeleted(item)){ - // This is not a deleted item - log.vdebug("Not a OneDrive deleted item change"); - // Test is this is the OneDrive Users Root? - // Debug output of change evaluation items - log.vdebug("defaultRootId = ", defaultRootId); - log.vdebug("'search id' = ", id); - log.vdebug("id == defaultRootId = ", (id == defaultRootId)); - log.vdebug("isItemRoot(item) = ", (isItemRoot(item))); - log.vdebug("item['name'].str == 'root' = ", (item["name"].str == "root")); - log.vdebug("singleDirectoryScope = ", (singleDirectoryScope)); - - // Use the global's as initialised via init() rather than performing unnecessary additional HTTPS calls - // In a --single-directory scenario however, '(id == defaultRootId) = false' for root items - if ( ((id == defaultRootId) || (singleDirectoryScope)) && (isItemRoot(item)) && (item["name"].str == "root")) { - // This IS a OneDrive Root item - log.vdebug("Change will flagged as a 'root' item change"); - isRoot = true; - } - } - - // How do we handle this change? - if (isRoot || !hasParentReferenceId(item) || isItemDeleted(item)){ - // Is a root item, has no id in parentReference or is a OneDrive deleted item - log.vdebug("isRoot = ", isRoot); - log.vdebug("!hasParentReferenceId(item) = ", (!hasParentReferenceId(item))); - log.vdebug("isItemDeleted(item) = ", (isItemDeleted(item))); - log.vdebug("Handling change as 'root item', has no parent reference or is a deleted item"); - applyDifference(item, driveId, isRoot); - } else { - // What is this item's parent path? 
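The sync_list handling above boils down to a small decision: an excluded path is still wanted when it is a file sitting directly in the root and `sync_root_files` is enabled, and an excluded item that is already in the database gets flagged for local deletion. Below is a hedged restatement of that rule as a standalone function, with `isPathExcludedViaSyncList` passed in as a placeholder for the client's matcher and a deliberately crude root-path test.

```d
import std.path : dirName;
import std.stdio;

// Outcome of checking one JSON item against sync_list
struct SyncListDecision {
    bool unwanted;            // exclude the item from further processing
    bool flagForLocalDelete;  // it is already in the DB, so the local copy must go
}

SyncListDecision evaluateSyncList(string newItemPath, bool isFile, bool syncRootFiles,
                                  bool existsInDatabase,
                                  bool delegate(string) isPathExcludedViaSyncList) {
    SyncListDecision decision;
    if (!isPathExcludedViaSyncList(newItemPath)) return decision;  // included: nothing to do
    // crude "is this in the logical root" test, for the sketch only
    bool isRootFile = isFile && ((dirName(newItemPath) == ".") || (dirName(newItemPath) == "/"));
    if (isRootFile && syncRootFiles) {
        decision.unwanted = false;     // sync_root_files overrides the exclusion
    } else {
        decision.unwanted = true;
        decision.flagForLocalDelete = existsInDatabase;
    }
    return decision;
}

void main() {
    bool excludeEverything(string path) { return true; }   // pretend sync_list excludes all paths
    writeln(evaluateSyncList("./Documents/Report.docx", true, false, true, &excludeEverything));
}
```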
- if (hasParentReferencePath(item)) { - thisItemParentPath = item["parentReference"]["path"].str; - thisItemFullPath = thisItemParentPath ~ "/" ~ item["name"].str; - } else { - thisItemParentPath = ""; - } - - // Special case handling flags - bool singleDirectorySpecialCase = false; - bool sharedFoldersSpecialCase = false; - - // Debug output of change evaluation items - log.vdebug("'parentReference id' = ", item["parentReference"]["id"].str); - log.vdebug("search criteria: syncFolderName = ", syncFolderName); - log.vdebug("search criteria: syncFolderPath = ", syncFolderPath); - log.vdebug("search criteria: syncFolderChildPath = ", syncFolderChildPath); - log.vdebug("thisItemId = ", item["id"].str); - log.vdebug("thisItemParentPath = ", thisItemParentPath); - log.vdebug("thisItemFullPath = ", thisItemFullPath); - log.vdebug("'item id' matches search 'id' = ", (item["id"].str == id)); - log.vdebug("'parentReference id' matches search 'id' = ", (item["parentReference"]["id"].str == id)); - log.vdebug("'thisItemParentPath' contains 'syncFolderChildPath' = ", (canFind(thisItemParentPath, syncFolderChildPath))); - log.vdebug("'thisItemParentPath' contains search 'id' = ", (canFind(thisItemParentPath, id))); - - // Special case handling - --single-directory - // If we are in a --single-directory sync scenario, and, the DB does not contain any parent details, or --single-directory is used with --resync - // all changes will be discarded as 'Remote change discarded - not in --single-directory sync scope (not in DB)' even though, some of the changes - // are actually valid and required as they are part of the parental path - if (singleDirectoryScope){ - // What is the full path for this item from OneDrive - log.vdebug("'syncFolderChildPath' contains 'thisItemFullPath' = ", (canFind(syncFolderChildPath, thisItemFullPath))); - if (canFind(syncFolderChildPath, thisItemFullPath)) { - singleDirectorySpecialCase = true; - } - } - - // Special case handling - Shared Business Folders - // - IF we are syncing shared folders, and the shared folder is not the 'top level' folder being shared out - // canFind(thisItemParentPath, syncFolderChildPath) will never match: - // Syncing this OneDrive Business Shared Folder: MyFolderName - // OneDrive Business Shared By: Firstname Lastname (email@address) - // Applying changes of Path ID: pathId - // [DEBUG] Sync Folder Name: MyFolderName - // [DEBUG] Sync Folder Path: /drives/driveId/root:/TopLevel/ABCD - // [DEBUG] Sync Folder Child Path: /drives/driveId/root:/TopLevel/ABCD/MyFolderName/ - // ... - // [DEBUG] 'item id' matches search 'id' = false - // [DEBUG] 'parentReference id' matches search 'id' = false - // [DEBUG] 'thisItemParentPath' contains 'syncFolderChildPath' = false - // [DEBUG] 'thisItemParentPath' contains search 'id' = false - // [DEBUG] Change does not match any criteria to apply - // Remote change discarded - not in business shared folders sync scope - - if ((!canFind(thisItemParentPath, syncFolderChildPath)) && (syncBusinessFolders)) { - // Syncing Shared Business folders & we dont have a path match - // is this a reverse path match? - log.vdebug("'thisItemParentPath' contains 'syncFolderName' = ", (canFind(thisItemParentPath, syncFolderName))); - if (canFind(thisItemParentPath, syncFolderName)) { - sharedFoldersSpecialCase = true; - } - } - - // Check this item's path to see if this is a change on the path we want: - // 1. 'item id' matches 'id' - // 2. 'parentReference id' matches 'id' - // 3. 'item path' contains 'syncFolderChildPath' - // 4. 
'item path' contains 'id' - // 5. Special Case was triggered - if ( (item["id"].str == id) || (item["parentReference"]["id"].str == id) || (canFind(thisItemParentPath, syncFolderChildPath)) || (canFind(thisItemParentPath, id)) || (singleDirectorySpecialCase) || (sharedFoldersSpecialCase) ){ - // This is a change we want to apply - if ((!singleDirectorySpecialCase) && (!sharedFoldersSpecialCase)) { - log.vdebug("Change matches search criteria to apply"); - } else { - if (singleDirectorySpecialCase) log.vdebug("Change matches search criteria to apply - special case criteria - reverse path matching used (--single-directory)"); - if (sharedFoldersSpecialCase) log.vdebug("Change matches search criteria to apply - special case criteria - reverse path matching used (Shared Business Folders)"); - } - // Apply OneDrive change - applyDifference(item, driveId, isRoot); - } else { - // No item ID match or folder sync match - log.vdebug("Change does not match any criteria to apply"); - - // Before discarding change - does this ID still exist on OneDrive - as in IS this - // potentially a --single-directory sync and the user 'moved' the file out of the 'sync-dir' to another OneDrive folder - // This is a corner edge case - https://github.com/skilion/onedrive/issues/341 - - // What is the original local path for this ID in the database? Does it match 'syncFolderChildPath' - if (itemdb.idInLocalDatabase(driveId, item["id"].str)){ - // item is in the database - string originalLocalPath = computeItemPath(driveId, item["id"].str); - - if (canFind(originalLocalPath, syncFolderChildPath)){ - JSONValue oneDriveMovedNotDeleted; - try { - oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item["id"].str); - } catch (OneDriveException e) { - log.vdebug("oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item['id'].str); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // No .. that ID is GONE - log.vlog("Remote change discarded - item cannot be found"); - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry request after delay - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item['id'].str);"); - try { - oneDriveMovedNotDeleted = onedrive.getPathDetailsById(driveId, item["id"].str); - } catch (OneDriveException e) { - // A further error was generated - // Rather than retry original function, retry the actual call and replicate error handling - if (e.httpStatusCode == 404) { - // No .. that ID is GONE - log.vlog("Remote change discarded - item cannot be found"); - } else { - // not a 404 - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - } - } else { - // not a 404 or a 429 - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - } - - // Yes .. ID is still on OneDrive but elsewhere .... 
#341 edge case handling - // This 'change' relates to an item that WAS in 'syncFolderChildPath' but is now - // stored elsewhere on OneDrive - outside the path we are syncing from - // Remove this item locally as it's local path is now obsolete - idsToDelete ~= [driveId, item["id"].str]; - } else { - // out of scope for some other reason - if (singleDirectoryScope){ - log.vlog("Remote change discarded - not in --single-directory sync scope (in DB)"); - } else { - log.vlog("Remote change discarded - not in sync scope"); - } - log.vdebug("Remote change discarded: ", item); - } - } else { - // item is not in the database - if (singleDirectoryScope){ - // We are syncing a single directory, so this is the reason why it is out of scope - log.vlog("Remote change discarded - not in --single-directory sync scope (not in DB)"); - log.vdebug("Remote change discarded: ", item); - } else { - // Not a single directory sync - if (syncBusinessFolders) { - // if we are syncing shared business folders, a 'change' may be out of scope as we are not syncing that 'folder' - // but we are sent all changes from the 'parent root' as we cannot query the 'delta' for this folder - // as that is a 501 error - not implemented - log.vlog("Remote change discarded - not in business shared folders sync scope"); - log.vdebug("Remote change discarded: ", item); - } else { - // out of scope for some other reason - log.vlog("Remote change discarded - not in sync scope"); - log.vdebug("Remote change discarded: ", item); - } - } - } - } + } + } + + // Check if the user has configured to skip downloading .files or .folders: skip_dotfiles + if (!unwanted) { + if (appConfig.getValueBool("skip_dotfiles")) { + if (isDotFile(newItemPath)) { + addLogEntry("Skipping item - .file or .folder: " ~ newItemPath, ["verbose"]); + unwanted = true; + } + } + } + + // Check if this should be skipped due to a --check-for-nosync directive (.nosync)? 
+ if (!unwanted) {
+ if (appConfig.getValueBool("check_nosync")) {
+ // need the parent path for this object
+ string parentPath = dirName(newItemPath);
+ // Check for the presence of a .nosync in the parent path
+ if (exists(parentPath ~ "/.nosync")) {
+ addLogEntry("Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: " ~ newItemPath, ["verbose"]);
+ unwanted = true;
+ }
+ }
+ }
+ 
+ // Check if this is excluded by a user-set maximum file size to download
+ if (!unwanted) {
+ if (isItemFile(onedriveJSONItem)) {
+ if (fileSizeLimit != 0) {
+ if (onedriveJSONItem["size"].integer >= fileSizeLimit) {
+ addLogEntry("Skipping item - excluded by skip_size config: " ~ thisItemName ~ " (" ~ to!string(onedriveJSONItem["size"].integer/2^^20) ~ " MB)", ["verbose"]);
+ // Flag as unwanted so the oversized file is actually skipped, matching the log message above
+ unwanted = true;
 }
 }
 }
- } else {
- // No changes reported on OneDrive
- log.vdebug("OneDrive Reported no delta changes - Local path and OneDrive in-sync");
- }
- 
- // the response may contain either @odata.deltaLink or @odata.nextLink
- if ("@odata.deltaLink" in changes) {
- deltaLink = changes["@odata.deltaLink"].str;
- log.vdebug("Setting next deltaLink to (@odata.deltaLink): ", deltaLink);
- }
- if (deltaLink != "") {
- // we initialise deltaLink to a blank string - if it is blank, dont update the DB to be empty
- log.vdebug("Updating completed deltaLink in DB to: ", deltaLink);
- itemdb.setDeltaLink(driveId, id, deltaLink);
- }
- 
- // Processing Timing for this bundle
- if (displayProcessingTime) {
- endBundleProcessingTime = Clock.currTime();
- writeln("End 'change|item' API Response Bundle Processing Time: ", endBundleProcessingTime);
- writeln("Elapsed Processing Time: ", (endBundleProcessingTime - startBundleProcessingTime));
 }
+ 
+ // At this point all the applicable checks on this JSON object from OneDrive are complete:
+ // - skip_file
+ // - skip_dir
+ // - sync_list
+ // - skip_dotfiles
+ // - check_nosync
+ // - skip_size
+ // - We know whether or not this item already exists in the DB
+ 
+ // We know if this JSON item is unwanted or not
+ if (unwanted) {
+ // This JSON item is NOT wanted - it is excluded
+ addLogEntry("Skipping OneDrive change as this is determined to be unwanted", ["debug"]);
 
- if ("@odata.nextLink" in changes) {
- // Update deltaLink to next changeSet bundle
- deltaLink = changes["@odata.nextLink"].str;
- // Update deltaLinkAvailable to next changeSet bundle to quantify how many changes we have to process
- deltaLinkAvailable = changes["@odata.nextLink"].str;
- log.vdebug("Setting next deltaLink & deltaLinkAvailable to (@odata.nextLink): ", deltaLink);
+ // Add to the skippedItems array, but only if it is a directory ... 
pointless adding 'files' here, as it is the 'id' we check as the parent path which can only be a directory + if (!isItemFile(onedriveJSONItem)) { + skippedItems.insert(thisItemId); } - else break; } else { - // Log that an invalid JSON object was returned - if ((driveId == defaultDriveId) || (!syncBusinessFolders)) { - log.vdebug("onedrive.viewChangesByItemId call returned an invalid JSON Object"); - } else { - log.vdebug("onedrive.viewChangesByDriveId call returned an invalid JSON Object"); - } - } - } - - // delete items in idsToDelete - if (idsToDelete.length > 0) deleteItems(); - // empty the skipped items - skippedItems.length = 0; - assumeSafeAppend(skippedItems); - - // Processing timing and metrics for everything that was processed - if (displayProcessingTime) { - endFunctionProcessingTime = Clock.currTime(); - // complete the bundle output - writeln("------------------------------------------------------------"); - writeln("Start Function Processing Time: ", startFunctionProcessingTime); - writeln("End Function Processing Time: ", endFunctionProcessingTime); - writeln("Elapsed Function Processing Time: ", (endFunctionProcessingTime - startFunctionProcessingTime)); - writeln("Total number of OneDrive items processed: ", cumulativeOneDriveItemCount); - writeln("============================================================"); - } - } - - // process the change of a single DriveItem - private void applyDifference(JSONValue driveItem, string driveId, bool isRoot) - { - // Format the OneDrive change into a consumable object for the database - Item item = makeItem(driveItem); - - // Reset the malwareDetected flag for this item - malwareDetected = false; - - // Reset the downloadFailed flag for this item - downloadFailed = false; - - // Path we will be using - string path = ""; - - if(isItemDeleted(driveItem)){ - // Change is to delete an item - log.vdebug("Remote deleted item"); - } else { - // Is the change from OneDrive a 'root' item - // The change should be considered a 'root' item if: - // 1. Contains a ["root"] element - // 2. Has no ["parentReference"]["id"] ... #323 & #324 highlighted that this is false as some 'root' shared objects now can have an 'id' element .. OneDrive API change - // 2. Has no ["parentReference"]["path"] - // 3. Was detected by an input flag as to be handled as a root item regardless of actual status - if (isItemRoot(driveItem) || !hasParentReferencePath(driveItem) || isRoot) { - log.vdebug("Handing a OneDrive 'root' change"); - item.parentId = null; // ensures that it has no parent - item.driveId = driveId; // HACK: makeItem() cannot set the driveId property of the root - log.vdebug("Update/Insert local database with item details"); - itemdb.upsert(item); - log.vdebug("item details: ", item); - return; - } - } - - bool unwanted; - // Check if the parent id is something we need to skip - if (skippedItems.find(item.parentId).length != 0) { - // Potentially need to flag as unwanted - log.vdebug("Flagging as unwanted: find(item.parentId).length != 0"); - unwanted = true; - - // Is this item id in the database? - if (itemdb.idInLocalDatabase(item.driveId, item.id)){ - // item exists in database, most likely moved out of scope for current client configuration - log.vdebug("This item was previously synced / seen by the client"); - if (("name" in driveItem["parentReference"]) != null) { - // How is this out of scope? 
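After the filtering above, the new code routes each JSON item in one of three ways: unwanted directories are remembered in `skippedItems` so their children can be discarded by parent id, items already known to the database go through the changed-item path, and everything else is treated as a potentially new local item. A minimal sketch of that routing follows, assuming `skippedItems` is a set-like `RedBlackTree!string` and using placeholder handlers. Keeping only directory ids in the set keeps it small while still letting every descendant be rejected by its parent id.

```d
import std.container.rbtree : RedBlackTree, redBlackTree;
import std.stdio;

// Minimal sketch of the routing at the end of the checks above. The two handlers and
// the DB lookup result are placeholders; only the control flow mirrors the patch.
void routeJsonItem(bool unwanted, bool isDirectory, bool existingDBEntry, string itemId,
                   RedBlackTree!string skippedItems,
                   void delegate() applyPotentiallyChangedItem,
                   void delegate() applyPotentiallyNewLocalItem) {
    if (unwanted) {
        // Only directories are remembered: children are later discarded by parent id,
        // and a file can never be anything's parent.
        if (isDirectory) skippedItems.insert(itemId);
        return;
    }
    if (existingDBEntry) {
        applyPotentiallyChangedItem();   // the DB already knows this id: treat as an update
    } else {
        applyPotentiallyNewLocalItem();  // no DB record: treat as a potentially new local item
    }
}

void main() {
    auto skippedItems = redBlackTree!string();
    void changed() { writeln("changed item path"); }
    void fresh() { writeln("new item path"); }
    routeJsonItem(true, true, false, "UNWANTED-DIR-ID", skippedItems, &changed, &fresh);
    routeJsonItem(false, false, true, "KNOWN-FILE-ID", skippedItems, &changed, &fresh);
    writeln(skippedItems[]);   // ["UNWANTED-DIR-ID"]
}
```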
- // is sync_list configured - if (syncListConfigured) { - // sync_list configured and in use - if (selectiveSync.isPathExcludedViaSyncList(driveItem["parentReference"]["name"].str)) { - // Previously synced item is now out of scope as it has been moved out of what is included in sync_list - log.vdebug("This previously synced item is now excluded from being synced due to sync_list exclusion"); - } - } - // flag to delete local file as it now is no longer in sync with OneDrive - log.vdebug("Flagging to delete item locally"); - idsToDelete ~= [item.driveId, item.id]; - } - } - } - - // Check if this is excluded by config option: skip_dir - if (!unwanted) { - // Only check path if config is != "" - if (cfg.getValueString("skip_dir") != "") { - // Is the item a folder and not a deleted item? - if ((isItemFolder(driveItem)) && (!isItemDeleted(driveItem))) { - // work out the 'snippet' path where this folder would be created - string simplePathToCheck = ""; - string complexPathToCheck = ""; - string matchDisplay = ""; - - if (hasParentReference(driveItem)) { - // we need to workout the FULL path for this item - string parentDriveId = driveItem["parentReference"]["driveId"].str; - string parentItem = driveItem["parentReference"]["id"].str; - // simple path - if (("name" in driveItem["parentReference"]) != null) { - simplePathToCheck = driveItem["parentReference"]["name"].str ~ "/" ~ driveItem["name"].str; - } else { - simplePathToCheck = driveItem["name"].str; - } - log.vdebug("skip_dir path to check (simple): ", simplePathToCheck); - // complex path - if (itemdb.idInLocalDatabase(parentDriveId, parentItem)){ - // build up complexPathToCheck - complexPathToCheck = computeItemPath(parentDriveId, parentItem) ~ "/" ~ driveItem["name"].str; - complexPathToCheck = buildNormalizedPath(complexPathToCheck); - } else { - log.vdebug("Parent details not in database - unable to compute complex path to check"); - } - log.vdebug("skip_dir path to check (complex): ", complexPathToCheck); - } else { - simplePathToCheck = driveItem["name"].str; - } - - // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder - // then isDirNameExcluded matching will not work - // Clean up 'root:' if present - if (startsWith(simplePathToCheck, "root:")){ - log.vdebug("Updating simplePathToCheck to remove 'root:'"); - simplePathToCheck = strip(simplePathToCheck, "root:"); - } - if (startsWith(complexPathToCheck, "root:")){ - log.vdebug("Updating complexPathToCheck to remove 'root:'"); - complexPathToCheck = strip(complexPathToCheck, "root:"); - } - - // OK .. what checks are we doing? 
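Both the old and the new skip_dir logic first sanitise the candidate path (paths such as `root:/folder` would never match a configured rule) and then try a cheap simple match before falling back to the fully computed path. An illustrative sketch of that two-step check; `isDirNameExcluded` is only a stand-in for the configured matcher, and `chompPrefix` is used here instead of the client's own cleanup.

```d
import std.algorithm.searching : startsWith;
import std.string : chompPrefix;
import std.stdio;

// Clean up a skip_dir candidate: strip a leading "root:" so the path can match rules.
string cleanSkipDirCandidate(string pathToCheck) {
    if (pathToCheck.startsWith("root:")) {
        pathToCheck = pathToCheck.chompPrefix("root:");
    }
    return pathToCheck;
}

// Simple-then-complex matching: try the cheap parentName/itemName form first and only
// fall back to the fully computed path when one is available and the simple form missed.
bool skipDirExcludes(string simplePath, string complexPath,
                     bool delegate(string) isDirNameExcluded) {
    bool unwanted = isDirNameExcluded(cleanSkipDirCandidate(simplePath));
    if ((!unwanted) && (complexPath.length > 0)) {
        unwanted = isDirNameExcluded(cleanSkipDirCandidate(complexPath));
    }
    return unwanted;
}

void main() {
    bool matcher(string path) { return path == "/Backup/Old"; }  // pretend skip_dir = /Backup/Old
    writeln(skipDirExcludes("Backup/Old", "root:/Backup/Old", &matcher));  // true via the complex path
}
```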
- if ((simplePathToCheck != "") && (complexPathToCheck == "")) { - // just a simple check - log.vdebug("Performing a simple check only"); - unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); - } else { - // simple and complex - log.vdebug("Performing a simple & complex path match if required"); - // simple first - unwanted = selectiveSync.isDirNameExcluded(simplePathToCheck); - matchDisplay = simplePathToCheck; - if (!unwanted) { - log.vdebug("Simple match was false, attempting complex match"); - // simple didnt match, perform a complex check - unwanted = selectiveSync.isDirNameExcluded(complexPathToCheck); - matchDisplay = complexPathToCheck; - } - } - - log.vdebug("Result: ", unwanted); - if (unwanted) log.vlog("Skipping item - excluded by skip_dir config: ", matchDisplay); - } - } - } - - // Check if this is excluded by config option: skip_file - if (!unwanted) { - // Is the item a file and not a deleted item? - if ((isItemFile(driveItem)) && (!isItemDeleted(driveItem))) { - // skip_file can contain 4 types of entries: - // - wildcard - *.txt - // - text + wildcard - name*.txt - // - full path + combination of any above two - /path/name*.txt - // - full path to file - /path/to/file.txt + // This JSON item is wanted - we need to process this JSON item further + // Take the JSON item and create a consumable object for eventual database insertion + Item newDatabaseItem = makeItem(onedriveJSONItem); - // is the parent id in the database? - if (itemdb.idInLocalDatabase(item.driveId, item.parentId)){ - // Compute this item path & need the full path for this file - path = computeItemPath(item.driveId, item.parentId) ~ "/" ~ item.name; - - // The path that needs to be checked needs to include the '/' - // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched - // However, as 'path' used throughout, use a temp variable with this modification so that we use the temp variable for exclusion checks - string exclusionTestPath = ""; - if (!startsWith(path, "/")){ - // Add '/' to the path - exclusionTestPath = '/' ~ path; - } + if (existingDBEntry) { + // The details of this JSON item are already in the DB + // Is the item in the DB the same as the JSON data provided - or is the JSON data advising this is an updated file? + addLogEntry("OneDrive change is an update to an existing local item", ["debug"]); - log.vdebug("skip_file item to check: ", exclusionTestPath); - unwanted = selectiveSync.isFileNameExcluded(exclusionTestPath); - log.vdebug("Result: ", unwanted); - if (unwanted) log.vlog("Skipping item - excluded by skip_file config: ", item.name); - } else { - // parent id is not in the database - unwanted = true; - log.vlog("Skipping file - parent path not present in local database"); - } - } - } - - // check the item type - if (!unwanted) { - if (isItemFile(driveItem)) { - log.vdebug("The item we are syncing is a file"); - } else if (isItemFolder(driveItem)) { - log.vdebug("The item we are syncing is a folder"); - } else if (isItemRemote(driveItem)) { - log.vdebug("The item we are syncing is a remote item"); - assert(isItemFolder(driveItem["remoteItem"]), "The remote item is not a folder"); - } else { - // Why was this unwanted? 
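skip_file entries can be bare wildcards or fully anchored paths, which is why the exclusion test path always carries a leading '/'. The sketch below mirrors that normalisation and uses `std.path.globMatch` purely as a stand-in for the client's own matcher in clientSideFiltering.d.

```d
import std.algorithm.searching : any, startsWith;
import std.path : globMatch;
import std.stdio;

// Normalise the candidate: skip_file rules may be anchored ("/path/name*.txt"),
// so the tested path must start with '/'.
string exclusionTestPath(string newItemPath) {
    return newItemPath.startsWith("/") ? newItemPath : ("/" ~ newItemPath);
}

// Stand-in for isFileNameExcluded(): true when any configured skip_file entry matches
// either the bare file name or the anchored full path.
bool fileExcluded(string newItemPath, string fileName, const string[] skipFilePatterns) {
    auto candidate = exclusionTestPath(newItemPath);
    return skipFilePatterns.any!(pattern =>
        globMatch(fileName, pattern) || globMatch(candidate, pattern));
}

void main() {
    auto patterns = ["*.tmp", "/Projects/secret*.docx"];
    writeln(fileExcluded("Projects/secretPlan.docx", "secretPlan.docx", patterns));  // true
    writeln(fileExcluded("Projects/notes.txt", "notes.txt", patterns));              // false
}
```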
- if (path.empty) { - // Compute this item path & need the full path for this file - path = computeItemPath(item.driveId, item.parentId) ~ "/" ~ item.name; - } - // Microsoft OneNote container objects present as neither folder or file but has file size - if ((!isItemFile(driveItem)) && (!isItemFolder(driveItem)) && (hasFileSize(driveItem))) { - // Log that this was skipped as this was a Microsoft OneNote item and unsupported - log.vlog("The Microsoft OneNote Notebook '", path, "' is not supported by this client"); + // Compute the existing item path + // NOTE: + // string existingItemPath = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.id); + // + // This will calculate the path as follows: + // + // existingItemPath: Document.txt + // + // Whereas above we use the following + // + // newItemPath = computeItemPath(newDatabaseItem.driveId, newDatabaseItem.parentId) ~ "/" ~ newDatabaseItem.name; + // + // Which generates the following path: + // + // changedItemPath: ./Document.txt + // + // Need to be consistent here with how 'newItemPath' was calculated + string existingItemPath = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.parentId) ~ "/" ~ existingDatabaseItem.name; + // Attempt to apply this changed item + applyPotentiallyChangedItem(existingDatabaseItem, existingItemPath, newDatabaseItem, newItemPath, onedriveJSONItem); } else { - // Log that this item was skipped as unsupported - log.vlog("The OneDrive item '", path, "' is not supported by this client"); + // Action this JSON item as a new item as we have no DB record of it + // The actual item may actually exist locally already, meaning that just the database is out-of-date or missing the data due to --resync + // But we also cannot compute the newItemPath as the parental objects may not exist as well + addLogEntry("OneDrive change is potentially a new local item", ["debug"]); + + // Attempt to apply this potentially new item + applyPotentiallyNewLocalItem(newDatabaseItem, onedriveJSONItem, newItemPath); } - unwanted = true; - log.vdebug("Flagging as unwanted: item type is not supported"); } + + // Tracking as to if this item was processed + processedCount++; } - - // Check if this is included by use of sync_list - if (!unwanted) { - // Is the item parent in the local database? - if (itemdb.idInLocalDatabase(item.driveId, item.parentId)){ - // parent item is in the local database - // compute the item path if empty - if (path.empty) { - path = computeItemPath(item.driveId, item.parentId) ~ "/" ~ item.name; - } - // what path are we checking - log.vdebug("sync_list item to check: ", path); - - // Unfortunatly there is no avoiding this call to check if the path is excluded|included via sync_list - if (selectiveSync.isPathExcludedViaSyncList(path)) { - // selective sync advised to skip, however is this a file and are we configured to upload / download files in the root? - if ((isItemFile(driveItem)) && (cfg.getValueBool("sync_root_files")) && (rootName(path) == "") ) { - // This is a file - // We are configured to sync all files in the root - // This is a file in the logical root - unwanted = false; - } else { - // path is unwanted - unwanted = true; - log.vlog("Skipping item - excluded by sync_list config: ", path); - // flagging to skip this file now, but does this exist in the DB thus needs to be removed / deleted? 
- if (itemdb.idInLocalDatabase(item.driveId, item.id)){ - log.vlog("Flagging item for local delete as item exists in database: ", path); - // flag to delete - idsToDelete ~= [item.driveId, item.id]; - } - } - } - } else { - // Parent not in the database - // Is the parent a 'folder' from another user? ie - is this a 'shared folder' that has been shared with us? - if (defaultDriveId == item.driveId){ - // Flagging as unwanted - log.vdebug("Flagging as unwanted: item.driveId (", item.driveId,"), item.parentId (", item.parentId,") not in local database"); - unwanted = true; + } + + // Perform the download of any required objects in parallel + void processDownloadActivities() { + + // Are there any items to delete locally? Cleanup space locally first + if (!idsToDelete.empty) { + // There are elements that potentially need to be deleted locally + addLogEntry("Items to potentially delete locally: " ~ to!string(idsToDelete.length), ["verbose"]); + + if (appConfig.getValueBool("download_only")) { + // Download only has been configured + if (cleanupLocalFiles) { + // Process online deleted items + addLogEntry("Processing local deletion activity as --download-only & --cleanup-local-files configured", ["verbose"]); + processDeleteItems(); } else { - // Edge case as the parent (from another users OneDrive account) will never be in the database - log.vdebug("The reported parentId is not in the database. This potentially is a shared folder as 'item.driveId' != 'defaultDriveId'. Relevant Details: item.driveId (", item.driveId,"), item.parentId (", item.parentId,")"); - // If we are syncing OneDrive Business Shared Folders, a 'folder' shared with us, has a 'parent' that is not shared with us hence the above message - // What we need to do is query the DB for this 'item.driveId' and use the response from the DB to set the 'item.parentId' for this new item we are trying to add to the database - if (syncBusinessFolders) { - foreach(dbItem; itemdb.selectByDriveId(item.driveId)) { - if (dbItem.name == "root") { - // Ensure that this item uses the root id as parent - log.vdebug("Falsifying item.parentId to be ", dbItem.id); - item.parentId = dbItem.id; - } - } - } else { - // Ensure that this item has no parent - log.vdebug("Setting item.parentId to be null"); - item.parentId = null; - } - log.vdebug("Update/Insert local database with item details"); - itemdb.upsert(item); - log.vdebug("item details: ", item); - return; + // Not cleaning up local files + addLogEntry("Skipping local deletion activity as --download-only has been used", ["verbose"]); } - } - } - - // skip downloading dot files if configured - if (cfg.getValueBool("skip_dotfiles")) { - if (isDotFile(path)) { - log.vlog("Skipping item - .file or .folder: ", path); - unwanted = true; - } - } - - // skip unwanted items early - if (unwanted) { - log.vdebug("Skipping OneDrive change as this is determined to be unwanted"); - skippedItems ~= item.id; - return; - } - - // check if the item has been seen before - Item oldItem; - bool cached = itemdb.selectById(item.driveId, item.id, oldItem); - - // check if the item is going to be deleted - if (isItemDeleted(driveItem)) { - // item.name is not available, so we get a bunch of meaningless log output - // Item name we will attempt to delete will be printed out later - if (cached) { - // flag to delete - log.vdebug("Flagging item for deletion: ", item); - idsToDelete ~= [item.driveId, item.id]; } else { - // flag to ignore - log.vdebug("Flagging item to skip: ", item); - skippedItems ~= item.id; - } - return; 
- } - - // rename the local item if it is unsynced and there is a new version of it on OneDrive - string oldPath; - if (cached && item.eTag != oldItem.eTag) { - // Is the item in the local database - if (itemdb.idInLocalDatabase(item.driveId, item.id)){ - log.vdebug("OneDrive item ID is present in local database"); - // Compute this item path - oldPath = computeItemPath(item.driveId, item.id); - // Query DB for existing local item in specified path - string itemSource = "database"; - if (!isItemSynced(oldItem, oldPath, itemSource)) { - if (exists(oldPath)) { - // Is the local file technically 'newer' based on UTC timestamp? - SysTime localModifiedTime = timeLastModified(oldPath).toUTC(); - localModifiedTime.fracSecs = Duration.zero; - item.mtime.fracSecs = Duration.zero; - - // debug the output of time comparison - log.vdebug("localModifiedTime (local file): ", localModifiedTime); - log.vdebug("item.mtime (OneDrive item): ", item.mtime); - - // Compare file on disk modified time with modified time provided by OneDrive API - if (localModifiedTime >= item.mtime) { - // local file is newer or has the same time than the item on OneDrive - log.vdebug("Skipping OneDrive change as this is determined to be unwanted due to local item modified time being newer or equal to item modified time from OneDrive"); - // no local rename - // no download needed - if (localModifiedTime == item.mtime) { - log.vlog("Local item modified time is equal to OneDrive item modified time based on UTC time conversion - keeping local item"); - } else { - log.vlog("Local item modified time is newer than OneDrive item modified time based on UTC time conversion - keeping local item"); - } - skippedItems ~= item.id; - return; - } else { - // remote file is newer than local item - log.vlog("Remote item modified time is newer based on UTC time conversion"); // correct message, remote item is newer - auto ext = extension(oldPath); - auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext; - - // has the user configured to IGNORE local data protection rules? - if (bypassDataPreservation) { - // The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename - log.vlog("WARNING: Local Data Protection has been disabled. 
You may experience data loss on this file: ", oldPath); - } else { - // local data protection is configured, renaming local file - log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", oldPath, " -> ", newPath); - - // perform the rename action - if (!dryRun) { - safeRename(oldPath); - } else { - // Expectation here is that there is a new file locally (newPath) however as we don't create this, the "new file" will not be uploaded as it does not exist - log.vdebug("DRY-RUN: Skipping local file rename"); - } - } - } - } - cached = false; - } + // Not using --download-only process normally + processDeleteItems(); } + // Cleanup array memory + idsToDelete = []; } - - // update the item - if (cached) { - // the item is in the items.sqlite3 database - log.vdebug("OneDrive change is an update to an existing local item"); - applyChangedItem(oldItem, oldPath, item, path); - } else { - log.vdebug("OneDrive change is potentially a new local item"); - // Check if file should be skipped based on size limit - if (isItemFile(driveItem)) { - if (cfg.getValueLong("skip_size") != 0) { - if (driveItem["size"].integer >= this.newSizeLimit) { - log.vlog("Skipping item - excluded by skip_size config: ", item.name, " (", driveItem["size"].integer/2^^20, " MB)"); - return; - } - } - } - // apply this new item - applyNewItem(item, path); + + // Are there any items to download post fetching and processing the /delta data? + if (!fileJSONItemsToDownload.empty) { + // There are elements to download + addLogEntry("Number of items to download from OneDrive: " ~ to!string(fileJSONItemsToDownload.length), ["verbose"]); + downloadOneDriveItems(); + // Cleanup array memory + fileJSONItemsToDownload = []; } - - if ((malwareDetected == false) && (downloadFailed == false)){ - // save the item in the db - // if the file was detected as malware and NOT downloaded, we dont want to falsify the DB as downloading it as otherwise the next pass will think it was deleted, thus delete the remote item - // Likewise if the download failed, we dont want to falsify the DB as downloading it as otherwise the next pass will think it was deleted, thus delete the remote item - if (cached) { - // the item is in the items.sqlite3 database - // Do we need to update the database with the details that were provided by the OneDrive API? - // Is the last modified timestamp in the DB the same as the API data? - SysTime localModifiedTime = oldItem.mtime; - localModifiedTime.fracSecs = Duration.zero; - SysTime remoteModifiedTime = item.mtime; - remoteModifiedTime.fracSecs = Duration.zero; - - // If the timestamp is different, or we are running on a National Cloud Deployment that does not support /delta queries - we have to update the DB with the details from OneDrive - // Unfortunatly because of the consequence of Nataional Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes - // This means that the constant disk writing to the database fix implemented with https://github.com/abraunegg/onedrive/pull/2004 cannot be utilised when using Nataional Cloud Deployments - // as all records are touched / updated when performing the OneDrive sync operations. 
The only way to change this, is for Microsoft to support /delta queries for Nataional Cloud Deployments - if ((localModifiedTime != remoteModifiedTime) || (nationalCloudDeployment)) { - // Database update needed for this item because our local record is out-of-date - log.vdebug("Updating local database with item details from OneDrive as local record needs to be updated"); - itemdb.update(item); - } - } else { - // item is not in the items.sqlite3 database - log.vdebug("Inserting new item details to local database"); - itemdb.insert(item); - } - // What was the item that was saved - log.vdebug("item details: ", item); - } else { - // flag was tripped, which was it - if (downloadFailed) { - log.vdebug("Download or creation of local directory failed"); - } - if (malwareDetected) { - log.vdebug("OneDrive reported that file contained malware"); - } + + // Are there any skipped items still? + if (!skippedItems.empty) { + // Cleanup array memory + skippedItems.clear(); } } - - // download an item that was not synced before - private void applyNewItem(const ref Item item, const(string) path) - { - // Test for the local path existence - if (exists(path)) { + + // If the JSON item is not in the database, it is potentially a new item that we need to action + void applyPotentiallyNewLocalItem(Item newDatabaseItem, JSONValue onedriveJSONItem, string newItemPath) { + + // The JSON and Database items being passed in here have passed the following checks: + // - skip_file + // - skip_dir + // - sync_list + // - skip_dotfiles + // - check_nosync + // - skip_size + // - Is not currently cached in the local database + // As such, we should not be doing any other checks here to determine if the JSON item is wanted .. it is + + if (exists(newItemPath)) { // Issue #2209 fix - test if path is a bad symbolic link - if (isSymlink(path)) { - log.vdebug("Path on local disk is a symbolic link ........"); - if (!exists(readLink(path))) { + if (isSymlink(newItemPath)) { + addLogEntry("Path on local disk is a symbolic link ........", ["debug"]); + if (!exists(readLink(newItemPath))) { // reading the symbolic link failed - log.vdebug("Reading the symbolic link target failed ........ "); - log.logAndNotify("Skipping item - invalid symbolic link: ", path); + addLogEntry("Reading the symbolic link target failed ........ ", ["debug"]); + addLogEntry("Skipping item - invalid symbolic link: " ~ newItemPath, ["info", "notify"]); return; } } - - // path exists locally, is not a bad symbolic link - // Query DB for new remote item in specified path + + // Path exists locally, is not a bad symbolic link + // Test if this item is actually in-sync + // What is the source of this item data? string itemSource = "remote"; - if (isItemSynced(item, path, itemSource)) { - // file details from OneDrive and local file details in database are in-sync - log.vdebug("The item to sync is already present on the local file system and is in-sync with the local database"); + if (isItemSynced(newDatabaseItem, newItemPath, itemSource)) { + // Item details from OneDrive and local item details in database are in-sync + addLogEntry("The item to sync is already present on the local filesystem and is in-sync with what is reported online", ["debug"]); + addLogEntry("Update/Insert local database with item details: " ~ to!string(newDatabaseItem), ["debug"]); + itemDB.upsert(newDatabaseItem); return; } else { - // file is not in sync with the database - // is the local file technically 'newer' based on UTC timestamp? 
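// A minimal sketch of the timestamp comparison used in this block: both values are taken in UTC
// and truncated to whole seconds before comparing, so sub-second differences between the local
// filesystem and the OneDrive API never force an unnecessary rename or download. Illustration
// only; the client feeds timeLastModified(path) and the item's mtime from the API into this.
import core.time : Duration, seconds;
import std.datetime.systime : Clock, SysTime;
import std.stdio : writeln;

bool localIsNewerOrEqual(SysTime localModifiedTime, SysTime itemModifiedTime) {
	localModifiedTime = localModifiedTime.toUTC();
	itemModifiedTime = itemModifiedTime.toUTC();
	// Reduce time resolution to seconds before comparing
	localModifiedTime.fracSecs = Duration.zero;
	itemModifiedTime.fracSecs = Duration.zero;
	return localModifiedTime >= itemModifiedTime;
}

void main() {
	auto now = Clock.currTime();
	writeln(localIsNewerOrEqual(now, now - 5.seconds)); // true - the local copy is newer
	writeln(localIsNewerOrEqual(now - 5.seconds, now)); // false - the remote copy is newer
}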
- SysTime localModifiedTime = timeLastModified(path).toUTC();
- SysTime itemModifiedTime = item.mtime;
- // HACK: reduce time resolution to seconds before comparing
+ // Item details from OneDrive and local item details in database are NOT in-sync
+ addLogEntry("The item to sync exists locally but is NOT in the local database - otherwise this would be handled as changed item", ["debug"]);
+ 
+ // Which object is newer? The local file or the remote file?
+ SysTime localModifiedTime = timeLastModified(newItemPath).toUTC();
+ SysTime itemModifiedTime = newDatabaseItem.mtime;
+ // Reduce time resolution to seconds before comparing
 localModifiedTime.fracSecs = Duration.zero;
 itemModifiedTime.fracSecs = Duration.zero;
- // is the local modified time greater than that from OneDrive?
+ 
+ // Is the local modified time greater than that from OneDrive?
 if (localModifiedTime > itemModifiedTime) {
- // local file is newer than item on OneDrive based on file modified time
+ // Local file is newer than item on OneDrive based on file modified time
 // Is this item id in the database?
- if (itemdb.idInLocalDatabase(item.driveId, item.id)){
+ if (itemDB.idInLocalDatabase(newDatabaseItem.driveId, newDatabaseItem.id)) {
 // item id is in the database
 // no local rename
 // no download needed
- log.vlog("Local item modified time is newer based on UTC time conversion - keeping local item as this exists in the local database");
- log.vdebug("Skipping OneDrive change as this is determined to be unwanted due to local item modified time being newer than OneDrive item and present in the sqlite database");
- return;
+ addLogEntry("Local item modified time is newer based on UTC time conversion - keeping local item as this exists in the local database", ["verbose"]);
+ addLogEntry("Skipping OneDrive change as this is determined to be unwanted due to local item modified time being newer than OneDrive item and present in the sqlite database", ["debug"]);
 } else {
 // item id is not in the database .. maybe a --resync ?
- // Should this 'download' be skipped?
- // Do we need to check for .nosync? Only if --check-for-nosync was passed in
- if (cfg.getValueBool("check_nosync")) {
- // need the parent path for this object
- string parentPath = dirName(path);
- if (exists(parentPath ~ "/.nosync")) {
- log.vlog("Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: ", path);
- // flag that this download failed, otherwise the 'item' is added to the database - then, as not present on the local disk, would get deleted from OneDrive
- downloadFailed = true;
- // clean up this partial file, otherwise every sync we will get theis warning
- log.vlog("Removing previous partial file download due to .nosync found in parent folder & --check-for-nosync is enabled");
- safeRemove(path);
- return;
- }
- }
 // file exists locally but is not in the sqlite database - maybe a failed download?
- log.vlog("Local item does not exist in local database - replacing with file from OneDrive - failed download?");
+ addLogEntry("Local item does not exist in local database - replacing with file from OneDrive - failed download?", ["verbose"]);
+ // In a --resync scenario or if items.sqlite3 was deleted before startup we have zero way of knowing IF the local file is meant to be the right file
+ // To this point we have passed the following checks:
+ // 1. Any client side filtering checks - this determined this is a file that is wanted
+ // 2. A file with the exact name exists locally
+ // 3. 
The local modified time > remote modified time + // 4. The id of the item from OneDrive is not in the database - // in a --resync scenario or if items.sqlite3 was deleted before startup we have zero way of knowing IF the local file is meant to be the right file - // we have passed the following checks: - // 1. file exists locally - // 2. local modified time > remote modified time - // 3. id is not in the database - - auto ext = extension(path); - auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext; - // has the user configured to IGNORE local data protection rules? + // Has the user configured to IGNORE local data protection rules? if (bypassDataPreservation) { // The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename - log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", path); + addLogEntry("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); } else { - // local data protection is configured, renaming local file - log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ", path, " -> ", newPath); - // perform the rename action of the local file - if (!dryRun) { - safeRename(path); - } else { - // Expectation here is that there is a new file locally (newPath) however as we don't create this, the "new file" will not be uploaded as it does not exist - log.vdebug("DRY-RUN: Skipping local file rename"); - } + // local data protection is configured, rename the local file, passing in if we are performing a --dry-run or not + safeBackup(newItemPath, dryRun); } - } } else { - // remote file is newer than local item - log.vlog("Remote item modified time is newer based on UTC time conversion"); // correct message, remote item is newer - log.vdebug("localModifiedTime (local file): ", localModifiedTime); - log.vdebug("itemModifiedTime (OneDrive item): ", itemModifiedTime); - - auto ext = extension(path); - auto newPath = path.chomp(ext) ~ "-" ~ deviceName ~ ext; + // Remote file is newer than the existing local item + addLogEntry("Remote item modified time is newer based on UTC time conversion", ["verbose"]); // correct message, remote item is newer + addLogEntry("localModifiedTime (local file): " ~ to!string(localModifiedTime), ["debug"]); + addLogEntry("itemModifiedTime (OneDrive item): " ~ to!string(itemModifiedTime), ["debug"]); - // has the user configured to IGNORE local data protection rules? + // Has the user configured to IGNORE local data protection rules? if (bypassDataPreservation) { // The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename - log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", path); + addLogEntry("WARNING: Local Data Protection has been disabled. 
You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); } else { - // local data protection is configured, renaming local file - log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", path, " -> ", newPath); - // perform the rename action of the local file - if (!dryRun) { - safeRename(path); - } else { - // Expectation here is that there is a new file locally (newPath) however as we don't create this, the "new file" will not be uploaded as it does not exist - log.vdebug("DRY-RUN: Skipping local file rename"); - } + // local data protection is configured, rename the local file, passing in if we are performing a --dry-run or not + safeBackup(newItemPath, dryRun); } } } - } else { - // Path does not exist locally - this will be a new file download or folder creation - - // Should this 'download' be skipped due to 'skip_dir' directive - if (cfg.getValueString("skip_dir") != "") { - string pathToCheck; - // does the path start with '/'? - if (!startsWith(path, "/")){ - // path does not start with '/', but we need to check skip_dir entries with and without '/' - // so always make sure we are checking a path with '/' - // If this is a file, we need to check the parent path - if (item.type == ItemType.file) { - // use parent path and add '/' - pathToCheck = '/' ~ dirName(path); - } else { - // use path and add '/' - pathToCheck = '/' ~ path; + } + + // Path does not exist locally (should not exist locally if renamed file) - this will be a new file download or new folder creation + // How to handle this Potentially New Local Item JSON ? + final switch (newDatabaseItem.type) { + case ItemType.file: + // Add to the items to download array for processing + fileJSONItemsToDownload ~= onedriveJSONItem; + break; + case ItemType.dir: + case ItemType.remote: + addLogEntry("Creating local directory: " ~ newItemPath); + if (!dryRun) { + try { + // Create the new directory + addLogEntry("Requested path does not exist, creating directory structure: " ~ newItemPath, ["debug"]); + mkdirRecurse(newItemPath); + // Configure the applicable permissions for the folder + addLogEntry("Setting directory permissions for: " ~ newItemPath, ["debug"]); + newItemPath.setAttributes(appConfig.returnRequiredDirectoryPermisions()); + // Update the time of the folder to match the last modified time as is provided by OneDrive + // If there are any files then downloaded into this folder, the last modified time will get + // updated by the local Operating System with the latest timestamp - as this is normal operation + // as the directory has been modified + addLogEntry("Setting directory lastModifiedDateTime for: " ~ newItemPath ~ " to " ~ to!string(newDatabaseItem.mtime), ["debug"]); + addLogEntry("Calling setTimes() for this file: " ~ newItemPath, ["debug"]); + setTimes(newItemPath, newDatabaseItem.mtime, newDatabaseItem.mtime); + // Save the item to the database + saveItem(onedriveJSONItem); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } + } else { + // we dont create the directory, but we need to track that we 'faked it' + idsFaked ~= [newDatabaseItem.driveId, newDatabaseItem.id]; + // Save the item to the dry-run database + saveItem(onedriveJSONItem); } + break; + case ItemType.unknown: + // Unknown type - we dont action or sync these items + break; + } + } + + // If the JSON item IS in the database, this will be an update to an existing in-sync item + void 
applyPotentiallyChangedItem(Item existingDatabaseItem, string existingItemPath, Item changedOneDriveItem, string changedItemPath, JSONValue onedriveJSONItem) { - // perform the check - if (selectiveSync.isDirNameExcluded(pathToCheck)) { - // this path should be skipped - if (item.type == ItemType.file) { - log.vlog("Skipping item - file path is excluded by skip_dir config: ", path); + // If we are moving the item, we do not need to download it again + bool itemWasMoved = false; + + // Do we need to actually update the database with the details that were provided by the OneDrive API? + // Calculate these time items from the provided items + SysTime existingItemModifiedTime = existingDatabaseItem.mtime; + existingItemModifiedTime.fracSecs = Duration.zero; + SysTime changedOneDriveItemModifiedTime = changedOneDriveItem.mtime; + changedOneDriveItemModifiedTime.fracSecs = Duration.zero; + + if (existingDatabaseItem.eTag != changedOneDriveItem.eTag) { + // The eTag has changed to what we previously cached + if (existingItemPath != changedItemPath) { + // Log that we are changing / moving an item to a new name + addLogEntry("Moving " ~ existingItemPath ~ " to " ~ changedItemPath); + // Is the destination path empty .. or does something exist at that location? + if (exists(changedItemPath)) { + // Destination we are moving to exists ... + Item changedLocalItem; + // Query DB for this changed item in specified path that exists and see if it is in-sync + if (itemDB.selectByPath(changedItemPath, changedOneDriveItem.driveId, changedLocalItem)) { + // The 'changedItemPath' is in the database + string itemSource = "database"; + if (isItemSynced(changedLocalItem, changedItemPath, itemSource)) { + // The destination item is in-sync + addLogEntry("Destination is in sync and will be overwritten", ["verbose"]); + } else { + // The destination item is different + addLogEntry("The destination is occupied with a different item, renaming the conflicting file...", ["verbose"]); + // Backup this item, passing in if we are performing a --dry-run or not + safeBackup(changedItemPath, dryRun); + } } else { - log.vlog("Skipping item - excluded by skip_dir config: ", path); + // The to be overwritten item is not already in the itemdb, so it should saved to avoid data loss + addLogEntry("The destination is occupied by an existing un-synced file, renaming the conflicting file...", ["verbose"]); + // Backup this item, passing in if we are performing a --dry-run or not + safeBackup(changedItemPath, dryRun); } - // flag that this download failed, otherwise the 'item' is added to the database - then, as not present on the local disk, would get deleted from OneDrive - downloadFailed = true; - return; } - } - - // Should this 'download' be skipped due to nosync directive? - // Do we need to check for .nosync? Only if --check-for-nosync was passed in - if (cfg.getValueBool("check_nosync")) { - // need the parent path for this object - string parentPath = dirName(path); - if (exists(parentPath ~ "/.nosync")) { - log.vlog("Skipping downloading item - .nosync found in parent folder & --check-for-nosync is enabled: ", path); - // flag that this download failed, otherwise the 'item' is added to the database - then, as not present on the local disk, would get deleted from OneDrive - downloadFailed = true; - return; - } - } - } - - // how to handle this item? 
- final switch (item.type) { - case ItemType.file: - downloadFileItem(item, path); - if (dryRun) { - // we dont download the file, but we need to track that we 'faked it' - idsFaked ~= [item.driveId, item.id]; - } - break; - case ItemType.dir: - case ItemType.remote: - log.log("Creating local directory: ", path); - - // Issue #658 handling - is sync_list in use? - if (syncListConfigured) { - // sync_list configured and in use - // path to create was previously checked if this should be included / excluded. No need to check again. - log.vdebug("Issue #658 handling"); - setOneDriveFullScanTrigger(); - } - - // Issue #865 handling - is skip_dir in use? - if (cfg.getValueString("skip_dir") != "") { - // we have some entries in skip_dir - // path to create was previously checked if this should be included / excluded. No need to check again. - log.vdebug("Issue #865 handling"); - setOneDriveFullScanTrigger(); - } - - if (!dryRun) { + + // Try and rename path, catch any exception generated try { - // Does the path exist locally? - if (!exists(path)) { - // Create the new directory - log.vdebug("Requested path does not exist, creating directory structure: ", path); - mkdirRecurse(path); - // Configure the applicable permissions for the folder - log.vdebug("Setting directory permissions for: ", path); - path.setAttributes(cfg.returnRequiredDirectoryPermisions()); - // Update the time of the folder to match the last modified time as is provided by OneDrive - // If there are any files then downloaded into this folder, the last modified time will get - // updated by the local Operating System with the latest timestamp - as this is normal operation - // as the directory has been modified - log.vdebug("Setting directory lastModifiedDateTime for: ", path , " to ", item.mtime); - setTimes(path, item.mtime, item.mtime); + // Rename this item, passing in if we are performing a --dry-run or not + safeBackup(changedItemPath, dryRun); + + // If the item is a file, make sure that the local timestamp now is the same as the timestamp online + // Otherwise when we do the DB check, the move on the file system, the file technically has a newer timestamp + // which is 'correct' .. 
but we need to report locally the online timestamp here as the move was made online + if (changedOneDriveItem.type == ItemType.file) { + setTimes(changedItemPath, changedOneDriveItem.mtime, changedOneDriveItem.mtime); + } + + // Flag that the item was moved | renamed + itemWasMoved = true; + + // If we are in a --dry-run situation, the actual rename did not occur - but we need to track like it did + if (dryRun) { + // Track this as a faked id item + idsFaked ~= [changedOneDriveItem.driveId, changedOneDriveItem.id]; + // We also need to track that we did not rename this path + pathsRenamed ~= [existingItemPath]; } } catch (FileException e) { // display the error message displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - // flag that this failed - downloadFailed = true; - return; } - } else { - // we dont create the directory, but we need to track that we 'faked it' - idsFaked ~= [item.driveId, item.id]; } - break; - } - } - - // update a local item - // the local item is assumed to be in sync with the local db - private void applyChangedItem(Item oldItem, string oldPath, Item newItem, string newPath) - { - assert(oldItem.driveId == newItem.driveId); - assert(oldItem.id == newItem.id); - assert(oldItem.type == newItem.type); - assert(oldItem.remoteDriveId == newItem.remoteDriveId); - assert(oldItem.remoteId == newItem.remoteId); - - if (oldItem.eTag != newItem.eTag) { - // handle changed name/path - if (oldPath != newPath) { - log.log("Moving ", oldPath, " to ", newPath); - if (exists(newPath)) { - Item localNewItem; - if (itemdb.selectByPath(newPath, defaultDriveId, localNewItem)) { - // Query DB for new local item in specified path - string itemSource = "database"; - if (isItemSynced(localNewItem, newPath, itemSource)) { - log.vlog("Destination is in sync and will be overwritten"); - } else { - // TODO: force remote sync by deleting local item - log.vlog("The destination is occupied, renaming the conflicting file..."); - if (!dryRun) { - safeRename(newPath); - } - } - } else { - // to be overwritten item is not already in the itemdb, so it should - // be synced. Do a safe rename here, too. - // TODO: force remote sync by deleting local item - log.vlog("The destination is occupied by new file, renaming the conflicting file..."); - if (!dryRun) { - safeRename(newPath); - } + + // What sort of changed item is this? + // Is it a file, and we did not move it .. + if ((changedOneDriveItem.type == ItemType.file) && (!itemWasMoved)) { + // The eTag is notorious for being 'changed' online by some backend Microsoft process + if (existingDatabaseItem.quickXorHash != changedOneDriveItem.quickXorHash) { + // Add to the items to download array for processing - the file hash we previously recorded is not the same as online + fileJSONItemsToDownload ~= onedriveJSONItem; + } else { + // If the timestamp is different, or we are running a client operational mode that does not support /delta queries - we have to update the DB with the details from OneDrive + // Unfortunatly because of the consequence of Nataional Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes + // This means that the constant disk writing to the database fix implemented with https://github.com/abraunegg/onedrive/pull/2004 cannot be utilised when using these operational modes + // as all records are touched / updated when performing the OneDrive sync operations. 
The impacted operational modes are: + // - National Cloud Deployments do not support /delta as a query + // - When using --single-directory + // - When using --download-only --cleanup-local-files + + // Is the last modified timestamp in the DB the same as the API data or are we running an operational mode where we simulated the /delta response? + if ((existingItemModifiedTime != changedOneDriveItemModifiedTime) || (generateSimulatedDeltaResponse)) { + // Save this item in the database + // Add to the local database + addLogEntry("Adding changed OneDrive Item to database: " ~ to!string(changedOneDriveItem), ["debug"]); + itemDB.upsert(changedOneDriveItem); } } - // try and rename path, catch exception - try { - log.vdebug("Calling rename(oldPath, newPath)"); - if (!dryRun) { - // rename physical path on disk - rename(oldPath, newPath); - } else { - // track this as a faked id item - idsFaked ~= [newItem.driveId, newItem.id]; - // we also need to track that we did not rename this path - pathsRenamed ~= [oldPath]; - } - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } else { + // Save this item in the database + saveItem(onedriveJSONItem); + + // If the 'Add shortcut to My files' link was the item that was actually renamed .. we have to update our DB records + if (changedOneDriveItem.type == ItemType.remote) { + // Select remote item data from the database + Item existingRemoteDbItem; + itemDB.selectById(changedOneDriveItem.remoteDriveId, changedOneDriveItem.remoteId, existingRemoteDbItem); + // Update the 'name' in existingRemoteDbItem and save it back to the database + // This is the local name stored on disk that was just 'moved' + existingRemoteDbItem.name = changedOneDriveItem.name; + itemDB.upsert(existingRemoteDbItem); } } - // handle changed content and mtime - // HACK: use mtime+hash instead of cTag because of https://github.com/OneDrive/onedrive-api-docs/issues/765 - if (newItem.type == ItemType.file && oldItem.mtime != newItem.mtime && !testFileHash(newPath, newItem)) { - downloadFileItem(newItem, newPath); - } + } else { + // The existingDatabaseItem.eTag == changedOneDriveItem.eTag .. nothing has changed eTag wise - // handle changed time - if (newItem.type == ItemType.file && oldItem.mtime != newItem.mtime) { - try { - log.vdebug("Calling setTimes() for this file: ", newPath); - setTimes(newPath, newItem.mtime, newItem.mtime); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - } - } - - // downloads a File resource - private void downloadFileItem(const ref Item item, const(string) path) - { - static import std.exception; - assert(item.type == ItemType.file); - write("Downloading file ", path, " ... "); - JSONValue fileDetails; + // If the timestamp is different, or we are running a client operational mode that does not support /delta queries - we have to update the DB with the details from OneDrive + // Unfortunatly because of the consequence of Nataional Cloud Deployments not supporting /delta queries, the application uses the local database to flag what is out-of-date / track changes + // This means that the constant disk writing to the database fix implemented with https://github.com/abraunegg/onedrive/pull/2004 cannot be utilised when using these operational modes + // as all records are touched / updated when performing the OneDrive sync operations. 
The impacted operational modes are: + // - National Cloud Deployments do not support /delta as a query + // - When using --single-directory + // - When using --download-only --cleanup-local-files - try { - fileDetails = onedrive.getFileDetails(item.driveId, item.id); - } catch (OneDriveException e) { - log.error("ERROR: Query of OneDrive for file details failed"); - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - downloadFailed = true; - return; + // Is the last modified timestamp in the DB the same as the API data or are we running an operational mode where we simulated the /delta response? + if ((existingItemModifiedTime != changedOneDriveItemModifiedTime) || (generateSimulatedDeltaResponse)) { + // Database update needed for this item because our local record is out-of-date + // Add to the local database + addLogEntry("Adding changed OneDrive Item to database: " ~ to!string(changedOneDriveItem), ["debug"]); + itemDB.upsert(changedOneDriveItem); } } + } + + // Download new file items as identified + void downloadOneDriveItems() { + // Lets deal with all the JSON items that need to be downloaded in a batch process + ulong batchSize = appConfig.concurrentThreads; + ulong batchCount = (fileJSONItemsToDownload.length + batchSize - 1) / batchSize; + ulong batchesProcessed = 0; - // fileDetails has to be a valid JSON object - if (fileDetails.type() == JSONType.object){ - if (isMalware(fileDetails)){ - // OneDrive reports that this file is malware - log.error("ERROR: MALWARE DETECTED IN FILE - DOWNLOAD SKIPPED"); - // set global flag - malwareDetected = true; - return; - } - } else { - // Issue #550 handling - log.error("ERROR: Query of OneDrive for file details failed"); - log.vdebug("onedrive.getFileDetails call returned an invalid JSON Object"); - // We want to return, cant download - downloadFailed = true; - return; + foreach (chunk; fileJSONItemsToDownload.chunks(batchSize)) { + // send an array containing 'appConfig.concurrentThreads' (16) JSON items to download + downloadOneDriveItemsInParallel(chunk); + } + } + + // Download items in parallel + void downloadOneDriveItemsInParallel(JSONValue[] array) { + // This function recieved an array of 16 JSON items to download + foreach (i, onedriveJSONItem; taskPool.parallel(array)) { + // Take each JSON item and + downloadFileItem(onedriveJSONItem); } + } + + // Perform the actual download of an object from OneDrive + void downloadFileItem(JSONValue onedriveJSONItem) { + + bool downloadFailed = false; + string OneDriveFileXORHash; + string OneDriveFileSHA256Hash; + ulong jsonFileSize = 0; - if (!dryRun) { - ulong onlineFileSize = 0; - string OneDriveFileHash; - - // fileDetails should be a valid JSON due to prior check - if (hasFileSize(fileDetails)) { - // Use the configured onlineFileSize as reported by OneDrive - onlineFileSize = fileDetails["size"].integer; + // Download item specifics + string downloadDriveId = onedriveJSONItem["parentReference"]["driveId"].str; + string downloadParentId = onedriveJSONItem["parentReference"]["id"].str; + string downloadItemName = onedriveJSONItem["name"].str; + string downloadItemId = onedriveJSONItem["id"].str; + + // Calculate this items path + string newItemPath = computeItemPath(downloadDriveId, downloadParentId) ~ "/" ~ downloadItemName; + addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]); + + // Is the item reported as Malware ? 
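// A minimal sketch of the batching pattern introduced just above in downloadOneDriveItems() and
// downloadOneDriveItemsInParallel(): the pending items are split into chunks of
// 'appConfig.concurrentThreads' and each chunk is walked with taskPool.parallel so the per-item
// work runs concurrently. The integer payload and processOneItem() below are stand-ins for the
// JSON items and downloadFileItem().
import std.array : array;
import std.parallelism : taskPool;
import std.range : chunks, iota;
import std.stdio : writeln;

void processOneItem(int item) {
	// Stand-in for downloadFileItem(onedriveJSONItem)
	writeln("processing item ", item);
}

void processInBatches(int[] pendingItems, size_t batchSize) {
	foreach (chunk; pendingItems.chunks(batchSize)) {
		// Each chunk is processed in parallel; the next chunk starts once this one has completed
		foreach (item; taskPool.parallel(chunk)) {
			processOneItem(item);
		}
	}
}

void main() {
	processInBatches(iota(0, 40).array, 16);
}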
+ if (isMalware(onedriveJSONItem)){ + // OneDrive reports that this file is malware + addLogEntry("ERROR: MALWARE DETECTED IN FILE - DOWNLOAD SKIPPED: " ~ newItemPath, ["info", "notify"]); + downloadFailed = true; + } else { + // Grab this file's filesize + if (hasFileSize(onedriveJSONItem)) { + // Use the configured filesize as reported by OneDrive + jsonFileSize = onedriveJSONItem["size"].integer; } else { // filesize missing - log.vdebug("WARNING: fileDetails['size'] is missing"); + addLogEntry("ERROR: onedriveJSONItem['size'] is missing", ["debug"]); } - - if (hasHashes(fileDetails)) { + + // Configure the hashes for comparison post download + if (hasHashes(onedriveJSONItem)) { // File details returned hash details // QuickXorHash - if (hasQuickXorHash(fileDetails)) { - // Use the configured quickXorHash as reported by OneDrive - if (fileDetails["file"]["hashes"]["quickXorHash"].str != "") { - OneDriveFileHash = fileDetails["file"]["hashes"]["quickXorHash"].str; + if (hasQuickXorHash(onedriveJSONItem)) { + // Use the provided quickXorHash as reported by OneDrive + if (onedriveJSONItem["file"]["hashes"]["quickXorHash"].str != "") { + OneDriveFileXORHash = onedriveJSONItem["file"]["hashes"]["quickXorHash"].str; } } else { - // Check for sha256Hash as quickXorHash did not exist - if (hasSHA256Hash(fileDetails)) { - // Use the configured sha256Hash as reported by OneDrive - if (fileDetails["file"]["hashes"]["sha256Hash"].str != "") { - OneDriveFileHash = fileDetails["file"]["hashes"]["sha256Hash"].str; + // Fallback: Check for SHA256Hash + if (hasSHA256Hash(onedriveJSONItem)) { + // Use the provided sha256Hash as reported by OneDrive + if (onedriveJSONItem["file"]["hashes"]["sha256Hash"].str != "") { + OneDriveFileSHA256Hash = onedriveJSONItem["file"]["hashes"]["sha256Hash"].str; } } } } else { // file hash data missing - log.vdebug("WARNING: fileDetails['file']['hashes'] is missing - unable to compare file hash after download"); + addLogEntry("ERROR: onedriveJSONItem['file']['hashes'] is missing - unable to compare file hash after download", ["debug"]); + } + + // Is this a --download-only scenario? + if (appConfig.getValueBool("download_only")) { + if (exists(newItemPath)) { + // file exists locally already + Item databaseItem; + bool fileFoundInDB = false; + + foreach (driveId; driveIDsArray) { + if (itemDB.selectByPath(newItemPath, driveId, databaseItem)) { + fileFoundInDB = true; + break; + } + } + + // Log the DB details + addLogEntry("File to download exists locally and this is the DB record: " ~ to!string(databaseItem), ["debug"]); + + // Does the DB (what we think is in sync) hash match the existing local file hash? + if (!testFileHash(newItemPath, databaseItem)) { + // local file is different to what we know to be true + addLogEntry("The local file to replace (" ~ newItemPath ~ ") has been modified locally since the last download. 
Renaming it to avoid potential local data loss."); + + // Perform the local rename of the existing local file, passing in if we are performing a --dry-run or not + safeBackup(newItemPath, dryRun); + } + } } // Is there enough free space locally to download the file @@ -3037,192 +2048,207 @@ final class SyncEngine ulong localActualFreeSpace = to!ulong(getAvailableDiskSpace(".")); // So that we are not responsible in making the disk 100% full if we can download the file, compare the current available space against the reservation set and file size // The reservation value is user configurable in the config file, 50MB by default - ulong freeSpaceReservation = cfg.getValueLong("space_reservation"); + ulong freeSpaceReservation = appConfig.getValueLong("space_reservation"); // debug output - log.vdebug("Local Disk Space Actual: ", localActualFreeSpace); - log.vdebug("Free Space Reservation: ", freeSpaceReservation); - log.vdebug("File Size to Download: ", onlineFileSize); + addLogEntry("Local Disk Space Actual: " ~ to!string(localActualFreeSpace), ["debug"]); + addLogEntry("Free Space Reservation: " ~ to!string(freeSpaceReservation), ["debug"]); + addLogEntry("File Size to Download: " ~ to!string(jsonFileSize), ["debug"]); - // calculate if we can download file - if ((localActualFreeSpace < freeSpaceReservation) || (onlineFileSize > localActualFreeSpace)) { + // Calculate if we can actually download file - is there enough free space? + if ((localActualFreeSpace < freeSpaceReservation) || (jsonFileSize > localActualFreeSpace)) { // localActualFreeSpace is less than freeSpaceReservation .. insufficient free space - // onlineFileSize is greater than localActualFreeSpace .. insufficient free space - writeln("failed!"); - log.log("Insufficient local disk space to download file"); + // jsonFileSize is greater than localActualFreeSpace .. insufficient free space + addLogEntry("Downloading file " ~ newItemPath ~ " ... failed!"); + addLogEntry("Insufficient local disk space to download file"); downloadFailed = true; - return; - } - - // Attempt to download the file - try { - onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); - } catch (OneDriveException e) { - log.vdebug("onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); generated a OneDriveException"); - // 408 = Request Time Out - // 429 = Too Many Requests - need to delay - if (e.httpStatusCode == 408) { - // 408 error handling - request time out - // https://github.com/abraunegg/onedrive/issues/694 - // Back off & retry with incremental delay - int retryCount = 10; - int retryAttempts = 1; - int backoffInterval = 2; - while (retryAttempts < retryCount){ - // retry in 2,4,8,16,32,64,128,256,512,1024 seconds - Thread.sleep(dur!"seconds"(retryAttempts*backoffInterval)); - try { - onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); - // successful download - retryAttempts = retryCount; - } catch (OneDriveException e) { - log.vdebug("onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); generated a OneDriveException"); - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 408)) { - // If another 408 .. - if (e.httpStatusCode == 408) { - // Increment & loop around - log.vdebug("HTTP 408 generated - incrementing retryAttempts"); - retryAttempts++; - } - // If a 429 .. 
- if (e.httpStatusCode == 429) { - // Increment & loop around - handleOneDriveThrottleRequest(); - log.vdebug("HTTP 429 generated - incrementing retryAttempts"); - retryAttempts++; - } - } else { - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } else { + // If we are in a --dry-run situation - if not, actually perform the download + if (!dryRun) { + // Attempt to download the file as there is enough free space locally + OneDriveApi downloadFileOneDriveApiInstance; + downloadFileOneDriveApiInstance = new OneDriveApi(appConfig); + try { + downloadFileOneDriveApiInstance.initialise(); + downloadFileOneDriveApiInstance.downloadById(downloadDriveId, downloadItemId, newItemPath, jsonFileSize); + downloadFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(downloadFileOneDriveApiInstance); + } catch (OneDriveException exception) { + addLogEntry("downloadFileOneDriveApiInstance.downloadById(downloadDriveId, downloadItemId, newItemPath, jsonFileSize); generated a OneDriveException", ["debug"]); + string thisFunctionName = getFunctionName!({}); + + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(downloadFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to download an item from OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
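// A minimal sketch of the retry policy applied around this point: HTTP 408, 429, 503 and 504 are
// treated as transient and the request is re-attempted after a pause, anything else is surfaced
// immediately. The helper names and the configurable delay are illustrative only; the surrounding
// code pauses for a fixed 30 seconds and additionally honours the Retry-After header for HTTP 429
// via handleOneDriveThrottleRequest().
import core.thread : Thread;
import core.time : Duration, seconds;
import std.algorithm.searching : canFind;
import std.stdio : writeln;

bool isTransientHttpError(int httpStatusCode) {
	return [408, 429, 503, 504].canFind(httpStatusCode);
}

void pauseBeforeRetry(int httpStatusCode, Duration delay) {
	if (isTransientHttpError(httpStatusCode)) {
		writeln("HTTP ", httpStatusCode, " is transient - pausing before retrying the request");
		Thread.sleep(delay); // the surrounding code uses a fixed 30 second pause
	} else {
		writeln("HTTP ", httpStatusCode, " is not retried - the error is reported immediately");
	}
}

void main() {
	pauseBeforeRetry(503, 1.seconds); // a real retry would then re-issue the download request
	pauseBeforeRetry(403, 1.seconds);
}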
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + downloadFileItem(onedriveJSONItem); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); } + + } catch (FileException e) { + // There was a file system error + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + downloadFailed = true; + } catch (ErrnoException e) { + // There was a file system error + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + downloadFailed = true; } - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests) - // https://github.com/abraunegg/onedrive/issues/133 - int retryCount = 10; - int retryAttempts = 1; - while (retryAttempts < retryCount){ - // retry after waiting the timeout value from the 429 HTTP response header Retry-After - handleOneDriveThrottleRequest(); - try { - onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); - // successful download - retryAttempts = retryCount; - } catch (OneDriveException e) { - log.vdebug("onedrive.downloadById(item.driveId, item.id, path, onlineFileSize); generated a OneDriveException"); - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 408)) { - // If another 408 .. - if (e.httpStatusCode == 408) { - // Increment & loop around - log.vdebug("HTTP 408 generated - incrementing retryAttempts"); - retryAttempts++; - } - // If a 429 .. - if (e.httpStatusCode == 429) { - // Increment & loop around - handleOneDriveThrottleRequest(); - log.vdebug("HTTP 429 generated - incrementing retryAttempts"); - retryAttempts++; + + // If we get to this point, something was downloaded .. does it match what we expected? + if (exists(newItemPath)) { + // When downloading some files from SharePoint, the OneDrive API reports one file size, + // but the SharePoint HTTP Server sends a totally different byte count for the same file + // we have implemented --disable-download-validation to disable these checks + + if (!disableDownloadValidation) { + // A 'file' was downloaded - does what we downloaded = reported jsonFileSize or if there is some sort of funky local disk compression going on + // Does the file hash OneDrive reports match what we have locally? 
+ string onlineFileHash;
+ string downloadedFileHash;
+ ulong downloadFileSize = getSize(newItemPath);
+ 
+ if (!OneDriveFileXORHash.empty) {
+ onlineFileHash = OneDriveFileXORHash;
+ // Calculate the QuickXorHash for this file
+ downloadedFileHash = computeQuickXorHash(newItemPath);
+ } else {
+ onlineFileHash = OneDriveFileSHA256Hash;
+ // Fallback: Calculate the SHA256 Hash for this file
+ downloadedFileHash = computeSHA256Hash(newItemPath);
+ }
+ 
+ if ((downloadFileSize == jsonFileSize) && (downloadedFileHash == onlineFileHash)) {
+ // Downloaded file matches size and hash
+ addLogEntry("Downloaded file matches reported size and reported file hash", ["debug"]);
+ 
+ try {
+ // get the mtime from the JSON data
+ SysTime itemModifiedTime;
+ if (isItemRemote(onedriveJSONItem)) {
+ // remote file item
+ itemModifiedTime = SysTime.fromISOExtString(onedriveJSONItem["remoteItem"]["fileSystemInfo"]["lastModifiedDateTime"].str);
+ } else {
+ // not a remote item
+ itemModifiedTime = SysTime.fromISOExtString(onedriveJSONItem["fileSystemInfo"]["lastModifiedDateTime"].str);
+ }
+ 
+ // set the correct time on the downloaded file
+ addLogEntry("Calling setTimes() for this file: " ~ newItemPath, ["debug"]);
+ setTimes(newItemPath, itemModifiedTime, itemModifiedTime);
+ } catch (FileException e) {
+ // display the error message
+ displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
 }
 } else {
- displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
+ // Downloaded file does not match size or hash .. which is it?
+ bool downloadValueMismatch = false;
+ // Size error?
+ if (downloadFileSize != jsonFileSize) {
+ // downloaded file size does not match
+ downloadValueMismatch = true;
+ addLogEntry("Actual file size on disk: " ~ to!string(downloadFileSize), ["debug"]);
+ addLogEntry("OneDrive API reported size: " ~ to!string(jsonFileSize), ["debug"]);
+ addLogEntry("ERROR: File download size mis-match. Increase logging verbosity to determine why.");
+ }
+ // Hash Error
+ if (downloadedFileHash != onlineFileHash) {
+ // downloaded file hash does not match
+ downloadValueMismatch = true;
+ addLogEntry("Actual local file hash: " ~ downloadedFileHash, ["debug"]);
+ addLogEntry("OneDrive API reported hash: " ~ onlineFileHash, ["debug"]);
+ addLogEntry("ERROR: File download hash mis-match. Increase logging verbosity to determine why.");
+ }
+ // .heic data loss check
+ // - https://github.com/abraunegg/onedrive/issues/2471
+ // - https://github.com/OneDrive/onedrive-api-docs/issues/1532
+ // - https://github.com/OneDrive/onedrive-api-docs/issues/1723
+ if (downloadValueMismatch && (toLower(extension(newItemPath)) == ".heic")) {
+ // Need to display a message to the user that they have experienced data loss
+ addLogEntry("DATA-LOSS: File downloaded has experienced data loss due to a Microsoft OneDrive API bug. 
DO NOT DELETE THIS FILE ONLINE: " ~ newItemPath, ["info", "notify"]); + addLogEntry(" Please read https://github.com/OneDrive/onedrive-api-docs/issues/1723 for more details.", ["verbose"]); + } + + // Add some workaround messaging for SharePoint + if (appConfig.accountType == "documentLibrary"){ + // It has been seen where SharePoint / OneDrive API reports one size via the JSON + // but the content length and file size written to disk is totally different - example: + // From JSON: "size": 17133 + // From HTTPS Server: < Content-Length: 19340 + // with no logical reason for the difference, except for a 302 redirect before file download + addLogEntry("INFO: It is most likely that a SharePoint OneDrive API issue is the root cause. Add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); + } else { + // other account types + addLogEntry("INFO: Potentially add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); + } + // We do not want this local file to remain on the local file system as it failed the integrity checks + addLogEntry("Removing file " ~ newItemPath ~ " due to failed integrity checks"); + if (!dryRun) { + safeRemove(newItemPath); + } + downloadFailed = true; } - } + } else { + // Download validation checks were disabled + addLogEntry("Downloaded file validation disabled due to --disable-download-validation", ["debug"]); + addLogEntry("WARNING: Skipping download integrity check for: " ~ newItemPath, ["verbose"]); + } // end of (!disableDownloadValidation) + } else { + addLogEntry("ERROR: File failed to download. Increase logging verbosity to determine why."); + downloadFailed = true; } } - } catch (FileException e) { - // There was a file system error - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - downloadFailed = true; - return; - } catch (std.exception.ErrnoException e) { - // There was a file system error - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - downloadFailed = true; - return; } - // file has to have downloaded in order to set the times / data for the file - if (exists(path)) { - // When downloading some files from SharePoint, the OneDrive API reports one file size, but the SharePoint HTTP Server sends a totally different byte count - // for the same file - // we have implemented --disable-download-validation to disable these checks + + // File should have been downloaded + if (!downloadFailed) { + // Download did not fail + addLogEntry("Downloading file " ~ newItemPath ~ " ... done"); + // Save this item into the database + saveItem(onedriveJSONItem); - if (!disableDownloadValidation) { - // A 'file' was downloaded - does what we downloaded = reported onlineFileSize or if there is some sort of funky local disk compression going on - // does the file hash OneDrive reports match what we have locally? 
- string quickXorHash = computeQuickXorHash(path); - // Compute the local file size - ulong localFileSize = getSize(path); - - if ((localFileSize == onlineFileSize) || (OneDriveFileHash == quickXorHash)) { - // downloaded matches either size or hash - log.vdebug("Downloaded file matches reported size and or reported file hash"); - try { - log.vdebug("Calling setTimes() for this file: ", path); - setTimes(path, item.mtime, item.mtime); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } else { - // size error? - if (localFileSize != onlineFileSize) { - // downloaded file size does not match - log.vdebug("Actual file size on disk: ", localFileSize); - log.vdebug("OneDrive API reported size: ", onlineFileSize); - log.error("ERROR: File download size mis-match. Increase logging verbosity to determine why."); - } - // hash error? - if (OneDriveFileHash != quickXorHash) { - // downloaded file hash does not match - log.vdebug("Actual local file hash: ", quickXorHash); - log.vdebug("OneDrive API reported hash: ", OneDriveFileHash); - log.error("ERROR: File download hash mis-match. Increase logging verbosity to determine why."); - } - // add some workaround messaging - if (accountType == "documentLibrary"){ - // It has been seen where SharePoint / OneDrive API reports one size via the JSON - // but the content length and file size written to disk is totally different - example: - // From JSON: "size": 17133 - // From HTTPS Server: < Content-Length: 19340 - // with no logical reason for the difference, except for a 302 redirect before file download - log.error("INFO: It is most likely that a SharePoint OneDrive API issue is the root cause. Add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); - } else { - // other account types - log.error("INFO: Potentially add --disable-download-validation to work around this issue but downloaded data integrity cannot be guaranteed."); - } - - // we do not want this local file to remain on the local file system - safeRemove(path); - downloadFailed = true; - return; - } - } else { - // download checks have been disabled - log.vdebug("Downloaded file validation disabled due to --disable-download-validation "); + // If we are in a --dry-run situation - if we are, we need to track that we faked the download + if (dryRun) { + // track that we 'faked it' + idsFaked ~= [downloadDriveId, downloadItemId]; } } else { - log.error("ERROR: File failed to download. Increase logging verbosity to determine why."); - downloadFailed = true; - return; + // Output download failed + addLogEntry("Downloading file " ~ newItemPath ~ " ... failed!"); + // Add the path to a list of items that failed to download + fileDownloadFailures ~= newItemPath; } } - - if (!downloadFailed) { - writeln("done."); - log.fileOnly("Downloading file ", path, " ... done."); - } else { - writeln("failed!"); - log.fileOnly("Downloading file ", path, " ... failed!"); - } } - - // returns true if the given item corresponds to the local one - private bool isItemSynced(const ref Item item, const(string) path, string itemSource) - { + + // Test if the given item is in-sync. 
Returns true if the given item corresponds to the local one + bool isItemSynced(Item item, string path, string itemSource) { + if (!exists(path)) return false; final switch (item.type) { case ItemType.file: @@ -3232,29 +2258,51 @@ final class SyncEngine // local file is readable SysTime localModifiedTime = timeLastModified(path).toUTC(); SysTime itemModifiedTime = item.mtime; - // HACK: reduce time resolution to seconds before comparing + // Reduce time resolution to seconds before comparing localModifiedTime.fracSecs = Duration.zero; itemModifiedTime.fracSecs = Duration.zero; if (localModifiedTime == itemModifiedTime) { return true; } else { - log.vlog("The local item has a different modified time ", localModifiedTime, " when compared to ", itemSource, " modified time ", itemModifiedTime); + addLogEntry("Local item time discrepancy detected: " ~ path, ["verbose"]); + addLogEntry("This local item has a different modified time " ~ to!string(localModifiedTime) ~ " when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime), ["verbose"]); + // The file has been modified ... is the hash the same? // Test the file hash as the date / time stamp is different - // Generating a hash is computationally expensive - only generate the hash if timestamp was modified + // Generating a hash is computationally expensive - we only generate the hash if timestamp was different if (testFileHash(path, item)) { + // The hash is the same .. so we need to fix-up the timestamp depending on where it is wrong + addLogEntry("Local item has the same hash value as the item online - correcting timestamp", ["verbose"]); + // Test if the local timestamp is newer + if (localModifiedTime > itemModifiedTime) { + // The source of the out-of-date timestamp was OneDrive and this needs to be corrected to avoid always generating a hash test if timestamp is different + addLogEntry("The source of the incorrect timestamp was OneDrive online - correcting timestamp online", ["verbose"]); + if (!dryRun) { + // Attempt to update the online date time stamp + uploadLastModifiedTime(item.driveId, item.id, localModifiedTime.toUTC(), item.eTag); + } + } else { + // The source of the out-of-date timestamp was the local file and this needs to be corrected to avoid always generating a hash test if timestamp is different + addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally", ["verbose"]); + if (!dryRun) { + addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); + setTimes(path, item.mtime, item.mtime); + } + } return true; } else { - log.vlog("The local item has a different hash when compared to ", itemSource, " item hash"); + // The hash is different so the content of the file has to be different as to what is stored online + addLogEntry("The local item has a different hash when compared to " ~ itemSource ~ " item hash", ["verbose"]); + return false; } } } else { // Unable to read local file - log.log("Unable to determine the sync state of this file as it cannot be read (file permissions or file corruption): ", path); + addLogEntry("Unable to determine the sync state of this file as it cannot be read (file permissions or file corruption): " ~ path); return false; } } else { - log.vlog("The local item is a directory but should be a file"); + addLogEntry("The local item is a directory but should be a file", ["verbose"]); } break; case ItemType.dir: @@ -3262,1104 +2310,772 @@ final class SyncEngine if (isDir(path)) { return true; } else { - log.vlog("The local 
item is a file but should be a directory"); + addLogEntry("The local item is a file but should be a directory", ["verbose"]); } break; + case ItemType.unknown: + // Unknown type - return true but we dont action or sync these items + return true; } return false; } - - private void deleteItems() - { - foreach_reverse (i; idsToDelete) { - Item item; - string path; - if (!itemdb.selectById(i[0], i[1], item)) continue; // check if the item is in the db - // Compute this item path - path = computeItemPath(i[0], i[1]); - // Try to delete item object - log.log("Trying to delete item ", path); - if (!dryRun) { - // Actually process the database entry removal - itemdb.deleteById(item.driveId, item.id); - if (item.remoteDriveId != null) { - // delete the linked remote folder - itemdb.deleteById(item.remoteDriveId, item.remoteId); + + // Get the /delta data using the provided details + JSONValue getDeltaChangesByItemId(string selectedDriveId, string selectedItemId, string providedDeltaLink, OneDriveApi getDeltaQueryOneDriveApiInstance) { + + // Function variables + JSONValue deltaChangesBundle; + + // Get the /delta data for this account | driveId | deltaLink combination + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("selectedDriveId: " ~ selectedDriveId, ["debug"]); + addLogEntry("selectedItemId: " ~ selectedItemId, ["debug"]); + addLogEntry("providedDeltaLink: " ~ providedDeltaLink, ["debug"]); + addLogEntry("------------------------------------------------------------------", ["debug"]); + + try { + deltaChangesBundle = getDeltaQueryOneDriveApiInstance.viewChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink); + } catch (OneDriveException exception) { + // caught an exception + addLogEntry("getDeltaQueryOneDriveApiInstance.viewChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink) generated a OneDriveException", ["debug"]); + + auto errorArray = splitLines(exception.msg); + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(getDeltaQueryOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + addLogEntry(to!string(errorArray[0]) ~ " when attempting to query OneDrive API for Delta Changes - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
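For the transient 408, 503 and 504 responses handled here (and the 429 throttle case handled just below), the recovery has the same shape: pause, then re-issue the identical request once. A hypothetical, generic helper illustrating that shape is sketched below; the delegate-based call and the default delay are illustrative and are not part of the client's OneDriveApi interface.

```d
// Hypothetical sleep-then-retry helper, mirroring the transient-error handling
// described above: wait for the given delay, then re-run the same operation once.
// 30 seconds matches the delay used for 408/503/504; a Retry-After value,
// when the server supplies one, should be passed in instead.
import core.thread : Thread;
import core.time : dur;

T sleepThenRetry(T)(scope T delegate() operation, long delaySeconds = 30) {
    Thread.sleep(dur!"seconds"(delaySeconds));
    return operation();
}
```

The 410 (expired deltaLink) case that follows is intentionally not retried this way; the stored deltaLink is discarded and the query is repeated with an empty deltaLink instead.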
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); } - } - bool needsRemoval = false; - if (exists(path)) { - // path exists on the local system - // make sure that the path refers to the correct item - Item pathItem; - if (itemdb.selectByPath(path, item.driveId, pathItem)) { - if (pathItem.id == item.id) { - needsRemoval = true; - } else { - log.log("Skipped due to id difference!"); - } + // dont retry request, loop back to calling function + addLogEntry("Looping back after failure", ["debug"]); + deltaChangesBundle = null; + } else { + // Default operation if not 408,429,503,504 errors + if (exception.httpStatusCode == 410) { + addLogEntry(); + addLogEntry("WARNING: The OneDrive API responded with an error that indicates the locally stored deltaLink value is invalid"); + // Essentially the 'providedDeltaLink' that we have stored is no longer available ... re-try without the stored deltaLink + addLogEntry("WARNING: Retrying OneDrive API call without using the locally stored deltaLink value"); + // Configure an empty deltaLink + addLogEntry("Delta link expired for 'getDeltaQueryOneDriveApiInstance.viewChangesByItemId(selectedDriveId, selectedItemId, providedDeltaLink)', setting 'deltaLink = null'", ["debug"]); + string emptyDeltaLink = ""; + // retry with empty deltaLink + deltaChangesBundle = getDeltaQueryOneDriveApiInstance.viewChangesByItemId(selectedDriveId, selectedItemId, emptyDeltaLink); } else { - // item has disappeared completely - needsRemoval = true; - } - } - if (needsRemoval) { - log.log("Deleting item ", path); - if (!dryRun) { - if (isFile(path)) { - remove(path); - } else { - try { - // Remove any children of this path if they still exist - // Resolve 'Directory not empty' error when deleting local files - foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) { - attrIsDir(child.linkAttributes) ? rmdir(child.name) : remove(child.name); - } - // Remove the path now that it is empty of children - rmdirRecurse(path); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } + // display what the error is + addLogEntry("CODING TO DO: Hitting this failure error output"); + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + deltaChangesBundle = null; } } } - if (!dryRun) { - // clean up idsToDelete - idsToDelete.length = 0; - assumeSafeAppend(idsToDelete); - } + return deltaChangesBundle; } - // scan the given directory for differences and new items - for use with --synchronize - void scanForDifferences(const(string) path) - { - // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? - string logPath; - if (path == ".") { - // get the configured sync_dir - logPath = buildNormalizedPath(cfg.getValueString("sync_dir")); - } else { - // use what was passed in - logPath = path; - } + // Common code to handle a 408 or 429 response from the OneDrive API + void handleOneDriveThrottleRequest(OneDriveApi activeOneDriveApiInstance) { - // If we are using --upload-only & --sync-shared-folders there is a possability that a 'new' local folder might - // be misinterpreted that it needs to be uploaded to the users default OneDrive DriveID rather than the requested / configured - // Shared Business Folder. 
In --resync scenarios, the DB information that tells that this Business Shared Folder does not exist, - // and in a --upload-only scenario will never exist, so the correct lookups are unable to be performed. - if ((exists(cfg.businessSharedFolderFilePath)) && (syncBusinessFolders) && (cfg.getValueBool("upload_only"))){ - // business_shared_folders file exists, --sync-shared-folders is enabled, --upload-only is enabled - log.vdebug("OneDrive Business --upload-only & --sync-shared-folders edge case triggered"); - handleUploadOnlyBusinessSharedFoldersEdgeCase(); - } + // If OneDrive sends a status code 429 then this function will be used to process the Retry-After response header which contains the value by which we need to wait + addLogEntry("Handling a OneDrive HTTP 429 Response Code (Too Many Requests)", ["debug"]); - // Are we configured to use a National Cloud Deployment - if (nationalCloudDeployment) { - // Select items that have a out-of-sync flag set - flagNationalCloudDeploymentOutOfSyncItems(); - } + // Read in the Retry-After HTTP header as set and delay as per this value before retrying the request + auto retryAfterValue = activeOneDriveApiInstance.getRetryAfterValue(); + addLogEntry("Using Retry-After Value = " ~ to!string(retryAfterValue), ["debug"]); - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. - // if a file, potentially leads to confusion as to what the client is actually doing - log.log("Uploading differences of ", logPath); - } + // HTTP request returned status code 429 (Too Many Requests) + // https://github.com/abraunegg/onedrive/issues/133 + // https://github.com/abraunegg/onedrive/issues/815 - Item item; - // For each unique OneDrive driveID we know about - foreach (driveId; driveIDsArray) { - log.vdebug("Processing DB entries for this driveId: ", driveId); - // Database scan of every item in DB for the given driveId based on the root parent for that drive - if ((syncBusinessFolders) && (driveId != defaultDriveId)) { - // There could be multiple shared folders all from this same driveId - are we doing a single directory sync? - if (cfg.getValueString("single_directory") != ""){ - // Limit the local filesystem check to just the requested directory - if (itemdb.selectByPath(path, driveId, item)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(item); - } - } else { - // check everything associated with each driveId we know about - foreach(dbItem; itemdb.selectByDriveId(driveId)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(dbItem); - } - } - } else { - if (itemdb.selectByPath(path, driveId, item)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(item); - } - } - } - - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. 
- // if a file, potentially leads to confusion as to what the client is actually doing - log.log("Uploading new items of ", logPath); + ulong delayBeforeRetry = 0; + if (retryAfterValue != 0) { + // Use the HTTP Response Header Value + delayBeforeRetry = retryAfterValue; + } else { + // Use a 120 second delay as a default given header value was zero + // This value is based on log files and data when determining correct process for 429 response handling + delayBeforeRetry = 120; + // Update that we are over-riding the provided value with a default + addLogEntry("HTTP Response Header retry-after value was 0 - Using a preconfigured default of: " ~ to!string(delayBeforeRetry), ["debug"]); } - // Filesystem walk to find new files not uploaded - uploadNewItems(path); - // clean up idsToDelete only if --dry-run is set - if (dryRun) { - idsToDelete.length = 0; - assumeSafeAppend(idsToDelete); - } + // Sleep thread as per request + addLogEntry("Thread sleeping due to 'HTTP request returned status code 429' - The request has been throttled"); + addLogEntry("Sleeping for " ~ to!string(delayBeforeRetry) ~ " seconds"); + Thread.sleep(dur!"seconds"(delayBeforeRetry)); + + // Reset retry-after value to zero as we have used this value now and it may be changed in the future to a different value + activeOneDriveApiInstance.resetRetryAfterValue(); } - // scan the given directory for differences only - for use with --monitor - void scanForDifferencesDatabaseScan(const(string) path) - { - // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? - string logPath; - if (path == ".") { - // get the configured sync_dir - logPath = buildNormalizedPath(cfg.getValueString("sync_dir")); + // If the JSON response is not correct JSON object, exit + void invalidJSONResponseFromOneDriveAPI() { + addLogEntry("ERROR: Query of the OneDrive API returned an invalid JSON response"); + // Must force exit here, allow logging to be done + forceExit(); + } + + // Handle an unhandled API error + void defaultUnhandledHTTPErrorCode(OneDriveException exception) { + // display error + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + // Must force exit here, allow logging to be done + forceExit(); + } + + // Display the pertinant details of the sync engine + void displaySyncEngineDetails() { + // Display accountType, defaultDriveId, defaultRootId & remainingFreeSpace for verbose logging purposes + addLogEntry("Application Version: " ~ appConfig.applicationVersion, ["verbose"]); + addLogEntry("Account Type: " ~ appConfig.accountType, ["verbose"]); + addLogEntry("Default Drive ID: " ~ appConfig.defaultDriveId, ["verbose"]); + addLogEntry("Default Root ID: " ~ appConfig.defaultRootId, ["verbose"]); + + // What do we display here for space remaining + if (appConfig.remainingFreeSpace > 0) { + // Display the actual value + addLogEntry("Remaining Free Space: " ~ to!string(byteToGibiByte(appConfig.remainingFreeSpace)) ~ " GB (" ~ to!string(appConfig.remainingFreeSpace) ~ " bytes)", ["verbose"]); } else { - // use what was passed in - logPath = path; + // zero or non-zero value or restricted + if (!appConfig.quotaRestricted){ + addLogEntry("Remaining Free Space: 0 KB", ["verbose"]); + } else { + addLogEntry("Remaining Free Space: Not Available", ["verbose"]); + } } + } + + // Query itemdb.computePath() and catch potential assert when DB consistency issue occurs + string computeItemPath(string thisDriveId, string thisItemId) { - // If we are using --upload-only 
& --sync-shared-folders there is a possability that a 'new' local folder might - // be misinterpreted that it needs to be uploaded to the users default OneDrive DriveID rather than the requested / configured - // Shared Business Folder. In --resync scenarios, the DB information that tells that this Business Shared Folder does not exist, - // and in a --upload-only scenario will never exist, so the correct lookups are unable to be performed. - if ((exists(cfg.businessSharedFolderFilePath)) && (syncBusinessFolders) && (cfg.getValueBool("upload_only"))){ - // business_shared_folders file exists, --sync-shared-folders is enabled, --upload-only is enabled - log.vdebug("OneDrive Business --upload-only & --sync-shared-folders edge case triggered"); - handleUploadOnlyBusinessSharedFoldersEdgeCase(); - } + // static declare this for this function + static import core.exception; + string calculatedPath; + addLogEntry("Attempting to calculate local filesystem path for " ~ thisDriveId ~ " and " ~ thisItemId, ["debug"]); - // Are we configured to use a National Cloud Deployment - if (nationalCloudDeployment) { - // Select items that have a out-of-sync flag set - flagNationalCloudDeploymentOutOfSyncItems(); + try { + calculatedPath = itemDB.computePath(thisDriveId, thisItemId); + } catch (core.exception.AssertError) { + // broken tree in the database, we cant compute the path for this item id, exit + addLogEntry("ERROR: A database consistency issue has been caught. A --resync is needed to rebuild the database."); + // Must force exit here, allow logging to be done + forceExit(); } - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. - // if a file, potentially leads to confusion as to what the client is actually doing - log.vlog("Uploading differences of ", logPath); + // return calculated path as string + return calculatedPath; + } + + // Try and compute the file hash for the given item + bool testFileHash(string path, Item item) { + + // Generate QuickXORHash first before attempting to generate any other type of hash + if (item.quickXorHash) { + if (item.quickXorHash == computeQuickXorHash(path)) return true; + } else if (item.sha256Hash) { + if (item.sha256Hash == computeSHA256Hash(path)) return true; } - Item item; - // For each unique OneDrive driveID we know about - foreach (driveId; driveIDsArray) { - log.vdebug("Processing DB entries for this driveId: ", driveId); - // Database scan of every item in DB for the given driveId based on the root parent for that drive - if ((syncBusinessFolders) && (driveId != defaultDriveId)) { - // There could be multiple shared folders all from this same driveId - are we doing a single directory sync? - if (cfg.getValueString("single_directory") != ""){ - // Limit the local filesystem check to just the requested directory - if (itemdb.selectByPath(path, driveId, item)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(item); - } + return false; + } + + // Process items that need to be removed + void processDeleteItems() { + + foreach_reverse (i; idsToDelete) { + Item item; + string path; + if (!itemDB.selectById(i[0], i[1], item)) continue; // check if the item is in the db + // Compute this item path + path = computeItemPath(i[0], i[1]); + + // Log the action if the path exists .. 
it may of already been removed and this is a legacy array item + if (exists(path)) { + if (item.type == ItemType.file) { + addLogEntry("Trying to delete file " ~ path); } else { - // check everything associated with each driveId we know about - foreach(dbItem; itemdb.selectByDriveId(driveId)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(dbItem); - } - } - } else { - if (itemdb.selectByPath(path, driveId, item)) { - // Does it still exist on disk in the location the DB thinks it is - log.vdebug("Calling uploadDifferences(dbItem) as item is present in local cache DB"); - uploadDifferences(item); + addLogEntry("Trying to delete directory " ~ path); } } - } - } - - void flagNationalCloudDeploymentOutOfSyncItems() { - // Any entry in the DB than is flagged as out-of-sync needs to be cleaned up locally first before we scan the entire DB - // Normally, this is done at the end of processing all /delta queries, however National Cloud Deployments do not support /delta as a query - // https://docs.microsoft.com/en-us/graph/deployments#supported-features - // Select items that have a out-of-sync flag set - foreach (driveId; driveIDsArray) { - // For each unique OneDrive driveID we know about - Item[] outOfSyncItems = itemdb.selectOutOfSyncItems(driveId); - foreach (item; outOfSyncItems) { - if (!dryRun) { - // clean up idsToDelete - idsToDelete.length = 0; - assumeSafeAppend(idsToDelete); - // flag to delete local file as it now is no longer in sync with OneDrive - log.vdebug("Flagging to delete local item as it now is no longer in sync with OneDrive"); - log.vdebug("item: ", item); - idsToDelete ~= [item.driveId, item.id]; - // delete items in idsToDelete - if (idsToDelete.length > 0) deleteItems(); + + // Process the database entry removal. 
In a --dry-run scenario, this is being done against a DB copy + itemDB.deleteById(item.driveId, item.id); + if (item.remoteDriveId != null) { + // delete the linked remote folder + itemDB.deleteById(item.remoteDriveId, item.remoteId); + } + + // Add to pathFakeDeletedArray + // We dont want to try and upload this item again, so we need to track this object + if (dryRun) { + // We need to add './' here so that it can be correctly searched to ensure it is not uploaded + string pathToAdd = "./" ~ path; + pathFakeDeletedArray ~= pathToAdd; + } + + bool needsRemoval = false; + if (exists(path)) { + // path exists on the local system + // make sure that the path refers to the correct item + Item pathItem; + if (itemDB.selectByPath(path, item.driveId, pathItem)) { + if (pathItem.id == item.id) { + needsRemoval = true; + } else { + addLogEntry("Skipped due to id difference!"); + } + } else { + // item has disappeared completely + needsRemoval = true; } } - } - } - - void handleUploadOnlyBusinessSharedFoldersEdgeCase() { - // read in the business_shared_folders file contents - string[] businessSharedFoldersList; - // open file as read only - auto file = File(cfg.businessSharedFolderFilePath, "r"); - auto range = file.byLine(); - foreach (line; range) { - // Skip comments in file - if (line.length == 0 || line[0] == ';' || line[0] == '#') continue; - businessSharedFoldersList ~= buildNormalizedPath(line); - } - file.close(); - - // Query the GET /me/drive/sharedWithMe API - JSONValue graphQuery = onedrive.getSharedWithMe(); - if (graphQuery.type() == JSONType.object) { - if (count(graphQuery["value"].array) != 0) { - // Shared items returned - log.vdebug("onedrive.getSharedWithMe API Response: ", graphQuery); - foreach (searchResult; graphQuery["value"].array) { - // loop variables - string sharedFolderName; - string remoteParentDriveId; - string remoteParentItemId; - Item remoteItemRoot; - Item remoteItem; - - // is the shared item with us a 'folder' ? - // we only handle folders, not files or other items - if (isItemFolder(searchResult)) { - // Debug response output - log.vdebug("shared folder entry: ", searchResult); - sharedFolderName = searchResult["name"].str; - remoteParentDriveId = searchResult["remoteItem"]["parentReference"]["driveId"].str; - remoteParentItemId = searchResult["remoteItem"]["parentReference"]["id"].str; - - if (canFind(businessSharedFoldersList, sharedFolderName)) { - // Shared Folder matches what is in the shared folder list - log.vdebug("shared folder name matches business_shared_folders list item: ", sharedFolderName); - // Actions: - // 1. Add this remote item to the DB so that it can be queried - // 2. 
Add remoteParentDriveId to driveIDsArray so we have a record of it - - // Make JSON item DB compatible - remoteItem = makeItem(searchResult); - // Fix up entries, as we are manipulating the data - remoteItem.driveId = remoteParentDriveId; - remoteItem.eTag = ""; - remoteItem.cTag = ""; - remoteItem.parentId = defaultRootId; - remoteItem.remoteDriveId = ""; - remoteItem.remoteId = ""; - - // Build the remote root DB item - remoteItemRoot.driveId = remoteParentDriveId; - remoteItemRoot.id = defaultRootId; - remoteItemRoot.name = "root"; - remoteItemRoot.type = ItemType.dir; - remoteItemRoot.mtime = remoteItem.mtime; - remoteItemRoot.syncStatus = "Y"; - - // Add root remote item to the local database - log.vdebug("Adding remote folder root to database: ", remoteItemRoot); - itemdb.upsert(remoteItemRoot); - - // Add shared folder item to the local database - log.vdebug("Adding remote folder to database: ", remoteItem); - itemdb.upsert(remoteItem); - - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, remoteParentDriveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= remoteParentDriveId; + if (needsRemoval) { + // Log the action + if (item.type == ItemType.file) { + addLogEntry("Deleting file " ~ path); + } else { + addLogEntry("Deleting directory " ~ path); + } + + // Perform the action + if (!dryRun) { + if (isFile(path)) { + remove(path); + } else { + try { + // Remove any children of this path if they still exist + // Resolve 'Directory not empty' error when deleting local files + foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) { + attrIsDir(child.linkAttributes) ? rmdir(child.name) : remove(child.name); } + // Remove the path now that it is empty of children + rmdirRecurse(path); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } } } } + + if (!dryRun) { + // Cleanup array memory + idsToDelete = []; + } } - // scan the given directory for new items - for use with --monitor or --cleanup-local-files - void scanForDifferencesFilesystemScan(const(string) path) - { - // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? - string logPath; - if (path == ".") { - // get the configured sync_dir - logPath = buildNormalizedPath(cfg.getValueString("sync_dir")); + // Update the timestamp of an object online + void uploadLastModifiedTime(string driveId, string id, SysTime mtime, string eTag) { + + string itemModifiedTime; + itemModifiedTime = mtime.toISOExtString(); + JSONValue data = [ + "fileSystemInfo": JSONValue([ + "lastModifiedDateTime": itemModifiedTime + ]) + ]; + + // What eTag value do we use? + string eTagValue; + if (appConfig.accountType == "personal") { + eTagValue = null; } else { - // use what was passed in - logPath = path; + eTagValue = eTag; } - // scan for changes in the path provided - if (isDir(path)) { - // if this path is a directory, output this message. 
- // if a file, potentially leads to confusion as to what the client is actually doing - if (!cleanupLocalFiles) { - // if --cleanup-local-files was set, we will not be uploading data - log.vlog("Uploading new items of ", logPath); + JSONValue response; + // Create a new OneDrive API instance + OneDriveApi uploadLastModifiedTimeApiInstance; + uploadLastModifiedTimeApiInstance = new OneDriveApi(appConfig); + uploadLastModifiedTimeApiInstance.initialise(); + + // Try and update the online last modified time + try { + // Use this instance + response = uploadLastModifiedTimeApiInstance.updateById(driveId, id, data, eTagValue); + // Shut the instance down + uploadLastModifiedTimeApiInstance.shutdown(); + // Free object and memory + object.destroy(uploadLastModifiedTimeApiInstance); + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(response); + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(uploadLastModifiedTimeApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to update the timestamp on an item on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + uploadLastModifiedTime(driveId, id, mtime, eTag); + return; + } else { + // Default operation if not 408,429,503,504 errors + if (exception.httpStatusCode == 409) { + // ETag does not match current item's value - use a null eTag + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + uploadLastModifiedTime(driveId, id, mtime, null); + } else { + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } } } - - // Filesystem walk to find extra files that reside locally. 
- // If --cleanup-local-files is not used, these will be uploaded (normal operation) - // If --download-only --cleanup-local-files is being used, extra files found locally will be deleted from the local filesystem - uploadNewItems(path); } - private void uploadDifferences(const ref Item item) - { - // see if this item.id we were supposed to have deleted - // match early and return - if (dryRun) { - foreach (i; idsToDelete) { - if (i[1] == item.id) { - return; - } - } - } - - bool unwanted = false; - string path; + // Perform a database integrity check - checking all the items that are in-sync at the moment, validating what we know should be on disk, to what is actually on disk + void performDatabaseConsistencyAndIntegrityCheck() { - // Compute this item path early as we we use this path often - path = computeItemPath(item.driveId, item.id); + // Log what we are doing + if (!appConfig.surpressLoggingOutput) { + addLogEntry("Performing a database consistency and integrity check on locally stored data ... "); + } - // item.id was in the database associated with the item.driveId specified - log.vlog("Processing ", buildNormalizedPath(path)); + // What driveIDsArray do we use? If we are doing a --single-directory we need to use just the drive id associated with that operation + string[] consistencyCheckDriveIdsArray; + if (singleDirectoryScope) { + consistencyCheckDriveIdsArray ~= singleDirectoryScopeDriveId; + } else { + consistencyCheckDriveIdsArray = driveIDsArray; + } - // What type of DB item are we processing - // Is this item excluded by user configuration of skip_dir or skip_file? - // Is this item a directory or 'remote' type? A 'remote' type is a folder DB tie so should be compared as directory for exclusion - if ((item.type == ItemType.dir)||(item.type == ItemType.remote)) { - // Do we need to check for .nosync? Only if --check-for-nosync was passed in - if (cfg.getValueBool("check_nosync")) { - if (exists(path ~ "/.nosync")) { - log.vlog("Skipping item - .nosync found & --check-for-nosync enabled: ", path); - return; + // Create a new DB blank item + Item item; + // Use the array we populate, rather than selecting all distinct driveId's from the database + foreach (driveId; consistencyCheckDriveIdsArray) { + // Make the logging more accurate - we cant update driveId as this then breaks the below queries + addLogEntry("Processing DB entries for this Drive ID: " ~ driveId, ["verbose"]); + + // What OneDrive API query do we use? + // - Are we running against a National Cloud Deployments that does not support /delta ? + // National Cloud Deployments do not support /delta as a query + // https://docs.microsoft.com/en-us/graph/deployments#supported-features + // + // - Are we performing a --single-directory sync, which will exclude many items online, focusing in on a specific online directory + // + // - Are we performing a --download-only --cleanup-local-files action? 
+ // + // If we did, we self generated a /delta response, thus need to now process elements that are still flagged as out-of-sync + if ((singleDirectoryScope) || (nationalCloudDeployment) || (cleanupLocalFiles)) { + // Any entry in the DB than is flagged as out-of-sync needs to be cleaned up locally first before we scan the entire DB + // Normally, this is done at the end of processing all /delta queries, however when using --single-directory or a National Cloud Deployments is configured + // We cant use /delta to query the OneDrive API as National Cloud Deployments dont support /delta + // https://docs.microsoft.com/en-us/graph/deployments#supported-features + // We dont use /delta for --single-directory as, in order to sync a single path with /delta, we need to query the entire OneDrive API JSON data to then filter out + // objects that we dont want, thus, it is easier to use the same method as National Cloud Deployments, but query just the objects we are after + + // For each unique OneDrive driveID we know about + Item[] outOfSyncItems = itemDB.selectOutOfSyncItems(driveId); + foreach (outOfSyncItem; outOfSyncItems) { + if (!dryRun) { + // clean up idsToDelete + idsToDelete.length = 0; + assumeSafeAppend(idsToDelete); + // flag to delete local file as it now is no longer in sync with OneDrive + addLogEntry("Flagging to delete local item as it now is no longer in sync with OneDrive", ["debug"]); + addLogEntry("outOfSyncItem: " ~ to!string(outOfSyncItem), ["debug"]); + idsToDelete ~= [outOfSyncItem.driveId, outOfSyncItem.id]; + // delete items in idsToDelete + if (idsToDelete.length > 0) processDeleteItems(); + } + } + + // Fetch database items associated with this path + Item[] driveItems; + if (singleDirectoryScope) { + // Use the --single-directory items we previously configured + // - query database for children objects using those items + driveItems = getChildren(singleDirectoryScopeDriveId, singleDirectoryScopeItemId); + } else { + // Check everything associated with each driveId we know about + addLogEntry("Selecting DB items via itemDB.selectByDriveId(driveId)", ["debug"]); + // Query database + driveItems = itemDB.selectByDriveId(driveId); + } + + // Log DB items to process + addLogEntry("Database items to process for this driveId: " ~ to!string(driveItems.count), ["debug"]); + + // Process each database database item associated with the driveId + foreach(dbItem; driveItems) { + // Does it still exist on disk in the location the DB thinks it is + checkDatabaseItemForConsistency(dbItem); + } + } else { + // Check everything associated with each driveId we know about + addLogEntry("Selecting DB items via itemDB.selectByDriveId(driveId)", ["debug"]); + + // Query database + auto driveItems = itemDB.selectByDriveId(driveId); + addLogEntry("Database items to process for this driveId: " ~ to!string(driveItems.count), ["debug"]); + + // Process each database database item associated with the driveId + foreach(dbItem; driveItems) { + // Does it still exist on disk in the location the DB thinks it is + checkDatabaseItemForConsistency(dbItem); } } - // Is the path excluded? - unwanted = selectiveSync.isDirNameExcluded(item.name); - } - - // Is this item a file? - if (item.type == ItemType.file) { - // Is the filename excluded? - unwanted = selectiveSync.isFileNameExcluded(item.name); } - // If path or filename does not exclude, is this excluded due to use of selective sync? 
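The exclusion logic being replaced here (together with the sync_list check that follows) evaluates the client-side filters in a fixed order: directory-name rules, then file-name rules, then the sync_list path rules. The sketch below shows only that ordering, with each rule passed in as a predicate so it stays self-contained; the names are illustrative, not the actual selectiveSync or clientSideFiltering API.

```d
// Illustrative only: the evaluation order of the client-side filters described above.
// Each rule is supplied as a delegate so this sketch has no external dependencies.
bool isExcludedByClientSideFiltering(
        string path,
        bool isDirectory,
        scope bool delegate(string) dirNameRule,    // skip_dir style match
        scope bool delegate(string) fileNameRule,   // skip_file style match
        scope bool delegate(string) syncListRule) { // sync_list path rules

    if (isDirectory && dirNameRule(path)) return true;
    if (!isDirectory && fileNameRule(path)) return true;
    // sync_list is only consulted when the name-based rules did not already exclude the path
    return syncListRule(path);
}
```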
- if (!unwanted) { - // is sync_list configured - if (syncListConfigured) { - // sync_list configured and in use - // Is the path excluded via sync_list? - unwanted = selectiveSync.isPathExcludedViaSyncList(path); + // Are we doing a --download-only sync? + if (!appConfig.getValueBool("download_only")) { + // Do we have any known items, where the content has changed locally, that needs to be uploaded? + if (!databaseItemsWhereContentHasChanged.empty) { + // There are changed local files that were in the DB to upload + addLogEntry("Changed local items to upload to OneDrive: " ~ to!string(databaseItemsWhereContentHasChanged.length)); + processChangedLocalItemsToUpload(); + // Cleanup array memory + databaseItemsWhereContentHasChanged = []; } } - - // skip unwanted items - if (unwanted) { - //log.vlog("Filtered out"); - return; - } + } + + // Check this Database Item for its consistency on disk + void checkDatabaseItemForConsistency(Item dbItem) { + + // What is the local path item + string localFilePath; + // Do we want to onward process this item? + bool unwanted = false; - // Check against Microsoft OneDrive restriction and limitations about Windows naming files - if (!isValidName(path)) { - log.logAndNotify("Skipping item - invalid name (Microsoft Naming Convention): ", path); - return; - } + // Compute this dbItem path early as we we use this path often + localFilePath = buildNormalizedPath(computeItemPath(dbItem.driveId, dbItem.id)); - // Check for bad whitespace items - if (!containsBadWhiteSpace(path)) { - log.logAndNotify("Skipping item - invalid name (Contains an invalid whitespace item): ", path); - return; + // To improve logging output for this function, what is the 'logical path'? + string logOutputPath; + if (localFilePath == ".") { + // get the configured sync_dir + logOutputPath = buildNormalizedPath(appConfig.getValueString("sync_dir")); + } else { + // Use the path that was computed + logOutputPath = localFilePath; } - // Check for HTML ASCII Codes as part of file name - if (!containsASCIIHTMLCodes(path)) { - log.logAndNotify("Skipping item - invalid name (Contains HTML ASCII Code): ", path); - return; - } + // Log what we are doing + addLogEntry("Processing " ~ logOutputPath, ["verbose"]); - final switch (item.type) { - case ItemType.dir: - uploadDirDifferences(item, path); - break; + // Determine which action to take + final switch (dbItem.type) { case ItemType.file: - uploadFileDifferences(item, path); + // Logging output + checkFileDatabaseItemForConsistency(dbItem, localFilePath); + break; + case ItemType.dir: + // Logging output + checkDirectoryDatabaseItemForConsistency(dbItem, localFilePath); break; case ItemType.remote: - uploadRemoteDirDifferences(item, path); + // checkRemoteDirectoryDatabaseItemForConsistency(dbItem, localFilePath); + break; + case ItemType.unknown: + // Unknown type - we dont action these items break; } } - - private void uploadDirDifferences(const ref Item item, const(string) path) - { - assert(item.type == ItemType.dir); - if (exists(path)) { - // Fix https://github.com/abraunegg/onedrive/issues/1915 - try { - if (!isDir(path)) { - log.vlog("The item was a directory but now it is a file"); - uploadDeleteItem(item, path); - uploadNewFile(path); - } else { - log.vlog("The directory has not changed"); - // loop through the children - foreach (Item child; itemdb.selectChildren(item.driveId, item.id)) { - uploadDifferences(child); - } - } - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, 
getFunctionName!({})); - return; - } - } else { - // Directory does not exist locally - // If we are in a --dry-run situation - this directory may never have existed as we never downloaded it - if (!dryRun) { - // Not --dry-run situation - if (!cfg.getValueBool("monitor")) { - // Not in --monitor mode - log.vlog("The directory has been deleted locally"); - } else { - // Appropriate message as we are in --monitor mode - log.vlog("The directory appears to have been deleted locally .. but we are running in --monitor mode. This may have been 'moved' on the local filesystem rather than being 'deleted'"); - log.vdebug("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped"); - } - // A moved file will be uploaded as 'new', delete the old file and reference - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } - } else { - // we are in a --dry-run situation, directory appears to have deleted locally - this directory may never have existed as we never downloaded it .. - // Check if path does not exist in database - Item databaseItem; - if (!itemdb.selectByPath(path, defaultDriveId, databaseItem)) { - // Path not found in database - log.vlog("The directory has been deleted locally"); - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); + + // Perform the database consistency check on this file item + void checkFileDatabaseItemForConsistency(Item dbItem, string localFilePath) { + + // What is the source of this item data? + string itemSource = "database"; + + // Does this item|file still exist on disk? + if (exists(localFilePath)) { + // Path exists locally, is this path a file? + if (isFile(localFilePath)) { + // Can we actually read the local file? + if (readLocalFile(localFilePath)){ + // File is readable + SysTime localModifiedTime = timeLastModified(localFilePath).toUTC(); + SysTime itemModifiedTime = dbItem.mtime; + // Reduce time resolution to seconds before comparing + itemModifiedTime.fracSecs = Duration.zero; + localModifiedTime.fracSecs = Duration.zero; + + if (localModifiedTime != itemModifiedTime) { + // The modified dates are different + addLogEntry("The local item has a different modified time " ~ to!string(localModifiedTime) ~ " when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime), ["debug"]); + + // Test the file hash + if (!testFileHash(localFilePath, dbItem)) { + // Is the local file 'newer' or 'older' (ie was an old file 'restored locally' by a different backup / replacement process?) + if (localModifiedTime >= itemModifiedTime) { + // Local file is newer + if (!appConfig.getValueBool("download_only")) { + addLogEntry("The file content has changed locally and has a newer timestamp, thus needs to be uploaded to OneDrive", ["verbose"]); + // Add to an array of files we need to upload as this file has changed locally in-between doing the /delta check and performing this check + databaseItemsWhereContentHasChanged ~= [dbItem.driveId, dbItem.id, localFilePath]; + } else { + addLogEntry("The file content has changed locally and has a newer timestamp. 
The file will remain different to online file due to --download-only being used", ["verbose"]); + } + } else { + // Local file is older - data recovery process? something else? + if (!appConfig.getValueBool("download_only")) { + addLogEntry("The file content has changed locally and file now has a older timestamp. Uploading this file to OneDrive may potentially cause data-loss online", ["verbose"]); + // Add to an array of files we need to upload as this file has changed locally in-between doing the /delta check and performing this check + databaseItemsWhereContentHasChanged ~= [dbItem.driveId, dbItem.id, localFilePath]; + } else { + addLogEntry("The file content has changed locally and file now has a older timestamp. The file will remain different to online file due to --download-only being used", ["verbose"]); + } + } + } else { + // The file contents have not changed, but the modified timestamp has + addLogEntry("The last modified timestamp has changed however the file content has not changed", ["verbose"]); + addLogEntry("The local item has the same hash value as the item online - correcting timestamp online", ["verbose"]); + if (!dryRun) { + // Attempt to update the online date time stamp + uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime.toUTC(), dbItem.eTag); + } + } + } else { + // The file has not changed + addLogEntry("The file has not changed", ["verbose"]); } } else { - // Path was found in the database - // Did we 'fake create it' as part of --dry-run ? - foreach (i; idsFaked) { - if (i[1] == item.id) { - log.vdebug("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use"); - log.vlog("The directory has not changed"); - return; - } - } - // item.id did not match a 'faked' download new directory creation - log.vlog("The directory has been deleted locally"); - uploadDeleteItem(item, path); + //The file is not readable - skipped + addLogEntry("Skipping processing this file as it cannot be read (file permissions or file corruption): " ~ localFilePath); } - } - } - } - - private void uploadRemoteDirDifferences(const ref Item item, const(string) path) - { - assert(item.type == ItemType.remote); - if (exists(path)) { - if (!isDir(path)) { - log.vlog("The item was a directory but now it is a file"); - uploadDeleteItem(item, path); - uploadNewFile(path); } else { - log.vlog("The directory has not changed"); - // continue through the linked folder - assert(item.remoteDriveId && item.remoteId); - Item remoteItem; - bool found = itemdb.selectById(item.remoteDriveId, item.remoteId, remoteItem); - if(found){ - // item was found in the database - uploadDifferences(remoteItem); - } + // The item was a file but now is a directory + addLogEntry("The item was a file but now is a directory", ["verbose"]); } } else { - // are we in a dry-run scenario + // File does not exist locally, but is in our database as a dbItem containing all the data was passed into this function + // If we are in a --dry-run situation - this file may never have existed as we never downloaded it if (!dryRun) { - // no dry-run - log.vlog("The directory has been deleted locally"); - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } + // Not --dry-run situation + addLogEntry("The file has been deleted locally", ["verbose"]); + // Upload to OneDrive the instruction to delete this item. 
This will handle the 'noRemoteDelete' flag if set + uploadDeletedItem(dbItem, localFilePath); } else { - // we are in a --dry-run situation, directory appears to have deleted locally - this directory may never have existed as we never downloaded it .. - // Check if path does not exist in database - Item databaseItem; - if (!itemdb.selectByPathWithoutRemote(path, defaultDriveId, databaseItem)) { - // Path not found in database - log.vlog("The directory has been deleted locally"); - if (noRemoteDelete) { - // do not process remote directory delete - log.vlog("Skipping remote directory delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); + // We are in a --dry-run situation, file appears to have been deleted locally - this file may never have existed locally as we never downloaded it due to --dry-run + // Did we 'fake create it' as part of --dry-run ? + bool idsFakedMatch = false; + foreach (i; idsFaked) { + if (i[1] == dbItem.id) { + addLogEntry("Matched faked file which is 'supposed' to exist but not created due to --dry-run use", ["debug"]); + addLogEntry("The file has not changed", ["verbose"]); + idsFakedMatch = true; } - } else { - // Path was found in the database - // Did we 'fake create it' as part of --dry-run ? - foreach (i; idsFaked) { - if (i[1] == item.id) { - log.vdebug("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use"); - log.vlog("The directory has not changed"); - return; - } - } - // item.id did not match a 'faked' download new directory creation - log.vlog("The directory has been deleted locally"); - uploadDeleteItem(item, path); + } + if (!idsFakedMatch) { + // dbItem.id did not match a 'faked' download new file creation - so this in-sync object was actually deleted locally, but we are in a --dry-run situation + addLogEntry("The file has been deleted locally", ["verbose"]); + // Upload to OneDrive the instruction to delete this item. This will handle the 'noRemoteDelete' flag if set + uploadDeletedItem(dbItem, localFilePath); } } } } - - // upload local file system differences to OneDrive - private void uploadFileDifferences(const ref Item item, const(string) path) - { - // Reset upload failure - OneDrive or filesystem issue (reading data) - uploadFailed = false; - - // uploadFileDifferences is called when processing DB entries to compare against actual files on disk - string itemSource = "database"; - assert(item.type == ItemType.file); - if (exists(path)) { - if (isFile(path)) { - // can we actually read the local file? - if (readLocalFile(path)){ - // file is readable - SysTime localModifiedTime = timeLastModified(path).toUTC(); - SysTime itemModifiedTime = item.mtime; - // HACK: reduce time resolution to seconds before comparing - itemModifiedTime.fracSecs = Duration.zero; - localModifiedTime.fracSecs = Duration.zero; - - if (localModifiedTime != itemModifiedTime) { - log.vlog("The file last modified time has changed"); - log.vdebug("The local item has a different modified time ", localModifiedTime, " when compared to ", itemSource, " modified time ", itemModifiedTime); - string eTag = item.eTag; - - // perform file hash tests - has the content of the file changed? - if (!testFileHash(path, item)) { - log.vlog("The file content has changed"); - log.vdebug("The local item has a different hash when compared to ", itemSource, " item hash"); - write("Uploading modified file ", path, " ... 
"); - JSONValue response; - - if (!dryRun) { - // Get the file size - long thisFileSize = getSize(path); - // Are we using OneDrive Personal or OneDrive Business? - // To solve 'Multiple versions of file shown on website after single upload' (https://github.com/abraunegg/onedrive/issues/2) - // check what 'account type' this is as this issue only affects OneDrive Business so we need some extra logic here - if (accountType == "personal"){ - // Original file upload logic - if (thisFileSize <= thresholdFileSize) { - try { - response = onedrive.simpleUploadReplace(path, item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 404) { - // HTTP request returned status code 404 - the eTag provided does not exist - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 404 - eTag Issue' - gracefully handling error"); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } - // Resolve https://github.com/abraunegg/onedrive/issues/36 - if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { - // The file is currently checked out or locked for editing by another user - // We cant upload this file at this time - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - write("", path, " is currently checked out or locked for editing by another user."); - log.fileOnly(path, " is currently checked out or locked for editing by another user."); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Simple Upload Replace Failed - OneDrive eTag / cTag match issue (Personal Account)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. 
Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request as a session"); - // Try upload as a session - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // upload done without error - writeln("done."); - } else { - writeln(""); - try { - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Session Upload Replace Failed - OneDrive eTag / cTag match issue (Personal Account)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // upload done without error - writeln("done."); - } - } else { - // OneDrive Business Account - // We need to always use a session to upload, but handle the changed file correctly - if (accountType == "business"){ - try { - // is this a zero-byte file? - if (thisFileSize == 0) { - // the file we are trying to upload as a session is a zero byte file - we cant use a session to upload or replace the file - // as OneDrive technically does not support zero byte files - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("Skip Reason: Microsoft OneDrive does not support 'zero-byte' files as a modified upload. Will upload as new file."); - // delete file on OneDrive - onedrive.deleteById(item.driveId, item.id, item.eTag); - // delete file from local database - itemdb.deleteById(item.driveId, item.id); - return; - } else { - if ((!syncBusinessFolders) || (item.driveId == defaultDriveId)) { - // For logging consistency - writeln(""); - // If we are not syncing Shared Business Folders, or this change is going to the 'users' default drive, handle normally - // Perform a normal session upload - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } else { - // If we are uploading to a shared business folder, there are a couple of corner cases here: - // 1. Shared Folder is a 'users' folder - // 2. 
Shared Folder is a 'SharePoint Library' folder, meaning we get hit by this stupidity: https://github.com/OneDrive/onedrive-api-docs/issues/935 - response = handleSharePointMetadataAdditionBug(item, path); - } - } - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - // Resolve https://github.com/abraunegg/onedrive/issues/36 - if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { - // The file is currently checked out or locked for editing by another user - // We cant upload this file at this time - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - writeln("", path, " is currently checked out or locked for editing by another user."); - log.fileOnly(path, " is currently checked out or locked for editing by another user."); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Session Upload Replace Failed - OneDrive eTag / cTag match issue (Business Account)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return; - } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // Did the upload fail? - if (!uploadFailed){ - // upload done without error or failure - writeln("done."); - // As the session.upload includes the last modified time, save the response - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - - // OneDrive documentLibrary - if (accountType == "documentLibrary"){ - // is this a zero-byte file? - if (thisFileSize == 0) { - // the file we are trying to upload as a session is a zero byte file - we cant use a session to upload or replace the file - // as OneDrive technically does not support zero byte files - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("Skip Reason: Microsoft OneDrive does not support 'zero-byte' files as a modified upload. Will upload as new file."); - // delete file on OneDrive - onedrive.deleteById(item.driveId, item.id, item.eTag); - // delete file from local database - itemdb.deleteById(item.driveId, item.id); - return; - } else { - // Due to https://github.com/OneDrive/onedrive-api-docs/issues/935 Microsoft modifies all PDF, MS Office & HTML files with added XML content. It is a 'feature' of SharePoint. - // This means, as a session upload, on 'completion' the file is 'moved' and generates a 404 ...... - response = handleSharePointMetadataAdditionBug(item, path); - - // Did the upload fail? 
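// The removed hunk above maps specific OneDrive HTTP status codes to distinct recovery actions:
// 401 (skip the upload), 409/423 (file checked out or locked - skip), 412 (eTag/cTag precondition
// failed - drop the database record and re-upload as a new file) and 504 (gateway timeout - retry
// as a session upload). A minimal, illustrative sketch of that dispatch only; the enum and function
// below are hypothetical and not part of the client.
enum UploadRecovery { skipUnauthorised, skipLocked, reuploadAsNew, retryAsSession, reportAndSkip }

UploadRecovery classifyUploadFailure(int httpStatusCode) {
	switch (httpStatusCode) {
		case 401:      return UploadRecovery.skipUnauthorised;   // HTTP 401 Unauthorized
		case 409, 423: return UploadRecovery.skipLocked;         // checked out / locked by another user
		case 412:      return UploadRecovery.reuploadAsNew;      // precondition failed - upload as new file
		case 504:      return UploadRecovery.retryAsSession;     // gateway timeout - retry via session upload
		default:       return UploadRecovery.reportAndSkip;      // log the error and skip this file
	}
}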
- if (!uploadFailed){ - // upload done without error or failure - writeln("done."); - // As the session.upload includes the last modified time, save the response - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - } - } - - // Update etag with ctag from response - if ("cTag" in response) { - // use the cTag instead of the eTag because OneDrive may update the metadata of files AFTER they have been uploaded via simple upload - eTag = response["cTag"].str; - } else { - // Is there an eTag in the response? - if ("eTag" in response) { - // use the eTag from the response as there was no cTag - eTag = response["eTag"].str; - } else { - // no tag available - set to nothing - eTag = ""; - } - } - - // log that the modified file was uploaded successfully - log.fileOnly("Uploading modified file ", path, " ... done."); - - // update free space tracking if this is our drive id - if (item.driveId == defaultDriveId) { - // how much space is left on OneDrive after upload? - remainingFreeSpace = (remainingFreeSpace - thisFileSize); - log.vlog("Remaining free space on OneDrive: ", remainingFreeSpace); - } - } else { - // we are --dry-run - simulate the file upload - writeln("done."); - response = createFakeResponse(path); - // Log action to log file - log.fileOnly("Uploading modified file ", path, " ... done."); - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - return; - } - } - if (accountType == "personal"){ - // If Personal, call to update the modified time as stored on OneDrive - if (!dryRun) { - uploadLastModifiedTime(item.driveId, item.id, eTag, localModifiedTime.toUTC()); - } + // Perform the database consistency check on this directory item + void checkDirectoryDatabaseItemForConsistency(Item dbItem, string localFilePath) { + + // What is the source of this item data? + string itemSource = "database"; + + // Does this item|directory still exist on disk? + if (exists(localFilePath)) { + // Fix https://github.com/abraunegg/onedrive/issues/1915 + try { + if (!isDir(localFilePath)) { + addLogEntry("The item was a directory but now it is a file", ["verbose"]); + uploadDeletedItem(dbItem, localFilePath); + uploadNewFile(localFilePath); + } else { + // Directory still exists locally + addLogEntry("The directory has not changed", ["verbose"]); + // When we are using --single-directory, we use a the getChildren() call to get all children of a path, meaning all children are already traversed + // Thus, if we traverse the path of this directory .. we end up with double processing & log output .. 
which is not ideal + if (!singleDirectoryScope) { + // loop through the children + foreach (Item child; itemDB.selectChildren(dbItem.driveId, dbItem.id)) { + checkDatabaseItemForConsistency(child); } - } else { - log.vlog("The file has not changed"); } - } else { - //The file is not readable - skipped - log.log("Skipping processing this file as it cannot be read (file permissions or file corruption): ", path); - uploadFailed = true; } - } else { - log.vlog("The item was a file but now is a directory"); - uploadDeleteItem(item, path); - uploadCreateDir(path); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } else { - // File does not exist locally - // If we are in a --dry-run situation - this file may never have existed as we never downloaded it + // Directory does not exist locally, but it is in our database as a dbItem containing all the data was passed into this function + // If we are in a --dry-run situation - this directory may never have existed as we never created it if (!dryRun) { // Not --dry-run situation - if (!cfg.getValueBool("monitor")) { - log.vlog("The file has been deleted locally"); + if (!appConfig.getValueBool("monitor")) { + // Not in --monitor mode + addLogEntry("The directory has been deleted locally", ["verbose"]); } else { // Appropriate message as we are in --monitor mode - log.vlog("The file appears to have been deleted locally .. but we are running in --monitor mode. This may have been 'moved' on the local filesystem rather than being 'deleted'"); - log.vdebug("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped"); - } - // A moved file will be uploaded as 'new', delete the old file and reference - if (noRemoteDelete) { - // do not process remote file delete - log.vlog("Skipping remote file delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); + addLogEntry("The directory appears to have been deleted locally .. but we are running in --monitor mode. This may have been 'moved' on the local filesystem rather than being 'deleted'", ["verbose"]); + addLogEntry("Most likely cause - 'inotify' event was missing for whatever action was taken locally or action taken when application was stopped", ["debug"]); } + // A moved directory will be uploaded as 'new', delete the old directory and database reference + // Upload to OneDrive the instruction to delete this item. This will handle the 'noRemoteDelete' flag if set + uploadDeletedItem(dbItem, localFilePath); } else { - // We are in a --dry-run situation, file appears to have deleted locally - this file may never have existed as we never downloaded it .. - // Check if path does not exist in database - Item databaseItem; - if (!itemdb.selectByPath(path, defaultDriveId, databaseItem)) { - // file not found in database - log.vlog("The file has been deleted locally"); - if (noRemoteDelete) { - // do not process remote file delete - log.vlog("Skipping remote file delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); + // We are in a --dry-run situation, directory appears to have been deleted locally - this directory may never have existed locally as we never created it due to --dry-run + // Did we 'fake create it' as part of --dry-run ? 
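// The block that follows answers this question by scanning idsFaked, the list of items that were only
// 'fake created' in the database during a --dry-run download. A standalone restatement of that lookup,
// assuming idsFaked holds [driveId, id] string pairs as the loops in this hunk suggest:
bool wasFakedByDryRun(string[2][] idsFaked, string itemId) {
	foreach (pair; idsFaked) {
		if (pair[1] == itemId) {
			// matched a faked entry - the item never existed on disk, so no delete should be sent online
			return true;
		}
	}
	return false;
}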
+ bool idsFakedMatch = false; + foreach (i; idsFaked) { + if (i[1] == dbItem.id) { + addLogEntry("Matched faked dir which is 'supposed' to exist but not created due to --dry-run use", ["debug"]); + addLogEntry("The directory has not changed", ["verbose"]); + idsFakedMatch = true; } + } + if (!idsFakedMatch) { + // dbItem.id did not match a 'faked' download new directory creation - so this in-sync object was actually deleted locally, but we are in a --dry-run situation + addLogEntry("The directory has been deleted locally", ["verbose"]); + // Upload to OneDrive the instruction to delete this item. This will handle the 'noRemoteDelete' flag if set + uploadDeletedItem(dbItem, localFilePath); } else { - // file was found in the database - // Did we 'fake create it' as part of --dry-run ? - foreach (i; idsFaked) { - if (i[1] == item.id) { - log.vdebug("Matched faked file which is 'supposed' to exist but not created due to --dry-run use"); - log.vlog("The file has not changed"); - return; + // When we are using --single-directory, we use a the getChildren() call to get all children of a path, meaning all children are already traversed + // Thus, if we traverse the path of this directory .. we end up with double processing & log output .. which is not ideal + if (!singleDirectoryScope) { + // loop through the children + foreach (Item child; itemDB.selectChildren(dbItem.driveId, dbItem.id)) { + checkDatabaseItemForConsistency(child); } } - // item.id did not match a 'faked' download new file creation - log.vlog("The file has been deleted locally"); - if (noRemoteDelete) { - // do not process remote file delete - log.vlog("Skipping remote file delete as --upload-only & --no-remote-delete configured"); - } else { - uploadDeleteItem(item, path); - } } } } } - private JSONValue handleSharePointMetadataAdditionBug(const ref Item item, const(string) path) - { - // Explicit function for handling https://github.com/OneDrive/onedrive-api-docs/issues/935 - JSONValue response; - // Handle certain file types differently - if ((extension(path) == ".txt") || (extension(path) == ".csv")) { - // .txt and .csv are unaffected by https://github.com/OneDrive/onedrive-api-docs/issues/935 - // For logging consistency - writeln(""); - try { - response = session.upload(path, item.driveId, item.parentId, baseName(path), item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return response; - } - // Resolve https://github.com/abraunegg/onedrive/issues/36 - if ((e.httpStatusCode == 409) || (e.httpStatusCode == 423)) { - // The file is currently checked out or locked for editing by another user - // We cant upload this file at this time - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... 
skipped."); - writeln("", path, " is currently checked out or locked for editing by another user."); - log.fileOnly(path, " is currently checked out or locked for editing by another user."); - uploadFailed = true; - return response; - } - if (e.httpStatusCode == 412) { - // HTTP request returned status code 412 - ETag does not match current item's value - // Delete record from the local database - file will be uploaded as a new file - writeln("skipped."); - log.vdebug("Session Upload Replace Failed - OneDrive eTag / cTag match issue (Sharepoint Library)"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' - gracefully handling error. Will upload as new file."); - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return response; - } else { - // display what the error is - writeln("skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; + // Does this local path (directory or file) conform with the Microsoft Naming Restrictions? It needs to conform otherwise we cannot create the directory or upload the file. + bool checkPathAgainstMicrosoftNamingRestrictions(string localFilePath) { + + // Check if the given path violates certain Microsoft restrictions and limitations + // Return a true|false response + bool invalidPath = false; + + // Check path against Microsoft OneDrive restriction and limitations about Windows naming for files and folders + if (!invalidPath) { + if (!isValidName(localFilePath)) { // This will return false if this is not a valid name according to the OneDrive API specifications + addLogEntry("Skipping item - invalid name (Microsoft Naming Convention): " ~ localFilePath, ["info", "notify"]); + invalidPath = true; } - // upload done without error - writeln("done."); - } else { - // Due to https://github.com/OneDrive/onedrive-api-docs/issues/935 Microsoft modifies all PDF, MS Office & HTML files with added XML content. It is a 'feature' of SharePoint. - // This means, as a session upload, on 'completion' the file is 'moved' and generates a 404 ...... - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... 
skipped."); - log.vlog("Skip Reason: Microsoft Sharepoint 'enrichment' after upload issue"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); - // Delete record from the local database - file will be uploaded as a new file - itemdb.deleteById(item.driveId, item.id); - uploadFailed = true; - return response; } - // return a JSON response so that it can be used and saved - return response; - } - - // upload new items to OneDrive - private void uploadNewItems(const(string) path) - { - static import std.utf; - import std.range : walkLength; - import std.uni : byGrapheme; - // https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders - // If the path is greater than allowed characters, then one drive will return a '400 - Bad Request' - // Need to ensure that the URI is encoded before the check is made: - // - 400 Character Limit for OneDrive Business / Office 365 - // - 430 Character Limit for OneDrive Personal - long maxPathLength = 0; - long pathWalkLength = 0; - - // Configure maxPathLength based on account type - if (accountType == "personal"){ - // Personal Account - maxPathLength = 430; - } else { - // Business Account / Office365 - maxPathLength = 400; + // Check path for bad whitespace items + if (!invalidPath) { + if (containsBadWhiteSpace(localFilePath)) { // This will return true if this contains a bad whitespace item + addLogEntry("Skipping item - invalid name (Contains an invalid whitespace item): " ~ localFilePath, ["info", "notify"]); + invalidPath = true; + } } - // A short lived file that has disappeared will cause an error - is the path valid? - if (!exists(path)) { - log.log("Skipping item - path has disappeared: ", path); - return; + // Check path for HTML ASCII Codes + if (!invalidPath) { + if (containsASCIIHTMLCodes(localFilePath)) { // This will return true if this contains HTML ASCII Codes + addLogEntry("Skipping item - invalid name (Contains HTML ASCII Code): " ~ localFilePath, ["info", "notify"]); + invalidPath = true; + } } - // Calculate the path length by walking the path, catch any UTF-8 character errors - // https://github.com/abraunegg/onedrive/issues/487 - // https://github.com/abraunegg/onedrive/issues/1192 - try { - pathWalkLength = path.byGrapheme.walkLength; - } catch (std.utf.UTFException e) { - // path contains characters which generate a UTF exception - log.vlog("Skipping item - invalid UTF sequence: ", path); - log.vdebug(" Error Reason:", e.msg); - return; + // Check path for ASCII Control Codes + if (!invalidPath) { + if (containsASCIIControlCodes(localFilePath)) { // This will return true if this contains ASCII Control Codes + addLogEntry("Skipping item - invalid name (Contains ASCII Control Codes): " ~ localFilePath, ["info", "notify"]); + invalidPath = true; + } } - // check the std.encoding of the path - // https://github.com/skilion/onedrive/issues/57 - // https://github.com/abraunegg/onedrive/issues/487 - if(!isValid(path)) { - // Path is not valid according to https://dlang.org/phobos/std_encoding.html - log.vlog("Skipping item - invalid character encoding sequence: ", path); - return; - } + // Return if this is a valid path + return invalidPath; + } + + // Does this local path (directory or file) get excluded from any operation based on any client side filtering rules? 
+ bool checkPathAgainstClientSideFiltering(string localFilePath) { - // Is the path length is less than maxPathLength - if(pathWalkLength < maxPathLength){ - // skip dot files if configured - if (cfg.getValueBool("skip_dotfiles")) { - if (isDotFile(path)) { - log.vlog("Skipping item - .file or .folder: ", path); - return; + // Check the path against client side filtering rules + // - check_nosync + // - skip_dotfiles + // - skip_symlinks + // - skip_file + // - skip_dir + // - sync_list + // - skip_size + // Return a true|false response + + bool clientSideRuleExcludesPath = false; + + // does the path exist? + if (!exists(localFilePath)) { + // path does not exist - we cant review any client side rules on something that does not exist locally + return clientSideRuleExcludesPath; + } + + // - check_nosync + if (!clientSideRuleExcludesPath) { + // Do we need to check for .nosync? Only if --check-for-nosync was passed in + if (appConfig.getValueBool("check_nosync")) { + if (exists(localFilePath ~ "/.nosync")) { + addLogEntry("Skipping item - .nosync found & --check-for-nosync enabled: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } } - - // Do we need to check for .nosync? Only if --check-for-nosync was passed in - if (cfg.getValueBool("check_nosync")) { - if (exists(path ~ "/.nosync")) { - log.vlog("Skipping item - .nosync found & --check-for-nosync enabled: ", path); - return; + } + + // - skip_dotfiles + if (!clientSideRuleExcludesPath) { + // Do we need to check skip dot files if configured + if (appConfig.getValueBool("skip_dotfiles")) { + if (isDotFile(localFilePath)) { + addLogEntry("Skipping item - .file or .folder: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } } - + } + + // - skip_symlinks + if (!clientSideRuleExcludesPath) { // Is the path a symbolic link - if (isSymlink(path)) { + if (isSymlink(localFilePath)) { // if config says so we skip all symlinked items - if (cfg.getValueBool("skip_symlinks")) { - log.vlog("Skipping item - skip symbolic links configured: ", path); - return; - + if (appConfig.getValueBool("skip_symlinks")) { + addLogEntry("Skipping item - skip symbolic links configured: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } // skip unexisting symbolic links - else if (!exists(readLink(path))) { + else if (!exists(readLink(localFilePath))) { // reading the symbolic link failed - is the link a relative symbolic link // drwxrwxr-x. 2 alex alex 46 May 30 09:16 . // drwxrwxr-x. 3 alex alex 35 May 30 09:14 .. 
@@ -4368,7 +3084,7 @@ final class SyncEngine // // absolute links will be able to be read, but 'relative' links will fail, because they cannot be read based on the current working directory 'sync_dir' string currentSyncDir = getcwd(); - string fullLinkPath = buildNormalizedPath(absolutePath(path)); + string fullLinkPath = buildNormalizedPath(absolutePath(localFilePath)); string fileName = baseName(fullLinkPath); string parentLinkPath = dirName(fullLinkPath); // test if this is a 'relative' symbolic link @@ -4379,1798 +3095,3433 @@ final class SyncEngine chdir(currentSyncDir); // results if (relativeLinkTest) { - log.vdebug("Not skipping item - symbolic link is a 'relative link' to target ('", relativeLink, "') which can be supported: ", path); + addLogEntry("Not skipping item - symbolic link is a 'relative link' to target ('" ~ relativeLink ~ "') which can be supported: " ~ localFilePath, ["debug"]); } else { - log.logAndNotify("Skipping item - invalid symbolic link: ", path); - return; + addLogEntry("Skipping item - invalid symbolic link: "~ localFilePath, ["info", "notify"]); + clientSideRuleExcludesPath = true; } } } - - // Check for bad whitespace items - if (!containsBadWhiteSpace(path)) { - log.logAndNotify("Skipping item - invalid name (Contains an invalid whitespace item): ", path); - return; - } - - // Check for HTML ASCII Codes as part of file name - if (!containsASCIIHTMLCodes(path)) { - log.logAndNotify("Skipping item - invalid name (Contains HTML ASCII Code): ", path); - return; - } - - // Is this item excluded by user configuration of skip_dir or skip_file? - if (path != ".") { - if (isDir(path)) { - log.vdebug("Checking local path: ", path); + } + + // Is this item excluded by user configuration of skip_dir or skip_file? + if (!clientSideRuleExcludesPath) { + if (localFilePath != ".") { + // skip_dir handling + if (isDir(localFilePath)) { + addLogEntry("Checking local path: " ~ localFilePath, ["debug"]); + // Only check path if config is != "" - if (cfg.getValueString("skip_dir") != "") { + if (appConfig.getValueString("skip_dir") != "") { // The path that needs to be checked needs to include the '/' // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched - if (selectiveSync.isDirNameExcluded(path.strip('.'))) { - log.vlog("Skipping item - excluded by skip_dir config: ", path); - return; - } - } - - // In the event that this 'new item' is actually a OneDrive Business Shared Folder - // however the user may have omitted --sync-shared-folders, thus 'technically' this is a new item - // for this account OneDrive root, however this then would cause issues if --sync-shared-folders - // is added again after this sync - if ((exists(cfg.businessSharedFolderFilePath)) && (!syncBusinessFolders)){ - // business_shared_folders file exists, but we are not using / syncing them - // The file contents can only contain 'folder' names, so we need to strip './' from any path we are checking - if(selectiveSync.isSharedFolderMatched(strip(path,"./"))){ - // path detected as a 'new item' is matched as a path in business_shared_folders - log.vlog("Skipping item - excluded as included in business_shared_folders config: ", path); - log.vlog("To sync this directory to your OneDrive Account update your business_shared_folders config"); - return; + if (selectiveSync.isDirNameExcluded(localFilePath.strip('.'))) { + addLogEntry("Skipping item - excluded by skip_dir config: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } } } - 
if (isFile(path)) { - log.vdebug("Checking file: ", path); + // skip_file handling + if (isFile(localFilePath)) { + addLogEntry("Checking file: " ~ localFilePath, ["debug"]); + // The path that needs to be checked needs to include the '/' // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched - if (selectiveSync.isFileNameExcluded(path.strip('.'))) { - log.vlog("Skipping item - excluded by skip_file config: ", path); - return; + if (selectiveSync.isFileNameExcluded(localFilePath.strip('.'))) { + addLogEntry("Skipping item - excluded by skip_file config: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } } - - // is sync_list configured + } + } + + // Is this item excluded by user configuration of sync_list? + if (!clientSideRuleExcludesPath) { + if (localFilePath != ".") { if (syncListConfigured) { // sync_list configured and in use - if (selectiveSync.isPathExcludedViaSyncList(path)) { - if ((isFile(path)) && (cfg.getValueBool("sync_root_files")) && (rootName(path.strip('.').strip('/')) == "")) { - log.vdebug("Not skipping path due to sync_root_files inclusion: ", path); + if (selectiveSync.isPathExcludedViaSyncList(localFilePath)) { + if ((isFile(localFilePath)) && (appConfig.getValueBool("sync_root_files")) && (rootName(localFilePath.strip('.').strip('/')) == "")) { + addLogEntry("Not skipping path due to sync_root_files inclusion: " ~ localFilePath, ["debug"]); } else { - string userSyncList = cfg.configDirName ~ "/sync_list"; - if (exists(userSyncList)){ + if (exists(appConfig.syncListFilePath)){ // skipped most likely due to inclusion in sync_list - log.vlog("Skipping item - excluded by sync_list config: ", path); - return; + addLogEntry("Skipping item - excluded by sync_list config: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } else { // skipped for some other reason - log.vlog("Skipping item - path excluded by user config: ", path); - return; + addLogEntry("Skipping item - path excluded by user config: " ~ localFilePath, ["verbose"]); + clientSideRuleExcludesPath = true; } } } } } - - // Check against Microsoft OneDrive restriction and limitations about Windows naming files - if (!isValidName(path)) { - log.logAndNotify("Skipping item - invalid name (Microsoft Naming Convention): ", path); - return; - } - - // If we are in a --dry-run scenario, we may have renamed a folder - but it is technically not renamed locally - // Thus, that entire path may be attemtped to be uploaded as new data to OneDrive - if (dryRun) { - // check the pathsRenamed array for this path - // if any match - we need to exclude this path - foreach (thisRenamedPath; pathsRenamed) { - log.vdebug("Renamed Path to evaluate: ", thisRenamedPath); - // Can we find 'thisRenamedPath' in the given 'path' - if (canFind(path, thisRenamedPath)) { - log.vdebug("Renamed Path MATCH - DONT UPLOAD AS NEW"); - return; + } + + // Check if this is excluded by a user set maximum filesize to upload + if (!clientSideRuleExcludesPath) { + if (isFile(localFilePath)) { + if (fileSizeLimit != 0) { + // Get the file size + ulong thisFileSize = getSize(localFilePath); + if (thisFileSize >= fileSizeLimit) { + addLogEntry("Skipping item - excluded by skip_size config: " ~ localFilePath ~ " (" ~ to!string(thisFileSize/2^^20) ~ " MB)", ["verbose"]); } } } + } + + return clientSideRuleExcludesPath; + } + + // Does this JSON item (as received from OneDrive API) get excluded from any operation based on any client side filtering rules? 
+ // This function is only used when we are fetching objects from the OneDrive API using a /children query to help speed up what object we query + bool checkJSONAgainstClientSideFiltering(JSONValue onedriveJSONItem) { - // We want to upload this new local data - if (isDir(path)) { - Item item; - bool pathFoundInDB = false; - foreach (driveId; driveIDsArray) { - if (itemdb.selectByPath(path, driveId, item)) { - pathFoundInDB = true; - } - } - - // Was the path found in the database? - if (!pathFoundInDB) { - // Path not found in database when searching all drive id's - if (!cleanupLocalFiles) { - // --download-only --cleanup-local-files not used - uploadCreateDir(path); + bool clientSideRuleExcludesPath = false; + + // Check the path against client side filtering rules + // - check_nosync (MISSING) + // - skip_dotfiles (MISSING) + // - skip_symlinks (MISSING) + // - skip_file + // - skip_dir + // - sync_list + // - skip_size (MISSING) + // Return a true|false response + + // Use the JSON elements rather can computing a DB struct via makeItem() + string thisItemId = onedriveJSONItem["id"].str; + string thisItemDriveId = onedriveJSONItem["parentReference"]["driveId"].str; + string thisItemParentId = onedriveJSONItem["parentReference"]["id"].str; + string thisItemName = onedriveJSONItem["name"].str; + + // Is this parent is in the database + bool parentInDatabase = false; + + // Calculate if the Parent Item is in the database so that it can be re-used + parentInDatabase = itemDB.idInLocalDatabase(thisItemDriveId, thisItemParentId); + + // Check if this is excluded by config option: skip_dir + if (!clientSideRuleExcludesPath) { + // Is the item a folder? + if (isItemFolder(onedriveJSONItem)) { + // Only check path if config is != "" + if (!appConfig.getValueString("skip_dir").empty) { + // work out the 'snippet' path where this folder would be created + string simplePathToCheck = ""; + string complexPathToCheck = ""; + string matchDisplay = ""; + + if (hasParentReference(onedriveJSONItem)) { + // we need to workout the FULL path for this item + // simple path + if (("name" in onedriveJSONItem["parentReference"]) != null) { + simplePathToCheck = onedriveJSONItem["parentReference"]["name"].str ~ "/" ~ onedriveJSONItem["name"].str; + } else { + simplePathToCheck = onedriveJSONItem["name"].str; + } + addLogEntry("skip_dir path to check (simple): " ~ simplePathToCheck, ["debug"]); + + // complex path + if (parentInDatabase) { + // build up complexPathToCheck + //complexPathToCheck = buildNormalizedPath(newItemPath); + complexPathToCheck = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + } else { + addLogEntry("Parent details not in database - unable to compute complex path to check", ["debug"]); + } + if (!complexPathToCheck.empty) { + addLogEntry("skip_dir path to check (complex): " ~ complexPathToCheck, ["debug"]); + } } else { - // we need to clean up this directory - log.log("Removing local directory as --download-only & --cleanup-local-files configured"); - // Remove any children of this path if they still exist - // Resolve 'Directory not empty' error when deleting local files - try { - foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) { - // what sort of child is this? - if (isDir(child.name)) { - log.log("Removing local directory: ", child.name); - } else { - log.log("Removing local file: ", child.name); - } - // are we in a --dry-run scenario? - if (!dryRun) { - // No --dry-run ... process local delete - try { - attrIsDir(child.linkAttributes) ? 
rmdir(child.name) : remove(child.name); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - } - // Remove the path now that it is empty of children - log.log("Removing local directory: ", path); - // are we in a --dry-run scenario? - if (!dryRun) { - // No --dry-run ... process local delete - try { - rmdirRecurse(path); - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - } - } - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return; - } + simplePathToCheck = onedriveJSONItem["name"].str; } - } - - // recursively traverse children - // the above operation takes time and the directory might have - // disappeared in the meantime - if (!exists(path)) { - if (!cleanupLocalFiles) { - // --download-only --cleanup-local-files not used - log.vlog("Directory disappeared during upload: ", path); + + // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder + // then isDirNameExcluded matching will not work + // Clean up 'root:' if present + if (startsWith(simplePathToCheck, "root:")){ + addLogEntry("Updating simplePathToCheck to remove 'root:'", ["debug"]); + simplePathToCheck = strip(simplePathToCheck, "root:"); } - return; - } - - // Try and access the directory and any path below - try { - auto entries = dirEntries(path, SpanMode.shallow, false); - foreach (DirEntry entry; entries) { - string thisPath = entry.name; - uploadNewItems(thisPath); + if (startsWith(complexPathToCheck, "root:")){ + addLogEntry("Updating complexPathToCheck to remove 'root:'", ["debug"]); + complexPathToCheck = strip(complexPathToCheck, "root:"); } - } catch (FileException e) { - // display the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return; - } - } else { - // path is not a directory, is it a valid file? - // pipes - whilst technically valid files, are not valid for this client - // prw-rw-r--. 1 user user 0 Jul 7 05:55 my_pipe - if (isFile(path)) { - // Path is a valid file - bool fileFoundInDB = false; - Item item; - // Search the database for this file - foreach (driveId; driveIDsArray) { - if (itemdb.selectByPath(path, driveId, item)) { - fileFoundInDB = true; + // OK .. what checks are we doing? + if ((!simplePathToCheck.empty) && (complexPathToCheck.empty)) { + // just a simple check + addLogEntry("Performing a simple check only", ["debug"]); + clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(simplePathToCheck); + } else { + // simple and complex + addLogEntry("Performing a simple then complex path match if required", ["debug"]); + + // simple first + addLogEntry("Performing a simple check first", ["debug"]); + clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(simplePathToCheck); + matchDisplay = simplePathToCheck; + if (!clientSideRuleExcludesPath) { + addLogEntry("Simple match was false, attempting complex match", ["debug"]); + // simple didnt match, perform a complex check + clientSideRuleExcludesPath = selectiveSync.isDirNameExcluded(complexPathToCheck); + matchDisplay = complexPathToCheck; } } - - // Was the file found in the database? - if (!fileFoundInDB) { - // File not found in database when searching all drive id's - // Do we upload the file or clean up the file? 
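// The removed branch that follows resolves this question from two option pairs:
//   --download-only with --cleanup-local-files  -> remove the local file instead of uploading it
//   --upload-only with --remove-source-files    -> upload the file, then remove the local source
//   neither                                     -> upload the file and keep the local copy
// A compact sketch of that decision; the flag parameters are stand-ins for the client's configuration lookups.
enum NewLocalFileAction { cleanupLocalFile, uploadThenRemoveSource, uploadAndKeep }

NewLocalFileAction decideNewLocalFileAction(bool cleanupLocalFiles, bool uploadOnly, bool removeSourceFiles) {
	if (cleanupLocalFiles) return NewLocalFileAction.cleanupLocalFile;
	if (uploadOnly && removeSourceFiles) return NewLocalFileAction.uploadThenRemoveSource;
	return NewLocalFileAction.uploadAndKeep;
}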
- if (!cleanupLocalFiles) { - // --download-only --cleanup-local-files not used - uploadNewFile(path); - // Did the upload fail? - if (!uploadFailed) { - // Upload did not fail - // Issue #763 - Delete local files after sync handling - // are we in an --upload-only & --remove-source-files scenario? - if ((uploadOnly) && (localDeleteAfterUpload)) { - // Log that we are deleting a local item - log.log("Removing local file as --upload-only & --remove-source-files configured"); - // are we in a --dry-run scenario? - log.vdebug("Removing local file: ", path); - if (!dryRun) { - // No --dry-run ... process local file delete - safeRemove(path); - } - } - } - } else { - // we need to clean up this file - log.log("Removing local file as --download-only & --cleanup-local-files configured"); - // are we in a --dry-run scenario? - log.log("Removing local file: ", path); - if (!dryRun) { - // No --dry-run ... process local file delete - safeRemove(path); - } - } + // End Result + addLogEntry("skip_dir exclude result (directory based): " ~ clientSideRuleExcludesPath, ["debug"]); + if (clientSideRuleExcludesPath) { + // This path should be skipped + addLogEntry("Skipping item - excluded by skip_dir config: " ~ matchDisplay, ["verbose"]); } - } else { - // path is not a valid file - log.log("Skipping item - item is not a valid file: ", path); } } - } else { - // This path was skipped - why? - log.log("Skipping item '", path, "' due to the full path exceeding ", maxPathLength, " characters (Microsoft OneDrive limitation)"); } - } - - // create new directory on OneDrive - private void uploadCreateDir(const(string) path) - { - log.vlog("OneDrive Client requested to create remote path: ", path); - - JSONValue onedrivePathDetails; - Item parent; - - // Was the path entered the root path? - if (path != "."){ - // What parent path to use? - string parentPath = dirName(path); // will be either . or something else - if (parentPath == "."){ - // Assume this is a new 'local' folder in the users configured sync_dir - // Use client defaults - parent.id = defaultRootId; // Should give something like 12345ABCDE1234A1!101 - parent.driveId = defaultDriveId; // Should give something like 12345abcde1234a1 - } else { - // Query the database using each of the driveId's we are using - foreach (driveId; driveIDsArray) { - // Query the database for this parent path using each driveId - Item dbResponse; - if(itemdb.selectByPathWithoutRemote(parentPath, driveId, dbResponse)){ - // parent path was found in the database - parent = dbResponse; - } - } - } - - // If this is still null or empty - we cant query the database properly later on - // Query OneDrive API for parent details - if ((parent.driveId == "") && (parent.id == "")){ - try { - log.vdebug("Attempting to query OneDrive for this parent path: ", parentPath); - onedrivePathDetails = onedrive.getPathDetails(parentPath); - } catch (OneDriveException e) { - log.vdebug("onedrivePathDetails = onedrive.getPathDetails(parentPath); generated a OneDriveException"); - // exception - set onedriveParentRootDetails to a blank valid JSON - onedrivePathDetails = parseJSON("{}"); - if (e.httpStatusCode == 404) { - // Parent does not exist ... need to create parent - log.vdebug("Parent path does not exist: ", parentPath); - uploadCreateDir(parentPath); - } + + // Check if this is excluded by config option: skip_file + if (!clientSideRuleExcludesPath) { + // is the item a file ? 
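// isFileItem(), used on the next line, is defined elsewhere in the client. Conceptually it inspects
// which facet the OneDrive API attached to the driveItem JSON: a 'file' facet for files, a 'folder'
// facet for directories. A rough sketch of that idea, assuming the JSON value is an object:
import std.json : JSONValue;

bool looksLikeFileItem(JSONValue onedriveJSONItem) {
	return ("file" in onedriveJSONItem) !is null;
}

bool looksLikeFolderItem(JSONValue onedriveJSONItem) {
	return ("folder" in onedriveJSONItem) !is null;
}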
+ if (isFileItem(onedriveJSONItem)) { + // JSON item is a file + + // skip_file can contain 4 types of entries: + // - wildcard - *.txt + // - text + wildcard - name*.txt + // - full path + combination of any above two - /path/name*.txt + // - full path to file - /path/to/file.txt + + string exclusionTestPath = ""; + + // is the parent id in the database? + if (parentInDatabase) { + // parent id is in the database, so we can try and calculate the full file path + string jsonItemPath = ""; - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadCreateDir(path);"); - uploadCreateDir(path); - // return back to original call - return; + // Compute this item path & need the full path for this file + jsonItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + // Log the calculation + addLogEntry("New Item calculated full path is: " ~ jsonItemPath, ["debug"]); + + // The path that needs to be checked needs to include the '/' + // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched + // However, as 'path' used throughout, use a temp variable with this modification so that we use the temp variable for exclusion checks + if (!startsWith(jsonItemPath, "/")){ + // Add '/' to the path + exclusionTestPath = '/' ~ jsonItemPath; } - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + // what are we checking + addLogEntry("skip_file item to check (full calculated path): " ~ exclusionTestPath, ["debug"]); + } else { + // parent not in database, we can only check using this JSON item's name + if (!startsWith(thisItemName, "/")){ + // Add '/' to the path + exclusionTestPath = '/' ~ thisItemName; } + + // what are we checking + addLogEntry("skip_file item to check (file name only - parent path not in database): " ~ exclusionTestPath, ["debug"]); + clientSideRuleExcludesPath = selectiveSync.isFileNameExcluded(exclusionTestPath); } - // configure the parent item data - if (hasId(onedrivePathDetails) && hasParentReference(onedrivePathDetails)){ - log.vdebug("Parent path found, configuring parent item"); - parent.id = onedrivePathDetails["id"].str; // This item's ID. 
Should give something like 12345ABCDE1234A1!101 - parent.driveId = onedrivePathDetails["parentReference"]["driveId"].str; // Should give something like 12345abcde1234a1 - } else { - // OneDrive API query failed - // Assume client defaults - log.vdebug("Parent path could not be queried, using OneDrive account defaults"); - parent.id = defaultRootId; // Should give something like 12345ABCDE1234A1!101 - parent.driveId = defaultDriveId; // Should give something like 12345abcde1234a1 + // Perform the 'skip_file' evaluation + clientSideRuleExcludesPath = selectiveSync.isFileNameExcluded(exclusionTestPath); + addLogEntry("Result: " ~ to!string(clientSideRuleExcludesPath), ["debug"]); + + if (clientSideRuleExcludesPath) { + // This path should be skipped + addLogEntry("Skipping item - excluded by skip_file config: " ~ exclusionTestPath, ["verbose"]); } } - - JSONValue response; - // test if the path we are going to create already exists on OneDrive - try { - log.vdebug("Attempting to query OneDrive for this path: ", path); - response = onedrive.getPathDetailsByDriveId(parent.driveId, path); - } catch (OneDriveException e) { - log.vdebug("response = onedrive.getPathDetails(path); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found on the drive id we queried - log.vlog("The requested directory to create was not found on OneDrive - creating remote directory: ", path); - - if (!dryRun) { - // Perform the database lookup - is the parent in the database? - if (!itemdb.selectByPath(dirName(path), parent.driveId, parent)) { - // parent is not in the database - log.vdebug("Parent path is not in the database - need to add it: ", dirName(path)); - uploadCreateDir(dirName(path)); - } + } + + // Check if this is included or excluded by use of sync_list + if (!clientSideRuleExcludesPath) { + // No need to try and process something against a sync_list if it has been configured + if (syncListConfigured) { + // Compute the item path if empty - as to check sync_list we need an actual path to check + + // What is the path of the new item + string newItemPath; + + // Is the parent in the database? If not, we cannot compute the the full path based on the database entries + // In a --resync scenario - the database is empty + if (parentInDatabase) { + // Calculate this items path based on database entries + newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; + } else { + // parent not in the database + if (("path" in onedriveJSONItem["parentReference"]) != null) { + // If there is a parent reference path, try and use it + string selfBuiltPath = onedriveJSONItem["parentReference"]["path"].str ~ "/" ~ onedriveJSONItem["name"].str; - // Is the parent a 'folder' from another user? ie - is this a 'shared folder' that has been shared with us? - if (defaultDriveId == parent.driveId){ - // enforce check of parent path. if the above was triggered, the below will generate a sync retry and will now be sucessful - enforce(itemdb.selectByPath(dirName(path), parent.driveId, parent), "The parent item id is not in the database"); - } else { - log.vdebug("Parent drive ID is not our drive ID - parent most likely a shared folder"); + // Check for ':' and split if present + auto splitIndex = selfBuiltPath.indexOf(":"); + if (splitIndex != -1) { + // Keep only the part after ':' + selfBuiltPath = selfBuiltPath[splitIndex + 1 .. 
$]; } - JSONValue driveItem = [ - "name": JSONValue(baseName(path)), - "folder": parseJSON("{}") - ]; - - // Submit the creation request - // Fix for https://github.com/skilion/onedrive/issues/356 - try { - // Attempt to create a new folder on the configured parent driveId & parent id - response = onedrive.createById(parent.driveId, parent.id, driveItem); - } catch (OneDriveException e) { - if (e.httpStatusCode == 409) { - // OneDrive API returned a 404 (above) to say the directory did not exist - // but when we attempted to create it, OneDrive responded that it now already exists - log.vlog("OneDrive reported that ", path, " already exists .. OneDrive API race condition"); - return; - } else { - // some other error from OneDrive was returned - display what it is - log.error("OneDrive generated an error when creating this path: ", path); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); + // Set newItemPath to the self built path + newItemPath = selfBuiltPath; } else { - // Simulate a successful 'directory create' & save it to the dryRun database copy - // The simulated response has to pass 'makeItem' as part of saveItem - auto fakeResponse = createFakeResponse(path); - saveItem(fakeResponse); + // no parent reference path available in provided JSON + newItemPath = thisItemName; } - - log.vlog("Successfully created the remote directory ", path, " on OneDrive"); - return; } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadCreateDir(path);"); - uploadCreateDir(path); - // return back to original call - return; + // Check for HTML entities (e.g., '%20' for space) in newItemPath + if (containsURLEncodedItems(newItemPath)) { + addLogEntry("CAUTION: The JSON element transmitted by the Microsoft OneDrive API includes HTML URL encoded items, which may complicate pattern matching and potentially lead to synchronisation problems for this item."); + addLogEntry("WORKAROUND: An alternative solution could be to change the name of this item through the online platform: " ~ newItemPath, ["verbose"]); + addLogEntry("See: https://github.com/OneDrive/onedrive-api-docs/issues/1765 for further details", ["verbose"]); } - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; + // Update newItemPath + if(newItemPath[0] == '/') { + newItemPath = newItemPath[1..$]; } - } - - // response from OneDrive has to be a valid JSON object - if (response.type() == JSONType.object){ - // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file - // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, - // even though some file systems (such as a POSIX-compliant file system) may consider them as different. - // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior. 
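// The comparison below (response["name"].str == baseName(path)) is an exact, case sensitive match; the
// error branch further down reports a 'case-insensitive match' when the two names differ only by case.
// A minimal sketch of that second test:
bool namesDifferOnlyByCase(string onlineName, string localName) {
	import std.uni : toLower;
	// e.g. "Documents" vs "documents": not identical, but equal once case is folded
	return (onlineName != localName) && (onlineName.toLower == localName.toLower);
}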
- if (response["name"].str == baseName(path)){ - // OneDrive 'name' matches local path name - log.vlog("The requested directory to create was found on OneDrive - skipping creating the directory: ", path ); - // Check that this path is in the database - if (!itemdb.selectById(parent.driveId, parent.id, parent)){ - // parent for 'path' is NOT in the database - log.vlog("The parent for this path is not in the local database - need to add parent to local database"); - parentPath = dirName(path); - // add the parent into the database - uploadCreateDir(parentPath); - // save this child item into the database - log.vlog("The parent for this path has been added to the local database - adding requested path (", path ,") to database"); - if (!dryRun) { - // save the live data - saveItem(response); - } else { - // need to fake this data - auto fakeResponse = createFakeResponse(path); - saveItem(fakeResponse); - } + // What path are we checking? + addLogEntry("sync_list item to check: " ~ newItemPath, ["debug"]); + + // Unfortunatly there is no avoiding this call to check if the path is excluded|included via sync_list + if (selectiveSync.isPathExcludedViaSyncList(newItemPath)) { + // selective sync advised to skip, however is this a file and are we configured to upload / download files in the root? + if ((isItemFile(onedriveJSONItem)) && (appConfig.getValueBool("sync_root_files")) && (rootName(newItemPath) == "") ) { + // This is a file + // We are configured to sync all files in the root + // This is a file in the logical root + clientSideRuleExcludesPath = false; } else { - // parent is in database - log.vlog("The parent for this path is in the local database - adding requested path (", path ,") to database"); - // are we in a --dry-run scenario? - if (!dryRun) { - // get the live data - JSONValue pathDetails; - try { - pathDetails = onedrive.getPathDetailsByDriveId(parent.driveId, path); - } catch (OneDriveException e) { - log.vdebug("pathDetails = onedrive.getPathDetailsByDriveId(parent.driveId, path) generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // The directory was not found - log.error("ERROR: The requested single directory to sync was not found on OneDrive"); - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling onedrive.getPathDetailsByDriveId(parent.driveId, path);"); - pathDetails = onedrive.getPathDetailsByDriveId(parent.driveId, path); - } - - if (e.httpStatusCode >= 500) { - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - return; - } - } - - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(pathDetails); - - // OneDrive Personal Shared Folder edgecase handling - // In a: - // --resync --upload-only --single-directory 'dir' scenario, and where the root 'dir' for --single-directory is a 'shared folder' - // OR - // --resync --upload-only scenario, and where the root 'dir' to upload is a 'shared folder' - // - // We will not have the 'tie' DB entry created because of --upload-only because we do not download the folder structure from OneDrive - // to know what the remoteDriveId actually is - if (accountType == "personal"){ - // are we in a --resync --upload-only scenario ? - if ((cfg.getValueBool("resync")) && (cfg.getValueBool("upload_only"))) { - // Create a temp item - // Takes a JSON input and formats to an item which can be used by the database - Item tempItem = makeItem(pathDetails); - // New DB Tie item due to edge case - Item tieDBItem; - // Set the name - tieDBItem.name = tempItem.name; - // Set the correct item type - tieDBItem.type = ItemType.dir; - //parent.type = ItemType.remote; - if ((tempItem.type == ItemType.remote) && (!tempItem.remoteDriveId.empty)) { - // set the right elements - tieDBItem.driveId = tempItem.remoteDriveId; - tieDBItem.id = tempItem.remoteId; - // Set the correct mtime - tieDBItem.mtime = tempItem.mtime; - // Add tie DB record to the local database - log.vdebug("Adding tie DB record to database: ", tieDBItem); - itemdb.upsert(tieDBItem); - } - } - } - } else { - // need to fake this data - auto fakeResponse = createFakeResponse(path); - saveItem(fakeResponse); - } + // path is unwanted + clientSideRuleExcludesPath = true; + addLogEntry("Skipping item - excluded by sync_list config: " ~ newItemPath, ["verbose"]); } - } else { - // They are the "same" name wise but different in case sensitivity - log.error("ERROR: Current directory has a 'case-insensitive match' to an existing directory on OneDrive"); - log.error("ERROR: To resolve, rename this local directory: ", buildNormalizedPath(absolutePath(path))); - log.error("ERROR: Remote OneDrive directory: ", response["name"].str); - log.log("Skipping: ", buildNormalizedPath(absolutePath(path))); - return; } - } else { - // response is not valid JSON, an error was returned from OneDrive - log.error("ERROR: There was an error performing this operation on OneDrive"); - log.error("ERROR: Increase logging verbosity to assist determining why."); - log.log("Skipping: ", buildNormalizedPath(absolutePath(path))); - return; } } + + // return if path is excluded + return clientSideRuleExcludesPath; } - // upload a new file to OneDrive - private void uploadNewFile(const(string) path) - { - // Reset upload failure - OneDrive or filesystem issue (reading data) - uploadFailed = false; - Item parent; - bool parentPathFoundInDB = false; - // Check the database for the parent path - // What parent path to use? - string parentPath = dirName(path); // will be either . 
or something else - if (parentPath == "."){ - // Assume this is a new file in the users configured sync_dir root - // Use client defaults - parent.id = defaultRootId; // Should give something like 12345ABCDE1234A1!101 - parent.driveId = defaultDriveId; // Should give something like 12345abcde1234a1 - parentPathFoundInDB = true; - } else { - // Query the database using each of the driveId's we are using - foreach (driveId; driveIDsArray) { - // Query the database for this parent path using each driveId - Item dbResponse; - if(itemdb.selectByPath(parentPath, driveId, dbResponse)){ - // parent path was found in the database - parent = dbResponse; - parentPathFoundInDB = true; - } - } + // Process the list of local changes to upload to OneDrive + void processChangedLocalItemsToUpload() { + + // Each element in this array 'databaseItemsWhereContentHasChanged' is an Database Item ID that has been modified locally + ulong batchSize = appConfig.concurrentThreads; + ulong batchCount = (databaseItemsWhereContentHasChanged.length + batchSize - 1) / batchSize; + ulong batchesProcessed = 0; + + // For each batch of files to upload, upload the changed data to OneDrive + foreach (chunk; databaseItemsWhereContentHasChanged.chunks(batchSize)) { + uploadChangedLocalFileToOneDrive(chunk); } + } + + // Upload changed local files to OneDrive in parallel + void uploadChangedLocalFileToOneDrive(string[3][] array) { + + foreach (i, localItemDetails; taskPool.parallel(array)) { + + addLogEntry("Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]); + + // These are the details of the item we need to upload + string changedItemParentId = localItemDetails[0]; + string changedItemId = localItemDetails[1]; + string localFilePath = localItemDetails[2]; + + // How much space is remaining on OneDrive + ulong remainingFreeSpace; + // Did the upload fail? + bool uploadFailed = false; + // Did we skip due to exceeding maximum allowed size? + bool skippedMaxSize = false; + // Did we skip to an exception error? + bool skippedExceptionError = false; + + // Unfortunatly, we cant store an array of Item's ... so we have to re-query the DB again - unavoidable extra processing here + // This is because the Item[] has no other functions to allow is to parallel process those elements, so we have to use a string array as input to this function + Item dbItem; + itemDB.selectById(changedItemParentId, changedItemId, dbItem); + + // Query the available space online + // This will update appConfig.quotaAvailable & appConfig.quotaRestricted values + remainingFreeSpace = getRemainingFreeSpace(dbItem.driveId); + + // Get the file size from the actual file + ulong thisFileSizeLocal = getSize(localFilePath); + // Get the file size from the DB data + ulong thisFileSizeFromDB = to!ulong(dbItem.size); + + // remainingFreeSpace online includes the current file online + // we need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value + ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal; + + // Based on what we know, for this thread - can we safely upload this modified local file? 
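Editor's note: the space estimate computed above is easier to follow with concrete figures. The sketch below is illustrative only and is not part of this patch; the `estimateSpaceAfterReplace` helper and the sample values are hypothetical, but the arithmetic mirrors the `(remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal` expression used in `uploadChangedLocalFileToOneDrive()` (which the patch stores as `ulong`).

```d
import std.stdio : writeln;

// Hypothetical helper mirroring the calculation above: the old copy of the file already
// counts against the online quota, so add its size back before subtracting the size of
// the new local copy to approximate the free space remaining after the upload.
long estimateSpaceAfterReplace(long remainingFreeSpace, long onlineFileSize, long localFileSize) {
	return (remainingFreeSpace + onlineFileSize) - localFileSize;
}

void main() {
	// Example figures only: 1 GiB free online, 200 MiB stored online, 350 MiB new local copy
	long freeOnline = 1_073_741_824;
	long sizeOnline = 209_715_200;
	long sizeLocal  = 367_001_600;
	// Prints 916455424 (874 MiB) - still positive, so the upload would be attempted
	writeln(estimateSpaceAfterReplace(freeOnline, sizeOnline, sizeLocal));
}
```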
+ addLogEntry("This Thread Current Free Space Online: " ~ to!string(remainingFreeSpace), ["debug"]); + addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]); - // Get the file size - long thisFileSize = getSize(path); - // Can we upload this file - is there enough free space? - https://github.com/skilion/onedrive/issues/73 - // We can only use 'remainingFreeSpace' if we are uploading to our driveId ... if this is a shared folder, we have no visibility of space available, as quota details are not provided by the OneDrive API - if (parent.driveId == defaultDriveId) { - // the file will be uploaded to my driveId - log.vdebug("File upload destination is users default driveId .."); - // are quota details being restricted? - if (!quotaRestricted) { - // quota is not being restricted - we can track drive space allocation to determine if it is possible to upload the file - if ((remainingFreeSpace - thisFileSize) < 0) { - // no space to upload file, based on tracking of quota values - quotaAvailable = false; + JSONValue uploadResponse; + + bool spaceAvailableOnline = false; + // If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated + // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true + // If 'business' accounts, if driveId == defaultDriveId, then we will have data + // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true + + // What was the latest getRemainingFreeSpace() value? + if (appConfig.quotaAvailable) { + // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload? + if (calculatedSpaceOnlinePostUpload > 0) { + // Based on this thread action, we beleive that there is space available online to upload - proceed + spaceAvailableOnline = true; + } + } + // Is quota being restricted? + if (appConfig.quotaRestricted) { + // Space available online is being restricted - so we have no way to really know if there is space available online + spaceAvailableOnline = true; + } + + // Do we have space available or is space available being restricted (so we make the blind assumption that there is space available) + if (spaceAvailableOnline) { + // Does this file exceed the maximum file size to upload to OneDrive? 
+ if (thisFileSizeLocal <= maxUploadFileSize) { + // Attempt to upload the modified file + // Error handling is in performModifiedFileUpload(), and the JSON that is responded with - will either be null or a valid JSON object containing the upload result + uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + + // Evaluate the returned JSON uploadResponse + // If there was an error uploading the file, uploadResponse should be empty and invalid + if (uploadResponse.type() != JSONType.object) { + uploadFailed = true; + skippedExceptionError = true; + } + } else { - // there is free space to upload file, based on tracking of quota values - quotaAvailable = true; + // Skip file - too large + uploadFailed = true; + skippedMaxSize = true; } } else { - // set quotaAvailable as true, even though we have zero way to validate that this is correct or not - quotaAvailable = true; + // Cant upload this file - no space available + uploadFailed = true; } - } else { - // the file will be uploaded to a shared folder - // we can't track if there is enough free space to upload the file - log.vdebug("File upload destination is a shared folder - the upload may fail if not enough space on OneDrive .."); - // set quotaAvailable as true, even though we have zero way to validate that this is correct or not - quotaAvailable = true; - } - - // If performing a dry-run or parentPath is found in the database & there is quota available to upload file - if ((dryRun) || (parentPathFoundInDB && quotaAvailable)) { - // Maximum file size upload - // https://support.microsoft.com/en-us/office/invalid-file-names-and-file-types-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us - // July 2020, maximum file size for all accounts is 100GB - // January 2021, maximum file size for all accounts is 250GB - auto maxUploadFileSize = 268435456000; // 250GB - - // Can we read the file - as a permissions issue or file corruption will cause a failure - // https://github.com/abraunegg/onedrive/issues/113 - if (readLocalFile(path)){ - // we are able to read the file - // To avoid a 409 Conflict error - does the file actually exist on OneDrive already? - JSONValue fileDetailsFromOneDrive; - if (thisFileSize <= maxUploadFileSize){ - // Resolves: https://github.com/skilion/onedrive/issues/121, https://github.com/skilion/onedrive/issues/294, https://github.com/skilion/onedrive/issues/329 - // Does this 'file' already exist on OneDrive? 
- try { - // test if the local path exists on OneDrive - // if parent.driveId is invalid, then API call will generate a 'HTTP 400 - Bad Request' - make sure we at least have a valid parent.driveId - if (!parent.driveId.empty) { - // use configured value for parent.driveId - fileDetailsFromOneDrive = onedrive.getPathDetailsByDriveId(parent.driveId, path); - } else { - // switch to using defaultDriveId - log.vdebug("parent.driveId is empty - using defaultDriveId for API call"); - fileDetailsFromOneDrive = onedrive.getPathDetailsByDriveId(defaultDriveId, path); - } - } catch (OneDriveException e) { - // log that we generated an exception - log.vdebug("fileDetailsFromOneDrive = onedrive.getPathDetailsByDriveId(parent.driveId, path); generated a OneDriveException"); - // OneDrive returned a 'HTTP/1.1 400 Bad Request' - // If the 'path', when encoded, cannot be interpreted by the OneDrive API, the API will generate a 400 error - if (e.httpStatusCode == 400) { - log.log("Skipping uploading this new file: ", buildNormalizedPath(absolutePath(path))); - log.vlog("Skipping item - OneDrive returned a 'HTTP 400 - Bad Request' when attempting to query if file exists"); - log.error("ERROR: To resolve, rename this local file: ", buildNormalizedPath(absolutePath(path))); - uploadFailed = true; - return; - } - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - if (e.httpStatusCode == 401) { - log.vlog("Skipping item - OneDrive returned a 'HTTP 401 - Unauthorized' when attempting to query if file exists"); - uploadFailed = true; - return; + + // Did the upload fail? + if (uploadFailed) { + // Upload failed .. why? + // No space available online + if (!spaceAvailableOnline) { + addLogEntry("Skipping uploading modified file " ~ localFilePath ~ " due to insufficient free space available on Microsoft OneDrive", ["info", "notify"]); + } + // File exceeds max allowed size + if (skippedMaxSize) { + addLogEntry("Skipping uploading this modified file as it exceeds the maximum size allowed by OneDrive: " ~ localFilePath, ["info", "notify"]); + } + // Generic message + if (skippedExceptionError) { + // normal failure message if API or exception error generated + addLogEntry("Uploading modified file " ~ localFilePath ~ " ... failed!", ["info", "notify"]); + } + } else { + // Upload was successful + addLogEntry("Uploading modified file " ~ localFilePath ~ " ... 
done.", ["info", "notify"]); + + // Save JSON item in database + saveItem(uploadResponse); + + if (!dryRun) { + // Check the integrity of the uploaded modified file + performUploadIntegrityValidationChecks(uploadResponse, localFilePath, thisFileSizeLocal); + + // Update the date / time of the file online to match the local item + // Get the local file last modified time + SysTime localModifiedTime = timeLastModified(localFilePath).toUTC(); + localModifiedTime.fracSecs = Duration.zero; + // Get the latest eTag, and use that + string etagFromUploadResponse = uploadResponse["eTag"].str; + // Attempt to update the online date time stamp based on our local data + uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime, etagFromUploadResponse); + } + } + + addLogEntry("Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]); + + } // end of 'foreach (i, localItemDetails; array.enumerate)' + } + + // Perform the upload of a locally modified file to OneDrive + JSONValue performModifiedFileUpload(Item dbItem, string localFilePath, ulong thisFileSizeLocal) { + + JSONValue uploadResponse; + OneDriveApi uploadFileOneDriveApiInstance; + uploadFileOneDriveApiInstance = new OneDriveApi(appConfig); + uploadFileOneDriveApiInstance.initialise(); + + // Is this a dry-run scenario? + if (!dryRun) { + // Do we use simpleUpload or create an upload session? + bool useSimpleUpload = false; + + //if ((appConfig.accountType == "personal") && (thisFileSizeLocal <= sessionThresholdFileSize)) { + + if (thisFileSizeLocal <= sessionThresholdFileSize) { + useSimpleUpload = true; + } + + // We can only upload zero size files via simpleFileUpload regardless of account type + // Reference: https://github.com/OneDrive/onedrive-api-docs/issues/53 + // Additionally, all files where file size is < 4MB should be uploaded by simpleUploadReplace - everything else should use a session to upload the modified file + + if ((thisFileSizeLocal == 0) || (useSimpleUpload)) { + // Must use Simple Upload to replace the file online + try { + uploadResponse = uploadFileOneDriveApiInstance.simpleUploadReplace(localFilePath, dbItem.driveId, dbItem.id); + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); } - // A 404 is the expected response if the file was not present - if (e.httpStatusCode == 404) { - // The file was not found on OneDrive, need to upload it - // Check if file should be skipped based on skip_size config - if (thisFileSize >= this.newSizeLimit) { - log.vlog("Skipping item - excluded by skip_size config: ", path, " (", thisFileSize/2^^20," MB)"); - return; - } - - // start of upload file - write("Uploading new file ", path, " ... 
"); - JSONValue response; - - // Calculate upload speed - auto uploadStartTime = Clock.currTime(); + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to upload a modified file to OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); - if (!dryRun) { - // Resolve https://github.com/abraunegg/onedrive/issues/37 - if (thisFileSize == 0){ - // We can only upload zero size files via simpleFileUpload regardless of account type - // https://github.com/OneDrive/onedrive-api-docs/issues/53 - try { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - // error uploading file - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + + } catch (FileException e) { + // filesystem error + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } else { + // Configure JSONValue variables we use for a session upload + JSONValue currentOnlineData; + JSONValue uploadSessionData; + string currentETag; + + // As this is a unique thread, the sessionFilePath for where we save the data needs to be unique + // The best way to do this is generate a 10 digit alphanumeric string, and use this as the file extention + string threadUploadSessionFilePath = appConfig.uploadSessionFilePath ~ "." ~ generateAlphanumericString(); + + // Get the absolute latest object details from online + try { + currentOnlineData = uploadFileOneDriveApiInstance.getPathDetailsByDriveId(dbItem.driveId, localFilePath); + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to obtain latest file details from OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + + } + + // Was a valid JSON response provided? + if (currentOnlineData.type() == JSONType.object) { + // Does the response contain an eTag? + if (hasETag(currentOnlineData)) { + // Use the value returned from online + currentETag = currentOnlineData["eTag"].str; + } else { + // Use the database value + currentETag = dbItem.eTag; + } + } else { + // no valid JSON response + currentETag = dbItem.eTag; + } + + // Create the Upload Session + try { + uploadSessionData = createSessionFileUpload(uploadFileOneDriveApiInstance, localFilePath, dbItem.driveId, dbItem.parentId, baseName(localFilePath), currentETag, threadUploadSessionFilePath); + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to create an upload session on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + + } catch (FileException e) { + writeln("DEBUG TO REMOVE: Modified file upload FileException Handling (Create the Upload Session)"); + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + + // Perform the Upload using the session + try { + uploadResponse = performSessionFileUpload(uploadFileOneDriveApiInstance, thisFileSizeLocal, uploadSessionData, threadUploadSessionFilePath); + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to upload a file via a session to OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + + } catch (FileException e) { + writeln("DEBUG TO REMOVE: Modified file upload FileException Handling (Perform the Upload using the session)"); + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } + + } else { + // We are in a --dry-run scenario + uploadResponse = createFakeResponse(localFilePath); + } + + // Debug Log the modified upload response + addLogEntry("Modified File Upload Response: " ~ to!string(uploadResponse), ["debug"]); + + // Shutdown the API instance + uploadFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(uploadFileOneDriveApiInstance); + // Return JSON + return uploadResponse; + } + + // Query the OneDrive API using the provided driveId to get the latest quota details + ulong getRemainingFreeSpace(string driveId) { + + // Get the quota details for this driveId, as this could have changed since we started the application - the user could have added / deleted data online, or purchased additional storage + // Quota details are ONLY available for the main default driveId, as the OneDrive API does not provide quota details for shared folders + + JSONValue currentDriveQuota; + ulong remainingQuota; + + try { + // Create a new OneDrive API instance + OneDriveApi getCurrentDriveQuotaApiInstance; + getCurrentDriveQuotaApiInstance = new OneDriveApi(appConfig); + getCurrentDriveQuotaApiInstance.initialise(); + addLogEntry("Seeking available quota for this drive id: " ~ driveId, ["debug"]); + currentDriveQuota = getCurrentDriveQuotaApiInstance.getDriveQuota(driveId); + // Shut this API instance down + getCurrentDriveQuotaApiInstance.shutdown(); + // Free object and memory + object.destroy(getCurrentDriveQuotaApiInstance); + } catch (OneDriveException e) { + addLogEntry("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException", ["debug"]); + } + + // validate that currentDriveQuota is a JSON value + if (currentDriveQuota.type() == JSONType.object) { + // Response from API contains valid data + // If 'personal' accounts, if driveId == defaultDriveId, then we will have data + // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data + // If 'business' accounts, if driveId == defaultDriveId, then we will have data + // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value + + if ("quota" in currentDriveQuota){ + if (driveId == appConfig.defaultDriveId) { + // We potentially have updated quota remaining details available + // However in some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero + if ("remaining" in currentDriveQuota["quota"]){ + // We have valid quota remaining details returned for the provided drive id + remainingQuota = currentDriveQuota["quota"]["remaining"].integer; + + if (remainingQuota <= 0) { + if (appConfig.accountType == 
"personal"){ + // zero space available + addLogEntry("ERROR: OneDrive account currently has zero space available. Please free up some space online or purchase additional space."); + remainingQuota = 0; + appConfig.quotaAvailable = false; + } else { + // zero space available is being reported, maybe being restricted? + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); + remainingQuota = 0; + appConfig.quotaRestricted = true; + } + } + } + } else { + // quota details returned, but for a drive id that is not ours + if ("remaining" in currentDriveQuota["quota"]){ + // remaining is in the quota JSON response + if (currentDriveQuota["quota"]["remaining"].integer <= 0) { + // value returned is 0 or less than 0 + addLogEntry("OneDrive quota information is set at zero, as this is not our drive id, ignoring", ["verbose"]); + remainingQuota = 0; + appConfig.quotaRestricted = true; + } + } + } + } else { + // No quota details returned + if (driveId == appConfig.defaultDriveId) { + // no quota details returned for current drive id + addLogEntry("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online or purchase additional space."); + remainingQuota = 0; + appConfig.quotaRestricted = true; + } else { + // quota details not available + addLogEntry("WARNING: OneDrive quota information is being restricted as this is not our drive id.", ["debug"]); + remainingQuota = 0; + appConfig.quotaRestricted = true; + } + } + } + + // what was the determined available quota? + addLogEntry("Available quota: " ~ to!string(remainingQuota), ["debug"]); + return remainingQuota; + } + + // Perform a filesystem walk to uncover new data to upload to OneDrive + void scanLocalFilesystemPathForNewData(string path) { + + // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? 
+ string logPath;
+ if (path == ".") {
+ // get the configured sync_dir
+ logPath = buildNormalizedPath(appConfig.getValueString("sync_dir"));
+ } else {
+ // use what was passed in
+ if (!appConfig.getValueBool("monitor")) {
+ logPath = buildNormalizedPath(appConfig.getValueString("sync_dir")) ~ "/" ~ path;
+ } else {
+ logPath = path;
+ }
+ }
+
+ // Log the action that we are performing, however only if this is a directory
+ if (isDir(path)) {
+ if (!appConfig.surpressLoggingOutput) {
+ if (!cleanupLocalFiles) {
+ addLogEntry("Scanning the local file system '" ~ logPath ~ "' for new data to upload ...");
+ } else {
+ addLogEntry("Scanning the local file system '" ~ logPath ~ "' for data to cleanup ...");
+ }
+ }
+ }
+
+ auto startTime = Clock.currTime();
+ addLogEntry("Starting Filesystem Walk: " ~ to!string(startTime), ["debug"]);
+
+ // Perform the filesystem walk of this path, building an array of new items to upload
+ scanPathForNewData(path);
+
+ // Add a divider to the debug log to mark the end of the filesystem walk processing
+ addLogEntry("------------------------------------------------------------------", ["debug"]);
+
+ auto finishTime = Clock.currTime();
+ addLogEntry("Finished Filesystem Walk: " ~ to!string(finishTime), ["debug"]);
+
+ auto elapsedTime = finishTime - startTime;
+ addLogEntry("Elapsed Time Filesystem Walk: " ~ to!string(elapsedTime), ["debug"]);
+
+ // Upload new data that has been identified
+ // Are there any local items flagged for upload after the filesystem walk?
+ if (!newLocalFilesToUploadToOneDrive.empty) {
+ // There are elements to upload
+ addLogEntry("New items to upload to OneDrive: " ~ to!string(newLocalFilesToUploadToOneDrive.length), ["verbose"]);
+
+ // Reset totalDataToUpload
+ totalDataToUpload = 0;
+
+ // How much data do we need to upload? We need to know this to determine whether all the files can be uploaded
+ foreach (uploadFilePath; newLocalFilesToUploadToOneDrive) {
+ // validate that the path actually exists so that it can be counted
+ if (exists(uploadFilePath)) {
+ totalDataToUpload = totalDataToUpload + getSize(uploadFilePath);
+ }
+ }
+
+ // How much data is there to upload
+ if (totalDataToUpload < 1024) {
+ // Display as Bytes to upload
+ addLogEntry("Total New Data to Upload: " ~ to!string(totalDataToUpload) ~ " Bytes", ["verbose"]);
+ } else {
+ if ((totalDataToUpload > 1024) && (totalDataToUpload < 1048576)) {
+ // Display as KB to upload
+ addLogEntry("Total New Data to Upload: " ~ to!string((totalDataToUpload / 1024)) ~ " KB", ["verbose"]);
+ } else {
+ // Display as MB to upload
+ addLogEntry("Total New Data to Upload: " ~ to!string((totalDataToUpload / 1024 / 1024)) ~ " MB", ["verbose"]);
+ }
+ }
+
+ // How much space is available (Account Drive ID)
+ // The file could be uploaded to a shared folder, where we do not track how much free space is available ... 
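Editor's note: the Bytes / KB / MB branching above could be captured in one helper. A minimal sketch follows, assuming the same 1024-based units as the patch; `formatByteCount` is a hypothetical name and not part of this change.

```d
import std.conv : to;

// Hypothetical helper mirroring the Bytes / KB / MB display logic above (1024-based units)
string formatByteCount(ulong bytes) {
	if (bytes < 1024) return to!string(bytes) ~ " Bytes";
	if (bytes < 1048576) return to!string(bytes / 1024) ~ " KB";
	return to!string(bytes / 1024 / 1024) ~ " MB";
}

unittest {
	assert(formatByteCount(512) == "512 Bytes");
	assert(formatByteCount(2048) == "2 KB");
	assert(formatByteCount(5 * 1048576) == "5 MB");
}
```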
+ addLogEntry("Current Available Space Online (Account Drive ID): " ~ to!string((appConfig.remainingFreeSpace / 1024 / 1024)) ~ " MB", ["debug"]); + + // Perform the upload + uploadNewLocalFileItems(); + + // Cleanup array memory + newLocalFilesToUploadToOneDrive = []; + } + } + + // Scan this path for new data + void scanPathForNewData(string path) { + + ulong maxPathLength; + ulong pathWalkLength; + + // Add this logging break to assist with what was checked for each path + if (path != ".") { + addLogEntry("------------------------------------------------------------------", ["debug"]); + } + + // https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders + // If the path is greater than allowed characters, then one drive will return a '400 - Bad Request' + // Need to ensure that the URI is encoded before the check is made: + // - 400 Character Limit for OneDrive Business / Office 365 + // - 430 Character Limit for OneDrive Personal + + // Configure maxPathLength based on account type + if (appConfig.accountType == "personal") { + // Personal Account + maxPathLength = 430; + } else { + // Business Account / Office365 / SharePoint + maxPathLength = 400; + } + + // A short lived item that has already disappeared will cause an error - is the path still valid? + if (!exists(path)) { + addLogEntry("Skipping item - path has disappeared: " ~ path); + return; + } + + // Calculate the path length by walking the path and catch any UTF-8 sequence errors at the same time + // https://github.com/skilion/onedrive/issues/57 + // https://github.com/abraunegg/onedrive/issues/487 + // https://github.com/abraunegg/onedrive/issues/1192 + try { + pathWalkLength = path.byGrapheme.walkLength; + } catch (std.utf.UTFException e) { + // Path contains characters which generate a UTF exception + addLogEntry("Skipping item - invalid UTF sequence: " ~ path, ["info", "notify"]); + addLogEntry(" Error Reason:" ~ e.msg, ["debug"]); + return; + } + + // Is the path length is less than maxPathLength + if (pathWalkLength < maxPathLength) { + // Is this path unwanted + bool unwanted = false; + + // First check of this item - if we are in a --dry-run scenario, we may have 'fake deleted' this path + // thus, the entries are not in the dry-run DB copy, thus, at this point the client thinks that this is an item to upload + // Check this 'path' for an entry in pathFakeDeletedArray - if it is there, this is unwanted + if (dryRun) { + // Is this path in the array of fake deleted items? 
If yes, return early, nothing else to do, save processing + if (canFind(pathFakeDeletedArray, path)) return; + } + + // This not a Client Side Filtering check, nor a Microsoft Check, but is a sanity check that the path provided is UTF encoded correctly + // Check the std.encoding of the path against: Unicode 5.0, ASCII, ISO-8859-1, ISO-8859-2, WINDOWS-1250, WINDOWS-1251, WINDOWS-1252 + if (!unwanted) { + if(!isValid(path)) { + // Path is not valid according to https://dlang.org/phobos/std_encoding.html + addLogEntry("Skipping item - invalid character encoding sequence: " ~ path, ["info", "notify"]); + unwanted = true; + } + } + + // Check this path against the Client Side Filtering Rules + // - check_nosync + // - skip_dotfiles + // - skip_symlinks + // - skip_file + // - skip_dir + // - sync_list + // - skip_size + if (!unwanted) { + unwanted = checkPathAgainstClientSideFiltering(path); + } + + // Check this path against the Microsoft Naming Conventions & Restristions + // - Check path against Microsoft OneDrive restriction and limitations about Windows naming for files and folders + // - Check path for bad whitespace items + // - Check path for HTML ASCII Codes + // - Check path for ASCII Control Codes + if (!unwanted) { + unwanted = checkPathAgainstMicrosoftNamingRestrictions(path); + } + + if (!unwanted) { + // At this point, this path, we want to scan for new data as it is not excluded + if (isDir(path)) { + // Check if this path in the database + bool directoryFoundInDB = pathFoundInDatabase(path); + + // Was the path found in the database? + if (!directoryFoundInDB) { + // Path not found in database when searching all drive id's + if (!cleanupLocalFiles) { + // --download-only --cleanup-local-files not used + // Create this directory on OneDrive so that we can upload files to it + createDirectoryOnline(path); + } else { + // we need to clean up this directory + addLogEntry("Removing local directory as --download-only & --cleanup-local-files configured"); + // Remove any children of this path if they still exist + // Resolve 'Directory not empty' error when deleting local files + try { + foreach (DirEntry child; dirEntries(path, SpanMode.depth, false)) { + // what sort of child is this? + if (isDir(child.name)) { + addLogEntry("Removing local directory: " ~ child.name); + } else { + addLogEntry("Removing local file: " ~ child.name); } - } else { - // File is not a zero byte file - // Are we using OneDrive Personal or OneDrive Business? - // To solve 'Multiple versions of file shown on website after single upload' (https://github.com/abraunegg/onedrive/issues/2) - // check what 'account type' this is as this issue only affects OneDrive Business so we need some extra logic here - if (accountType == "personal"){ - // Original file upload logic - if (thisFileSize <= thresholdFileSize) { - try { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
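Editor's note on the path-length test introduced in `scanPathForNewData()` above: the client counts graphemes (user-perceived characters) rather than bytes, and a malformed UTF-8 sequence surfaces as a `UTFException`, which is treated as "skip this item". The standalone sketch below illustrates that check; the `pathWithinLimit` name is hypothetical, while the 400/430 limits come from the comments above.

```d
import std.uni : byGrapheme;
import std.range : walkLength;
import std.utf : UTFException;

// Hypothetical check mirroring scanPathForNewData(): count user-perceived characters
// and treat an invalid UTF-8 sequence as a reason to skip the path.
bool pathWithinLimit(string path, ulong maxPathLength) {
	try {
		return path.byGrapheme.walkLength < maxPathLength;
	} catch (UTFException e) {
		// Invalid UTF sequence - the client skips such items
		return false;
	}
}

unittest {
	assert(pathWithinLimit("Documents/Invoices", 400)); // Business / SharePoint limit
	assert(pathWithinLimit("Üben/ötzi.txt", 430));      // Personal limit; umlauts count once each
}
```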
- handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request as a session"); - // Try upload as a session - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - // error uploading file - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } else { - // File larger than threshold - use a session to upload - writeln(""); + + // are we in a --dry-run scenario? + if (!dryRun) { + // No --dry-run ... process local delete + if (exists(child)) { try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } + attrIsDir(child.linkAttributes) ? rmdir(child.name) : remove(child.name); } catch (FileException e) { // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... 
skipped."); displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; } } - } else { - // OneDrive Business Account - always use a session to upload - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } } } - - // response from OneDrive has to be a valid JSON object - if (response.type() == JSONType.object){ - // upload done without error - writeln("done."); - - // upload finished - auto uploadFinishTime = Clock.currTime(); - auto uploadDuration = uploadFinishTime - uploadStartTime; - log.vdebug("File Size: ", thisFileSize, " Bytes"); - log.vdebug("Upload Duration: ", (uploadDuration.total!"msecs"/1e3), " Seconds"); - auto uploadSpeed = (thisFileSize / (uploadDuration.total!"msecs"/1e3)/ 1024 / 1024); - log.vdebug("Upload Speed: ", uploadSpeed, " Mbps (approx)"); - - // Log upload action to log file - log.fileOnly("Uploading new file ", path, " ... done."); - // The file was uploaded, or a 4xx / 5xx error was generated - if ("size" in response){ - // The response JSON contains size, high likelihood valid response returned - ulong uploadFileSize = response["size"].integer; - - // In some cases the file that was uploaded was not complete, but 'completed' without errors on OneDrive - // This has been seen with PNG / JPG files mainly, which then contributes to generating a 412 error when we attempt to update the metadata - // Validate here that the file uploaded, at least in size, matches in the response to what the size is on disk - if (thisFileSize != uploadFileSize){ - // Upload size did not match local size - // There are 2 scenarios where this happens: - // 1. Failed Transfer - // 2. 
Upload file is going to a SharePoint Site, where Microsoft enriches the file with additional metadata with no way to disable - // For this client: - // - If a SharePoint Library, disableUploadValidation gets flagged as True - // - If we are syncing a business shared folder, this folder could reside on a Users Path (there should be no upload issue) or SharePoint (upload issue) - if ((disableUploadValidation)|| (syncBusinessFolders && (parent.driveId != defaultDriveId))){ - // Print a warning message - should only be triggered if: - // - disableUploadValidation gets flagged (documentLibrary account type) - // - syncBusinessFolders is being used & parent.driveId != defaultDriveId - log.log("WARNING: Uploaded file size does not match local file - skipping upload validation"); - log.vlog("WARNING: Due to Microsoft Sharepoint 'enrichment' of files, this file is now technically different to your local copy"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); - } else { - // OK .. the uploaded file does not match and we did not disable this validation - log.log("Uploaded file size does not match local file - upload failure - retrying"); - // Delete uploaded bad file - onedrive.deleteById(response["parentReference"]["driveId"].str, response["id"].str, response["eTag"].str); - // Re-upload - uploadNewFile(path); - return; - } - } - - // File validation is OK - if ((accountType == "personal") || (thisFileSize == 0)){ - // Update the item's metadata on OneDrive - string id = response["id"].str; - string cTag; - - // Is there a valid cTag in the response? - if ("cTag" in response) { - // use the cTag instead of the eTag because OneDrive may update the metadata of files AFTER they have been uploaded - cTag = response["cTag"].str; - } else { - // Is there an eTag in the response? - if ("eTag" in response) { - // use the eTag from the response as there was no cTag - cTag = response["eTag"].str; - } else { - // no tag available - set to nothing - cTag = ""; - } - } - // check if the path exists locally before we try to set the file times - if (exists(path)) { - SysTime mtime = timeLastModified(path).toUTC(); - // update the file modified time on OneDrive and save item details to database - uploadLastModifiedTime(parent.driveId, id, cTag, mtime); - } else { - // will be removed in different event! - log.log("File disappeared after upload: ", path); - } - } else { - // OneDrive Business Account - always use a session to upload - // The session includes a Request Body element containing lastModifiedDateTime - // which negates the need for a modify event against OneDrive - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } - } - - // update free space tracking if this is our drive id - if (parent.driveId == defaultDriveId) { - // how much space is left on OneDrive after upload? - remainingFreeSpace = (remainingFreeSpace - thisFileSize); - log.vlog("Remaining free space on OneDrive: ", remainingFreeSpace); + // Remove the path now that it is empty of children + addLogEntry("Removing local directory: " ~ path); + // are we in a --dry-run scenario? + if (!dryRun) { + // No --dry-run ... 
process local delete + try { + rmdirRecurse(path); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } - // File uploaded successfully, space details updated if required - return; - } else { - // response is not valid JSON, an error was returned from OneDrive - log.fileOnly("Uploading new file ", path, " ... error"); - uploadFailed = true; - return; } - } else { - // we are --dry-run - simulate the file upload - writeln("done."); - response = createFakeResponse(path); - // Log action to log file - log.fileOnly("Uploading new file ", path, " ... done."); - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); return; } } - // OneDrive returned a '429 - Too Many Requests' - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call + } + + // flag for if we are going traverse this path + bool skipFolderTraverse = false; + + // Before we traverse this 'path', we need to make a last check to see if this was just excluded + if (appConfig.accountType == "business") { + // search businessSharedFoldersOnlineToSkip for this path + if (canFind(businessSharedFoldersOnlineToSkip, path)) { + // This path was skipped - why? + addLogEntry("Skipping item '" ~ path ~ "' due to this path matching an existing online Business Shared Folder name", ["info", "notify"]); + skipFolderTraverse = true; + } + } + + // Do we traverse this path? + if (!skipFolderTraverse) { + // Try and access this directory and any path below + try { + auto entries = dirEntries(path, SpanMode.shallow, false); + foreach (DirEntry entry; entries) { + string thisPath = entry.name; + scanPathForNewData(thisPath); + } + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); return; } - // OneDrive returned a 'HTTP 5xx Server Side Error' - gracefully handling error - error message already logged - if (e.httpStatusCode >= 500) { - uploadFailed = true; + } + } else { + // https://github.com/abraunegg/onedrive/issues/984 + // path is not a directory, is it a valid file? + // pipes - whilst technically valid files, are not valid for this client + // prw-rw-r--. 1 user user 0 Jul 7 05:55 my_pipe + if (isFile(path)) { + // Path is a valid file, not a pipe + bool fileFoundInDB = pathFoundInDatabase(path); + // Was the file found in the database? + if (!fileFoundInDB) { + // File not found in database when searching all drive id's + // Do we upload the file or clean up the file? 
+ if (!cleanupLocalFiles) { + // --download-only --cleanup-local-files not used + // Add this path as a file we need to upload + addLogEntry("OneDrive Client flagging to upload this file to OneDrive: " ~ path, ["debug"]); + newLocalFilesToUploadToOneDrive ~= path; + } else { + // we need to clean up this file + addLogEntry("Removing local file as --download-only & --cleanup-local-files configured"); + // are we in a --dry-run scenario? + addLogEntry("Removing local file: " ~ path); + if (!dryRun) { + // No --dry-run ... process local file delete + safeRemove(path); + } + } + } + } else { + // path is not a valid file + addLogEntry("Skipping item - item is not a valid file: " ~ path, ["info", "notify"]); + } + } + } + } else { + // This path was skipped - why? + addLogEntry("Skipping item '" ~ path ~ "' due to the full path exceeding " ~ to!string(maxPathLength) ~ " characters (Microsoft OneDrive limitation)", ["info", "notify"]); + } + } + + // Handle a single file inotify trigger when using --monitor + void handleLocalFileTrigger(string localFilePath) { + // Is this path a new file or an existing one? + // Normally we would use pathFoundInDatabase() to calculate, but we need 'databaseItem' as well if the item is in the database + Item databaseItem; + bool fileFoundInDB = false; + string[3][] modifiedItemToUpload; + + foreach (driveId; driveIDsArray) { + if (itemDB.selectByPath(localFilePath, driveId, databaseItem)) { + fileFoundInDB = true; + break; + } + } + + // Was the file found in the database? + if (!fileFoundInDB) { + // This is a new file as it is not in the database + // Log that the file has been added locally + addLogEntry("[M] New local file added: " ~ localFilePath, ["verbose"]); + // Scan the parent path for any new data, not just this this item + scanLocalFilesystemPathForNewData(dirName(localFilePath)); + } else { + // This is a potentially modified file, needs to be handled as such. Is the item truly modified? + if (!testFileHash(localFilePath, databaseItem)) { + // The local file failed the hash comparison test - there is a data difference + // Log that the file has changed locally + addLogEntry("[M] Local file changed: " ~ localFilePath, ["verbose"]); + // Add the modified item to the array to upload + modifiedItemToUpload ~= [databaseItem.driveId, databaseItem.id, localFilePath]; + uploadChangedLocalFileToOneDrive(modifiedItemToUpload); + } + } + } + + // Query the database to determine if this path is within the existing database + bool pathFoundInDatabase(string searchPath) { + + // Check if this path in the database + Item databaseItem; + bool pathFoundInDB = false; + foreach (driveId; driveIDsArray) { + if (itemDB.selectByPath(searchPath, driveId, databaseItem)) { + pathFoundInDB = true; + } + } + return pathFoundInDB; + } + + // Create a new directory online on OneDrive + // - Test if we can get the parent path details from the database, otherwise we need to search online + // for the path flow and create the folder that way + void createDirectoryOnline(string thisNewPathToCreate) { + // Log what we are doing + addLogEntry("OneDrive Client requested to create this directory online: " ~ thisNewPathToCreate); + + Item parentItem; + JSONValue onlinePathData; + + // Create a new API Instance for this thread and initialise it + OneDriveApi createDirectoryOnlineOneDriveApiInstance; + createDirectoryOnlineOneDriveApiInstance = new OneDriveApi(appConfig); + createDirectoryOnlineOneDriveApiInstance.initialise(); + + // What parent path to use? 
+ string parentPath = dirName(thisNewPathToCreate); // will be either . or something else + + // Configure the parentItem by if this is the account 'root' use the root details, or search the database for the parent details + if (parentPath == ".") { + // Parent path is '.' which is the account root + // Use client defaults + parentItem.driveId = appConfig.defaultDriveId; // Should give something like 12345abcde1234a1 + parentItem.id = appConfig.defaultRootId; // Should give something like 12345ABCDE1234A1!101 + } else { + // Query the parent path online + addLogEntry("Attempting to query Local Database for this parent path: " ~ parentPath, ["debug"]); + + // Attempt a 2 step process to work out where to create the directory + // Step 1: Query the DB first for the parent path, to try and avoid an API call + // Step 2: Query online as last resort + + // Step 1: Check if this parent path in the database + Item databaseItem; + bool parentPathFoundInDB = false; + + foreach (driveId; driveIDsArray) { + addLogEntry("Query DB with this driveID for the Parent Path: " ~ driveId, ["debug"]); + // Query the database for this parent path using each driveId that we know about + if (itemDB.selectByPath(parentPath, driveId, databaseItem)) { + parentPathFoundInDB = true; + addLogEntry("Parent databaseItem: " ~ to!string(databaseItem), ["debug"]); + addLogEntry("parentPathFoundInDB: " ~ to!string(parentPathFoundInDB), ["debug"]); + parentItem = databaseItem; + } + } + + // After querying all DB entries for each driveID for the parent path, what are the details in parentItem? + addLogEntry("Parent parentItem after DB Query exhausted: " ~ to!string(parentItem), ["debug"]); + + // Step 2: Query for the path online if not found in the local database + if (!parentPathFoundInDB) { + // parent path not found in database + try { + addLogEntry("Attempting to query OneDrive Online for this parent path as path not found in local database: " ~ parentPath, ["debug"]); + onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetails(parentPath); + addLogEntry("Online Parent Path Query Response: " ~ to!string(onlinePathData), ["debug"]); + + // Save item to the database + saveItem(onlinePathData); + parentItem = makeItem(onlinePathData); + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 404) { + // Parent does not exist ... need to create parent + addLogEntry("Parent path does not exist online: " ~ parentPath, ["debug"]); + createDirectoryOnline(parentPath); + // no return here as we need to continue, but need to re-query the OneDrive API to get the right parental details now that they exist + onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetails(parentPath); + parentItem = makeItem(onlinePathData); + } else { + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(createDirectoryOnlineOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to create a remote directory on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + createDirectoryOnline(thisNewPathToCreate); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + } + } + } + + // Make sure the full path does not exist online, this should generate a 404 response, to which then the folder will be created online + try { + // Try and query the OneDrive API for the path we need to create + addLogEntry("Attempting to query OneDrive API for this path: " ~ thisNewPathToCreate, ["debug"]); + addLogEntry("parentItem details: " ~ to!string(parentItem), ["debug"]); + + if (parentItem.driveId == appConfig.defaultDriveId) { + // Use getPathDetailsByDriveId + onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, thisNewPathToCreate); + } else { + // If the parentItem.driveId is not our driveId - the path we are looking for will not be at the logical location that getPathDetailsByDriveId + // can use - as it will always return a 404 .. even if the path actually exists (which is the whole point of this test) + // Search the parentItem.driveId for any folder name match that we are going to create, then compare response JSON items with parentItem.id + // If no match, the folder we want to create does not exist at the location we are seeking to create it at, thus generate a 404 + onlinePathData = createDirectoryOnlineOneDriveApiInstance.searchDriveForPath(parentItem.driveId, baseName(thisNewPathToCreate)); + + // Process the response from searching the drive + ulong responseCount = count(onlinePathData["value"].array); + if (responseCount > 0) { + // Search 'name' matches were found .. need to match these against parentItem.id + bool foundDirectoryOnline = false; + JSONValue foundDirectoryJSONItem; + // Items were returned .. but is one of these what we are looking for? + foreach (childJSON; onlinePathData["value"].array) { + // Is this item not a file? 
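+ // Note: the two name checks inside this block treat names that differ only by case as the
+ // same item, in line with the Microsoft file naming guidance referenced elsewhere in this
+ // module (e.g. "Oscar" and "oscar" would collide online).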
+ if (!isFileItem(childJSON)) { + Item thisChildItem = makeItem(childJSON); + // Direct Match Check + if ((parentItem.id == thisChildItem.parentId) && (baseName(thisNewPathToCreate) == thisChildItem.name)) { + // High confidence that this child folder is a direct match we are trying to create and it already exists online + addLogEntry("Path we are searching for exists online (Direct Match): " ~ baseName(thisNewPathToCreate), ["debug"]); + addLogEntry("childJSON: " ~ to!string(childJSON), ["debug"]); + foundDirectoryOnline = true; + foundDirectoryJSONItem = childJSON; + break; + } + // Full Lower Case POSIX Match Check + string childAsLower = toLower(childJSON["name"].str); + string thisFolderNameAsLower = toLower(baseName(thisNewPathToCreate)); + + if (childAsLower == thisFolderNameAsLower) { + // This is a POSIX 'case in-sensitive match' ..... + // Local item name has a 'case-insensitive match' to an existing item on OneDrive + addLogEntry("Path we are searching for exists online (POSIX 'case in-sensitive match'): " ~ baseName(thisNewPathToCreate), ["debug"]); + addLogEntry("childJSON: " ~ to!string(childJSON), ["debug"]); + foundDirectoryOnline = true; + foundDirectoryJSONItem = childJSON; + break; + } + } + } + + if (foundDirectoryOnline) { + // Directory we are seeking was found online ... + onlinePathData = foundDirectoryJSONItem; + } else { + // No 'search item matches found' - raise a 404 so that the exception handling will take over to create the folder + throw new OneDriveException(404, "Name not found via search"); + } + } else { + // No 'search item matches found' - raise a 404 so that the exception handling will take over to create the folder + throw new OneDriveException(404, "Name not found via search"); + } + } + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 404) { + // This is a good error - it means that the directory to create 100% does not exist online + // The directory was not found on the drive id we queried + addLogEntry("The requested directory to create was not found on OneDrive - creating remote directory: " ~ thisNewPathToCreate, ["verbose"]); + + // Build up the create directory request + JSONValue createDirectoryOnlineAPIResponse; + JSONValue newDriveItem = [ + "name": JSONValue(baseName(thisNewPathToCreate)), + "folder": parseJSON("{}") + ]; + + // Submit the creation request + // Fix for https://github.com/skilion/onedrive/issues/356 + if (!dryRun) { + try { + // Attempt to create a new folder on the required driveId and parent item id + string requiredDriveId; + string requiredParentItemId; + + // Is this a Personal Account and is the item a Remote Object (Shared Folder) ? + if ((appConfig.accountType == "personal") && (parentItem.type == ItemType.remote)) { + // Yes .. Shared Folder + addLogEntry("parentItem data: " ~ to!string(parentItem), ["debug"]); + requiredDriveId = parentItem.remoteDriveId; + requiredParentItemId = parentItem.remoteId; + } else { + // Not a personal account + Shared Folder + requiredDriveId = parentItem.driveId; + requiredParentItemId = parentItem.id; + } + + // Where are we creating this new folder? 
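+ // For reference (assumption about the API wrapper): createById() is expected to map to a
+ // Microsoft Graph request of the form:
+ //   POST /drives/{requiredDriveId}/items/{requiredParentItemId}/children
+ //   { "name": "<folder name>", "folder": {} }
+ // which is why the drive id and parent item id are resolved above before the call is made.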
+ addLogEntry("requiredDriveId: " ~ requiredDriveId, ["debug"]); + addLogEntry("requiredParentItemId: " ~ requiredParentItemId, ["debug"]); + addLogEntry("newDriveItem JSON: " ~ to!string(newDriveItem), ["debug"]); + + // Create the new folder + createDirectoryOnlineAPIResponse = createDirectoryOnlineOneDriveApiInstance.createById(requiredDriveId, requiredParentItemId, newDriveItem); + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(createDirectoryOnlineAPIResponse); + // Log that the directory was created + addLogEntry("Successfully created the remote directory " ~ thisNewPathToCreate ~ " on Microsoft OneDrive"); + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 409) { + // OneDrive API returned a 404 (above) to say the directory did not exist + // but when we attempted to create it, OneDrive responded that it now already exists + addLogEntry("OneDrive reported that " ~ thisNewPathToCreate ~ " already exists .. OneDrive API race condition", ["verbose"]); + return; + } else { + // some other error from OneDrive was returned - display what it is + addLogEntry("OneDrive generated an error when creating this path: " ~ thisNewPathToCreate); + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + return; + } + } + } else { + // Simulate a successful 'directory create' & save it to the dryRun database copy + addLogEntry("Successfully created the remote directory " ~ thisNewPathToCreate ~ " on Microsoft OneDrive"); + // The simulated response has to pass 'makeItem' as part of saveItem + auto fakeResponse = createFakeResponse(thisNewPathToCreate); + // Save item to the database + saveItem(fakeResponse); + } + + // Shutdown API instance + createDirectoryOnlineOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(createDirectoryOnlineOneDriveApiInstance); + return; + + } else { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(createDirectoryOnlineOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to create a remote directory on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + createDirectoryOnline(thisNewPathToCreate); + } else { + // If we get a 400 error, there is an issue creating this folder on Microsoft OneDrive for some reason + // If the error is not 400, re-try, else fail + if (exception.httpStatusCode != 400) { + // Attempt a re-try + createDirectoryOnline(thisNewPathToCreate); + } else { + // We cant create this directory online + addLogEntry("This folder cannot be created online: " ~ buildNormalizedPath(absolutePath(thisNewPathToCreate)), ["debug"]); + } + } + } + } + + // If we get to this point - onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, thisNewPathToCreate) generated a 'valid' response .... + // This means that the folder potentially exists online .. which is odd .. as it should not have existed + if (onlinePathData.type() == JSONType.object) { + // A valid object was responded with + if (onlinePathData["name"].str == baseName(thisNewPathToCreate)) { + // OneDrive 'name' matches local path name + if (appConfig.accountType == "business") { + // We are a business account, this existing online folder, could be a Shared Online Folder and is the 'Add shortcut to My files' item + addLogEntry("onlinePathData: " ~ to!string(onlinePathData), ["debug"]); + + if (isItemRemote(onlinePathData)) { + // The folder is a remote item ... we do not want to create this ... + addLogEntry("Remote Existing Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'", ["debug"]); + addLogEntry("We need to skip this path: " ~ thisNewPathToCreate, ["debug"]); + + // Add this path to businessSharedFoldersOnlineToSkip + businessSharedFoldersOnlineToSkip ~= [thisNewPathToCreate]; + // no save to database, no online create + return; + } + } + + // Path found online + addLogEntry("The requested directory to create was found on OneDrive - skipping creating the directory: " ~ thisNewPathToCreate, ["verbose"]); + + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(onlinePathData); + return; + } else { + // Normally this would throw an error, however we cant use throw new posixException() + string msg = format("POSIX 'case-insensitive match' between '%s' (local) and '%s' (online) which violates the Microsoft OneDrive API namespace convention", baseName(thisNewPathToCreate), onlinePathData["name"].str); + displayPosixErrorMessage(msg); + addLogEntry("ERROR: Requested directory to create has a 'case-insensitive match' to an existing directory on OneDrive online."); + addLogEntry("ERROR: To resolve, rename this local directory: " ~ buildNormalizedPath(absolutePath(thisNewPathToCreate))); + addLogEntry("Skipping creating this directory online due to 'case-insensitive match': " ~ thisNewPathToCreate); + // Add this path to posixViolationPaths + posixViolationPaths ~= [thisNewPathToCreate]; + return; + } + } else { + // response is not valid JSON, an error was returned from OneDrive + addLogEntry("ERROR: There was an error performing this operation on Microsoft OneDrive"); + addLogEntry("ERROR: Increase logging verbosity to assist determining why."); + 
addLogEntry("Skipping: " ~ buildNormalizedPath(absolutePath(thisNewPathToCreate))); + return; + } + } + + // Test that the online name actually matches the requested local name + void performPosixTest(string localNameToCheck, string onlineName) { + + // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file + // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, + // even though some file systems (such as a POSIX-compliant file system) may consider them as different. + // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior. + if (localNameToCheck != onlineName) { + // POSIX Error + // Local item name has a 'case-insensitive match' to an existing item on OneDrive + throw new posixException(localNameToCheck, onlineName); + } + } + + // Upload new file items as identified + void uploadNewLocalFileItems() { + // Lets deal with the new local items in a batch process + ulong batchSize = appConfig.concurrentThreads; + ulong batchCount = (newLocalFilesToUploadToOneDrive.length + batchSize - 1) / batchSize; + ulong batchesProcessed = 0; + + foreach (chunk; newLocalFilesToUploadToOneDrive.chunks(batchSize)) { + uploadNewLocalFileItemsInParallel(chunk); + } + } + + // Upload the file batches in parallel + void uploadNewLocalFileItemsInParallel(string[] array) { + foreach (i, fileToUpload; taskPool.parallel(array)) { + addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]); + uploadNewFile(fileToUpload); + addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]); + } + } + + // Upload a new file to OneDrive + void uploadNewFile(string fileToUpload) { + // Debug for the moment + addLogEntry("fileToUpload: " ~ fileToUpload, ["debug"]); + + // These are the details of the item we need to upload + // How much space is remaining on OneDrive + ulong remainingFreeSpaceOnline; + // Did the upload fail? + bool uploadFailed = false; + // Did we skip due to exceeding maximum allowed size? + bool skippedMaxSize = false; + // Did we skip to an exception error? + bool skippedExceptionError = false; + // Is the parent path in the item database? + bool parentPathFoundInDB = false; + // Get this file size + ulong thisFileSize; + // Is there space available online + bool spaceAvailableOnline = false; + + // Check the database for the parent path of fileToUpload + Item parentItem; + // What parent path to use? + string parentPath = dirName(fileToUpload); // will be either . 
or something else
+ if (parentPath == "."){
+ // Assume this is a new file in the user's configured sync_dir root
+ // Use client defaults
+ parentItem.id = appConfig.defaultRootId; // Should give something like 12345ABCDE1234A1!101
+ parentItem.driveId = appConfig.defaultDriveId; // Should give something like 12345abcde1234a1
+ parentPathFoundInDB = true;
+ } else {
+ // Query the database using each of the driveId's we are using
+ foreach (driveId; driveIDsArray) {
+ // Query the database for this parent path using each driveId
+ Item dbResponse;
+ if(itemDB.selectByPath(parentPath, driveId, dbResponse)){
+ // parent path was found in the database
+ parentItem = dbResponse;
+ parentPathFoundInDB = true;
+ }
+ }
+ }
+
+ // If the parent path was found in the DB, to ensure we are uploading to the right location 'parentItem.driveId' must not be empty
+ if ((parentPathFoundInDB) && (parentItem.driveId.empty)) {
+ // switch to using defaultDriveId
+ addLogEntry("parentItem.driveId is empty - using defaultDriveId for upload API calls");
+ parentItem.driveId = appConfig.defaultDriveId;
+ }
+
+ // Can we read the file - as a permissions issue or actual file corruption will cause a failure
+ // Resolves: https://github.com/abraunegg/onedrive/issues/113
+ if (readLocalFile(fileToUpload)) {
+ if (parentPathFoundInDB) {
+ // The local file can be read - so we can attempt to upload it in this thread
+ // Get the file size
+ thisFileSize = getSize(fileToUpload);
+ // Does this file exceed the maximum filesize for OneDrive
+ // Resolves: https://github.com/skilion/onedrive/issues/121 , https://github.com/skilion/onedrive/issues/294 , https://github.com/skilion/onedrive/issues/329
+ if (thisFileSize <= maxUploadFileSize) {
+ // Is there enough free space on OneDrive when we started this thread, to upload the file to OneDrive?
+ remainingFreeSpaceOnline = getRemainingFreeSpace(parentItem.driveId);
+ addLogEntry("Current Available Space Online (Upload Target Drive ID): " ~ to!string((remainingFreeSpaceOnline / 1024 / 1024)) ~ " MB", ["debug"]);
+
+ // When we compare the space online to the total we are trying to upload - is there space online?
+ ulong calculatedSpaceOnlinePostUpload = remainingFreeSpaceOnline - thisFileSize;
+
+ // For 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated
+ // For 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true
+ // For 'business' accounts, if driveId == defaultDriveId, then we will have data
+ // For 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true
+
+ if (remainingFreeSpaceOnline > totalDataToUpload) {
+ // Space available
+ spaceAvailableOnline = true;
+ } else {
+ // we need to take a more granular look
+ // What was the latest getRemainingFreeSpace() value?
+ if (appConfig.quotaAvailable) {
+ // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload?
+ if (calculatedSpaceOnlinePostUpload > 0) {
+ // Based on this thread action, we believe that there is space available online to upload - proceed
+ spaceAvailableOnline = true;
+ }
+ }
+ }
+
+ // Is quota being restricted?
+ if (appConfig.quotaRestricted) {
+ // If the upload target drive is not our drive id, then it is a shared folder .. 
we need to print a space warning message + if (parentItem.driveId != appConfig.defaultDriveId) { + // Different message depending on account type + if (appConfig.accountType == "personal") { + addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed.", ["verbose"]); + } else { + addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); + } + } else { + if (appConfig.accountType == "personal") { + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed.", ["verbose"]); + } else { + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); + } + } + // Space available online is being restricted - so we have no way to really know if there is space available online + spaceAvailableOnline = true; + } + + // Do we have space available or is space available being restricted (so we make the blind assumption that there is space available) + if (spaceAvailableOnline) { + // We need to check that this new local file does not exist on OneDrive + + // Create a new API Instance for this thread and initialise it + OneDriveApi checkFileOneDriveApiInstance; + checkFileOneDriveApiInstance = new OneDriveApi(appConfig); + checkFileOneDriveApiInstance.initialise(); + + JSONValue fileDetailsFromOneDrive; + + // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file + // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, + // even though some file systems (such as a POSIX-compliant file systems that Linux use) may consider them as different. + // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior, OneDrive does not use this. + + // In order to upload this file - this query HAS to respond as a 404 - Not Found + + // Does this 'file' already exist on OneDrive? + try { + fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); + // Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API + if (hasName(fileDetailsFromOneDrive)) { + performPosixTest(baseName(fileToUpload), fileDetailsFromOneDrive["name"].str); + } else { + throw new jsonResponseException("Unable to perform POSIX test as the OneDrive API request generated an invalid JSON response"); + } + + // No 404 or otherwise was triggered, meaning that the file already exists online and passes the POSIX test ... + addLogEntry("fileDetailsFromOneDrive after exist online check: " ~ to!string(fileDetailsFromOneDrive), ["debug"]); + + // Does the data from online match our local file? + if (performUploadIntegrityValidationChecks(fileDetailsFromOneDrive, fileToUpload, thisFileSize)) { + // Save item to the database + saveItem(fileDetailsFromOneDrive); + } + } catch (OneDriveException exception) { + // If we get a 404 .. the file is not online .. this is what we want .. 
file does not exist online + if (exception.httpStatusCode == 404) { + // The file has been checked, client side filtering checked, does not exist online - we need to upload it + addLogEntry("fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); generated a 404 - file does not exist online - must upload it", ["debug"]); + uploadFailed = performNewFileUpload(parentItem, fileToUpload, thisFileSize); + } else { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(checkFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to validate file details on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + uploadNewFile(fileToUpload); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + } catch (posixException e) { + displayPosixErrorMessage(e.msg); + uploadFailed = true; + } catch (jsonResponseException e) { + addLogEntry(e.msg, ["debug"]); + uploadFailed = true; + } + + // Operations in this thread are done / complete - either upload was done or it failed + checkFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(checkFileOneDriveApiInstance); + } else { + // skip file upload - insufficent space to upload + addLogEntry("Skipping uploading this new file as it exceeds the available free space on OneDrive: " ~ fileToUpload); + uploadFailed = true; + } + } else { + // Skip file upload - too large + addLogEntry("Skipping uploading this new file as it exceeds the maximum size allowed by OneDrive: " ~ fileToUpload); + uploadFailed = true; + } + } else { + // why was the parent path not in the database? 
+ if (canFind(posixViolationPaths, parentPath)) { + addLogEntry("ERROR: POSIX 'case-insensitive match' for the parent path which violates the Microsoft OneDrive API namespace convention."); + } else { + addLogEntry("ERROR: Parent path is not in the database or online."); + } + addLogEntry("ERROR: Unable to upload this file: " ~ fileToUpload); + uploadFailed = true; + } + } else { + // Unable to read local file + addLogEntry("Skipping uploading this file as it cannot be read (file permissions or file corruption): " ~ fileToUpload); + uploadFailed = true; + } + + // Upload success or failure? + if (uploadFailed) { + // Need to add this to fileUploadFailures to capture at the end + fileUploadFailures ~= fileToUpload; + } + } + + // Perform the actual upload to OneDrive + bool performNewFileUpload(Item parentItem, string fileToUpload, ulong thisFileSize) { + + // Assume that by default the upload fails + bool uploadFailed = true; + + // OneDrive API Upload Response + JSONValue uploadResponse; + + // Create the OneDriveAPI Upload Instance + OneDriveApi uploadFileOneDriveApiInstance; + uploadFileOneDriveApiInstance = new OneDriveApi(appConfig); + uploadFileOneDriveApiInstance.initialise(); + + // Calculate upload speed + auto uploadStartTime = Clock.currTime(); + + // Is this a dry-run scenario? + if (!dryRun) { + // Not a dry-run situation + // Do we use simpleUpload or create an upload session? + bool useSimpleUpload = false; + if (thisFileSize <= sessionThresholdFileSize) { + useSimpleUpload = true; + } + + // We can only upload zero size files via simpleFileUpload regardless of account type + // Reference: https://github.com/OneDrive/onedrive-api-docs/issues/53 + // Additionally, only where file size is < 4MB should be uploaded by simpleUpload - everything else should use a session to upload + + if ((thisFileSize == 0) || (useSimpleUpload)) { + try { + // Attempt to upload the zero byte file using simpleUpload for all account types + uploadResponse = uploadFileOneDriveApiInstance.simpleUpload(fileToUpload, parentItem.driveId, parentItem.id, baseName(fileToUpload)); + uploadFailed = false; + addLogEntry("Uploading new file " ~ fileToUpload ~ " ... done."); + // Shutdown the API + uploadFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(uploadFileOneDriveApiInstance); + } catch (OneDriveException exception) { + // An error was responded with - what was it + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
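+ // Assumption for context: handleOneDriveThrottleRequest() is expected to read the
+ // Retry-After value from the API instance and sleep for at least that many seconds
+ // (falling back to a default delay when the header is absent) before the retry below.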
+ handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance);
+ addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]);
+ }
+ // re-try the specific changes queries
+ if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
+ // 408 - Request Time Out
+ // 503 - Service Unavailable
+ // 504 - Gateway Timeout
+ // Transient error - try again in 30 seconds
+ auto errorArray = splitLines(exception.msg);
+ addLogEntry(to!string(errorArray[0]) ~ " when attempting to upload a new file to OneDrive - retrying applicable request in 30 seconds");
+ addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]);
+
+ // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request.
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]);
+ Thread.sleep(dur!"seconds"(30));
+ }
+ // re-try original request - retried for 429, 503, 504 - but loop back calling this function
+ performNewFileUpload(parentItem, fileToUpload, thisFileSize);
+ // Return upload status
+ return uploadFailed;
+ } else {
+ // Default operation if not 408,429,503,504 errors
+ // display what the error is
+ addLogEntry("Uploading new file " ~ fileToUpload ~ " ... failed.");
+ displayOneDriveErrorMessage(exception.msg, thisFunctionName);
+ }
+
+ } catch (FileException e) {
+ // display the error message
+ addLogEntry("Uploading new file " ~ fileToUpload ~ " ... failed.");
+ displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
+ }
+ } else {
+ // Session Upload for this criteria:
+ // - Personal Account and file size > 4MB
+ // - All Business | Office365 | SharePoint files > 0 bytes
+ JSONValue uploadSessionData;
+ // As this is a unique thread, the sessionFilePath for where we save the data needs to be unique
+ // The best way to do this is to generate a 10 digit alphanumeric string, and use this as the file extension
+ string threadUploadSessionFilePath = appConfig.uploadSessionFilePath ~ "." ~ generateAlphanumericString();
+
+ // Attempt to upload the > 4MB file using an upload session for all account types
+ try {
+ // Create the Upload Session
+ uploadSessionData = createSessionFileUpload(uploadFileOneDriveApiInstance, fileToUpload, parentItem.driveId, parentItem.id, baseName(fileToUpload), null, threadUploadSessionFilePath);
+ } catch (OneDriveException exception) {
+ // An error was responded with - what was it
+
+ string thisFunctionName = getFunctionName!({});
+ // HTTP request returned status code 408,429,503,504
+ if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) {
+ // Handle the 429
+ if (exception.httpStatusCode == 429) {
+ // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to create an upload session on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + performNewFileUpload(parentItem, fileToUpload, thisFileSize); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + addLogEntry("Uploading new file " ~ fileToUpload ~ " ... failed."); + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + + } catch (FileException e) { + // display the error message + addLogEntry("Uploading new file " ~ fileToUpload ~ " ... failed."); + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + + // Do we have a valid session URL that we can use ? + if (uploadSessionData.type() == JSONType.object) { + // This is a valid JSON object + bool sessionDataValid = true; + + // Validate that we have the following items which we need + if (!hasUploadURL(uploadSessionData)) { + sessionDataValid = false; + addLogEntry("Session data missing 'uploadUrl'", ["debug"]); + } + + if (!hasNextExpectedRanges(uploadSessionData)) { + sessionDataValid = false; + addLogEntry("Session data missing 'nextExpectedRanges'", ["debug"]); + } + + if (!hasLocalPath(uploadSessionData)) { + sessionDataValid = false; + addLogEntry("Session data missing 'localPath'", ["debug"]); + } + + if (sessionDataValid) { + // We have a valid Upload Session Data we can use + + try { + // Try and perform the upload session + uploadResponse = performSessionFileUpload(uploadFileOneDriveApiInstance, thisFileSize, uploadSessionData, threadUploadSessionFilePath); + + if (uploadResponse.type() == JSONType.object) { + uploadFailed = false; + addLogEntry("Uploading new file " ~ fileToUpload ~ " ... done."); + } else { + addLogEntry("Uploading new file " ~ fileToUpload ~ " ... failed."); + uploadFailed = true; + } + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). 
We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to upload a new file via a session to OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + performNewFileUpload(parentItem, fileToUpload, thisFileSize); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + addLogEntry("Uploading new file " ~ fileToUpload ~ " ... failed."); + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + } else { + // No Upload URL or nextExpectedRanges or localPath .. not a valid JSON we can use + addLogEntry("Session data is missing required elements to perform a session upload.", ["verbose"]); + addLogEntry("Uploading new file " ~ fileToUpload ~ " ... failed."); + } + } else { + // Create session Upload URL failed + addLogEntry("Uploading new file " ~ fileToUpload ~ " ... failed."); + } + } + } else { + // We are in a --dry-run scenario + uploadResponse = createFakeResponse(fileToUpload); + uploadFailed = false; + addLogEntry("Uploading new file " ~ fileToUpload ~ " ... done.", ["info", "notify"]); + } + + // Upload has finished + auto uploadFinishTime = Clock.currTime(); + // If no upload failure, calculate metrics, perform integrity validation + if (!uploadFailed) { + // Upload did not fail ... 
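+ // Worked example of the metrics below (illustrative figures): a 20 MiB file uploaded in
+ // 8 seconds gives 20971520 bytes / 8 s = 2621440 bytes/s, and dividing by 1024 twice
+ // yields 2.5 - i.e. the logged value is mebibytes per second, reported as approximate.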
+ auto uploadDuration = uploadFinishTime - uploadStartTime;
+ addLogEntry("File Size: " ~ to!string(thisFileSize) ~ " Bytes", ["debug"]);
+ addLogEntry("Upload Duration: " ~ to!string((uploadDuration.total!"msecs"/1e3)) ~ " Seconds", ["debug"]);
+ auto uploadSpeed = (thisFileSize / (uploadDuration.total!"msecs"/1e3)/ 1024 / 1024);
+ addLogEntry("Upload Speed: " ~ to!string(uploadSpeed) ~ " Mbps (approx)", ["debug"]);
+
+ // OK, as the upload did not fail, we need to save the response from OneDrive, but it has to be a valid JSON response
+ if (uploadResponse.type() == JSONType.object) {
+ // check if the path still exists locally before we try to set the file times online - short lived files may no longer exist locally by the time the upload completes
+ if (exists(fileToUpload)) {
+ if (!dryRun) {
+ // Check the integrity of the uploaded file, if the local file still exists
+ performUploadIntegrityValidationChecks(uploadResponse, fileToUpload, thisFileSize);
+
+ // Update the file modified time on OneDrive and save item details to database
+ // Update the item's metadata on OneDrive
+ SysTime mtime = timeLastModified(fileToUpload).toUTC();
+ mtime.fracSecs = Duration.zero;
+ string newFileId = uploadResponse["id"].str;
+ string newFileETag = uploadResponse["eTag"].str;
+ // Attempt to update the online date time stamp based on our local data
+ uploadLastModifiedTime(parentItem.driveId, newFileId, mtime, newFileETag);
+ }
+ } else {
+ // will be removed in a different event!
+ addLogEntry("File disappeared locally after upload: " ~ fileToUpload);
+ }
+ } else {
+ // Log that an invalid JSON object was returned
+ addLogEntry("uploadFileOneDriveApiInstance.simpleUpload or session.upload call returned an invalid JSON Object from the OneDrive API", ["debug"]);
+ }
+ }
+
+ // Return upload status
+ return uploadFailed;
+ }
+
+ // Create the OneDrive Upload Session
+ JSONValue createSessionFileUpload(OneDriveApi activeOneDriveApiInstance, string fileToUpload, string parentDriveId, string parentId, string filename, string eTag, string threadUploadSessionFilePath) {
+
+ // Upload file via a OneDrive API session
+ JSONValue uploadSession;
+
+ // Calculate modification time
+ SysTime localFileLastModifiedTime = timeLastModified(fileToUpload).toUTC();
+ localFileLastModifiedTime.fracSecs = Duration.zero;
+
+ // Construct the fileSystemInfo JSON component needed to create the Upload Session
+ JSONValue fileSystemInfo = [
+ "item": JSONValue([
+ "@microsoft.graph.conflictBehavior": JSONValue("replace"),
+ "fileSystemInfo": JSONValue([
+ "lastModifiedDateTime": localFileLastModifiedTime.toISOExtString()
+ ])
+ ])
+ ];
+
+ // Try to create the upload session for this file
+ uploadSession = activeOneDriveApiInstance.createUploadSession(parentDriveId, parentId, filename, eTag, fileSystemInfo);
+
+ if (uploadSession.type() == JSONType.object) {
+ // a valid session object was created
+ if ("uploadUrl" in uploadSession) {
+ // Add the file path we are uploading to this JSON Session Data
+ uploadSession["localPath"] = fileToUpload;
+ // Save this session
+ saveSessionFile(threadUploadSessionFilePath, uploadSession);
+ }
+ } else {
+ // no valid session was created
+ addLogEntry("Creation of OneDrive API Upload Session failed.", ["verbose"]);
+ // upload() will return a JSONValue response; create an empty JSONValue response to return
+ uploadSession = null;
+ }
+ // Return the JSON
+ return uploadSession;
+ }
+
+ // Save the session upload data
+ void saveSessionFile(string threadUploadSessionFilePath, 
JSONValue uploadSessionData) { + + try { + std.file.write(threadUploadSessionFilePath, uploadSessionData.toString()); + } catch (FileException e) { + // display the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + } + } + + // Perform the upload of file via the Upload Session that was created + JSONValue performSessionFileUpload(OneDriveApi activeOneDriveApiInstance, ulong thisFileSize, JSONValue uploadSessionData, string threadUploadSessionFilePath) { + + // Response for upload + JSONValue uploadResponse; + + // Session JSON needs to contain valid elements + // Get the offset details + ulong fragmentSize = 10 * 2^^20; // 10 MiB + ulong fragmentCount = 0; + ulong fragSize = 0; + ulong offset = uploadSessionData["nextExpectedRanges"][0].str.splitter('-').front.to!ulong; + size_t expected_total_fragments = cast(ulong) ceil(double(thisFileSize) / double(fragmentSize)); + ulong start_unix_time = Clock.currTime.toUnixTime(); + int h, m, s; + string etaString; + string uploadLogEntry = "Uploading: " ~ uploadSessionData["localPath"].str ~ " ... "; + + // Start the session upload using the active API instance for this thread + while (true) { + fragmentCount++; + addLogEntry("Fragment: " ~ to!string(fragmentCount) ~ " of " ~ to!string(expected_total_fragments), ["debug"]); + + // What ETA string do we use? + auto eta = calc_eta((fragmentCount -1), expected_total_fragments, start_unix_time); + if (eta == 0) { + // Initial calculation ... + etaString = format!"| ETA --:--:--"; + } else { + // we have at least an ETA provided + dur!"seconds"(eta).split!("hours", "minutes", "seconds")(h, m, s); + etaString = format!"| ETA %02d:%02d:%02d"( h, m, s); + } + + // Calculate this progress output + auto ratio = cast(double)(fragmentCount -1) / expected_total_fragments; + // Convert the ratio to a percentage and format it to two decimal places + string percentage = leftJustify(format("%d%%", cast(int)(ratio * 100)), 5, ' '); + addLogEntry(uploadLogEntry ~ percentage ~ etaString, ["consoleOnly"]); + + // What fragment size will be used? + addLogEntry("fragmentSize: " ~ to!string(fragmentSize) ~ " offset: " ~ to!string(offset) ~ " thisFileSize: " ~ to!string(thisFileSize), ["debug"]); + fragSize = fragmentSize < thisFileSize - offset ? 
fragmentSize : thisFileSize - offset; + addLogEntry("Using fragSize: " ~ to!string(fragSize), ["debug"]); + + // fragSize must not be a negative value + if (fragSize < 0) { + // Session upload will fail + // not a JSON object - fragment upload failed + addLogEntry("File upload session failed - invalid calculation of fragment size", ["verbose"]); + if (exists(threadUploadSessionFilePath)) { + remove(threadUploadSessionFilePath); + } + // set uploadResponse to null as error + uploadResponse = null; + return uploadResponse; + } + + // If the resume upload fails, we need to check for a return code here + try { + uploadResponse = activeOneDriveApiInstance.uploadFragment( + uploadSessionData["uploadUrl"].str, + uploadSessionData["localPath"].str, + offset, + fragSize, + thisFileSize + ); + } catch (OneDriveException exception) { + // if a 100 uploadResponse is generated, continue + if (exception.httpStatusCode == 100) { + continue; + } + // There was an error uploadResponse from OneDrive when uploading the file fragment + + // Handle transient errors: + // 408 - Request Time Out + // 429 - Too Many Requests + // 503 - Service Unavailable + // 504 - Gateway Timeout + + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle 'HTTP request returned status code 429 (Too Many Requests)' first + addLogEntry("Fragment upload failed - received throttle request uploadResponse from OneDrive", ["debug"]); + + if (exception.httpStatusCode == 429) { + auto retryAfterValue = activeOneDriveApiInstance.getRetryAfterValue(); + addLogEntry("Using Retry-After Value = " ~ to!string(retryAfterValue), ["debug"]); + + // Sleep thread as per request + addLogEntry(); + addLogEntry("Thread sleeping due to 'HTTP request returned status code 429' - The request has been throttled"); + addLogEntry("Sleeping for " ~ to!string(retryAfterValue) ~ " seconds"); + Thread.sleep(dur!"seconds"(retryAfterValue)); + addLogEntry("Retrying fragment upload"); + } else { + // Handle 408, 503 and 504 + auto errorArray = splitLines(exception.msg); + auto retryAfterValue = 30; + addLogEntry(); + addLogEntry("Thread sleeping due to '" ~ to!string(errorArray[0]) ~ "' - retrying applicable request in 30 seconds"); + addLogEntry("Sleeping for " ~ to!string(retryAfterValue) ~ " seconds"); + Thread.sleep(dur!"seconds"(retryAfterValue)); + addLogEntry("Retrying fragment upload"); + } + } else { + // insert a new line as well, so that the below error is inserted on the console in the right location + addLogEntry("Fragment upload failed - received an exception response from OneDrive API", ["verbose"]); + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + // retry fragment upload in case error is transient + addLogEntry("Retrying fragment upload", ["verbose"]); + } + + try { + uploadResponse = activeOneDriveApiInstance.uploadFragment( + uploadSessionData["uploadUrl"].str, + uploadSessionData["localPath"].str, + offset, + fragSize, + thisFileSize + ); + } catch (OneDriveException e) { + // OneDrive threw another error on retry + addLogEntry("Retry to upload fragment failed", ["verbose"]); + // display what the error is + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + // set uploadResponse to null as the fragment upload was in error twice + uploadResponse = null; + } catch (std.exception.ErrnoException e) { + // There was a file 
system error - display the error message
+ displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
+ return uploadResponse;
+ }
+ } catch (ErrnoException e) {
+ // There was a file system error
+ // display the error message
+ displayFileSystemErrorMessage(e.msg, getFunctionName!({}));
+ uploadResponse = null;
+ return uploadResponse;
+ }
+
+ // was the fragment uploaded without issue?
+ if (uploadResponse.type() == JSONType.object){
+ offset += fragmentSize;
+ if (offset >= thisFileSize) {
+ break;
+ }
+ // update the uploadSessionData details
+ uploadSessionData["expirationDateTime"] = uploadResponse["expirationDateTime"];
+ uploadSessionData["nextExpectedRanges"] = uploadResponse["nextExpectedRanges"];
+ saveSessionFile(threadUploadSessionFilePath, uploadSessionData);
+ } else {
+ // not a JSON object - fragment upload failed
+ addLogEntry("File upload session failed - invalid response from OneDrive API", ["verbose"]);
+
+ // cleanup session data
+ if (exists(threadUploadSessionFilePath)) {
+ remove(threadUploadSessionFilePath);
+ }
+ // set uploadResponse to null as error
+ uploadResponse = null;
+ return uploadResponse;
+ }
+ }
+
+ // upload complete
+ ulong end_unix_time = Clock.currTime.toUnixTime();
+ auto upload_duration = cast(int)(end_unix_time - start_unix_time);
+ dur!"seconds"(upload_duration).split!("hours", "minutes", "seconds")(h, m, s);
+ etaString = format!"| DONE in %02d:%02d:%02d"( h, m, s);
+ addLogEntry(uploadLogEntry ~ "100% " ~ etaString, ["consoleOnly"]);
+
+ // Remove session file if it exists
+ if (exists(threadUploadSessionFilePath)) {
+ remove(threadUploadSessionFilePath);
+ }
+
+ // Return the session upload response
+ return uploadResponse;
+ }
+
+ // Delete an item on OneDrive
+ void uploadDeletedItem(Item itemToDelete, string path) {
+
+ // Are we in a situation where we HAVE to keep the data online - do not delete the remote object
+ if (noRemoteDelete) {
+ if ((itemToDelete.type == ItemType.dir)) {
+ // Do not process remote directory delete
+ addLogEntry("Skipping remote directory delete as --upload-only & --no-remote-delete configured", ["verbose"]);
+ } else {
+ // Do not process remote file delete
+ addLogEntry("Skipping remote file delete as --upload-only & --no-remote-delete configured", ["verbose"]);
+ }
+ } else {
+
+ // Is this a --download-only operation?
+ if (!appConfig.getValueBool("download_only")) {
+ // Process the delete - delete the object online
+ addLogEntry("Deleting item from OneDrive: " ~ path);
+ bool flagAsBigDelete = false;
+
+ Item[] children;
+ ulong itemsToDelete;
+
+ if ((itemToDelete.type == ItemType.dir)) {
+ // Query the database - how many objects will this remove?
+ children = getChildren(itemToDelete.driveId, itemToDelete.id);
+ // Count the returned items + the original item (1)
+ itemsToDelete = count(children) + 1;
+ addLogEntry("Number of items online to delete: " ~ to!string(itemsToDelete), ["debug"]);
+ } else {
+ itemsToDelete = 1;
+ }
+
+ // A local delete of a file|folder when using --monitor will issue an inotify event, which will trigger the local & remote data to be deleted immediately
+ // The user may also be running a --sync process, so we check whether something was deleted between application runs
+ if (itemsToDelete >= appConfig.getValueLong("classify_as_big_delete")) {
+ // A big delete has been detected
+ flagAsBigDelete = true;
+ if (!appConfig.getValueBool("force")) {
+ addLogEntry("ERROR: An attempt to remove a large volume of data from OneDrive has been detected. 
Exiting client to preserve data on Microsoft OneDrive"); + addLogEntry("ERROR: To delete a large volume of data use --force or increase the config value 'classify_as_big_delete' to a larger value"); + // Must exit here to preserve data on online , allow logging to be done + forceExit(); + } + } + + // Are we in a --dry-run scenario? + if (!dryRun) { + // We are not in a dry run scenario + addLogEntry("itemToDelete: " ~ to!string(itemToDelete), ["debug"]); + + // Create new OneDrive API Instance + OneDriveApi uploadDeletedItemOneDriveApiInstance; + uploadDeletedItemOneDriveApiInstance = new OneDriveApi(appConfig); + uploadDeletedItemOneDriveApiInstance.initialise(); + + // what item are we trying to delete? + addLogEntry("Attempting to delete this single item id: " ~ itemToDelete.id ~ " from drive: " ~ itemToDelete.driveId, ["debug"]); + + try { + // perform the delete via the default OneDrive API instance + uploadDeletedItemOneDriveApiInstance.deleteById(itemToDelete.driveId, itemToDelete.id); + // Shutdown API + uploadDeletedItemOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(uploadDeletedItemOneDriveApiInstance); + } catch (OneDriveException e) { + if (e.httpStatusCode == 404) { + // item.id, item.eTag could not be found on the specified driveId + addLogEntry("OneDrive reported: The resource could not be found to be deleted.", ["verbose"]); + } + } + + // Delete the reference in the local database + itemDB.deleteById(itemToDelete.driveId, itemToDelete.id); + if (itemToDelete.remoteId != null) { + // If the item is a remote item, delete the reference in the local database + itemDB.deleteById(itemToDelete.remoteDriveId, itemToDelete.remoteId); + } + } else { + // log that this is a dry-run activity + addLogEntry("dry run - no delete activity"); + } + } else { + // --download-only operation, we are not uploading any delete event to OneDrive + addLogEntry("Not pushing local delete to Microsoft OneDrive due to --download-only being used", ["debug"]); + } + } + } + + // Get the children of an item id from the database + Item[] getChildren(string driveId, string id) { + + Item[] children; + children ~= itemDB.selectChildren(driveId, id); + foreach (Item child; children) { + if (child.type != ItemType.file) { + // recursively get the children of this child + children ~= getChildren(child.driveId, child.id); + } + } + return children; + } + + // Perform a 'reverse' delete of all child objects on OneDrive + void performReverseDeletionOfOneDriveItems(Item[] children, Item itemToDelete) { + + // Log what is happening + addLogEntry("Attempting a reverse delete of all child objects from OneDrive", ["debug"]); + + // Create a new API Instance for this thread and initialise it + OneDriveApi performReverseDeletionOneDriveApiInstance; + performReverseDeletionOneDriveApiInstance = new OneDriveApi(appConfig); + performReverseDeletionOneDriveApiInstance.initialise(); + + foreach_reverse (Item child; children) { + // Log the action + addLogEntry("Attempting to delete this child item id: " ~ child.id ~ " from drive: " ~ child.driveId, ["debug"]); + + // perform the delete via the default OneDrive API instance + performReverseDeletionOneDriveApiInstance.deleteById(child.driveId, child.id, child.eTag); + // delete the child reference in the local database + itemDB.deleteById(child.driveId, child.id); + } + // Log the action + addLogEntry("Attempting to delete this parent item id: " ~ itemToDelete.id ~ " from drive: " ~ itemToDelete.driveId, ["debug"]); + + // Perform the delete via 
the default OneDrive API instance + performReverseDeletionOneDriveApiInstance.deleteById(itemToDelete.driveId, itemToDelete.id, itemToDelete.eTag); + // Shutdown API instance + performReverseDeletionOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(performReverseDeletionOneDriveApiInstance); + } + + // Create a fake OneDrive response suitable for use with saveItem + JSONValue createFakeResponse(const(string) path) { + + import std.digest.sha; + // Generate a simulated JSON response which can be used + // At a minimum we need: + // 1. eTag + // 2. cTag + // 3. fileSystemInfo + // 4. file or folder. if file, hash of file + // 5. id + // 6. name + // 7. parent reference + + string fakeDriveId = appConfig.defaultDriveId; + string fakeRootId = appConfig.defaultRootId; + SysTime mtime = timeLastModified(path).toUTC(); + + // Need to update the 'fakeDriveId' & 'fakeRootId' with elements from the --dry-run database + // Otherwise some calls to validate objects will fail as the actual driveId being used is invalid + string parentPath = dirName(path); + Item databaseItem; + + if (parentPath != ".") { + // Not a 'root' parent + // For each driveid in the existing driveIDsArray + foreach (searchDriveId; driveIDsArray) { + addLogEntry("FakeResponse: searching database for: " ~ searchDriveId ~ " " ~ parentPath, ["debug"]); + + if (itemDB.selectByPath(parentPath, searchDriveId, databaseItem)) { + addLogEntry("FakeResponse: Found Database Item: " ~ to!string(databaseItem), ["debug"]); + fakeDriveId = databaseItem.driveId; + fakeRootId = databaseItem.id; + } + } + } + + // real id / eTag / cTag are different format for personal / business account + auto sha1 = new SHA1Digest(); + ubyte[] fakedOneDriveItemValues = sha1.digest(path); + + JSONValue fakeResponse; + + if (isDir(path)) { + // path is a directory + fakeResponse = [ + "id": JSONValue(toHexString(fakedOneDriveItemValues)), + "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), + "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), + "fileSystemInfo": JSONValue([ + "createdDateTime": mtime.toISOExtString(), + "lastModifiedDateTime": mtime.toISOExtString() + ]), + "name": JSONValue(baseName(path)), + "parentReference": JSONValue([ + "driveId": JSONValue(fakeDriveId), + "driveType": JSONValue(appConfig.accountType), + "id": JSONValue(fakeRootId) + ]), + "folder": JSONValue("") + ]; + } else { + // path is a file + // compute file hash - both business and personal responses use quickXorHash + string quickXorHash = computeQuickXorHash(path); + + fakeResponse = [ + "id": JSONValue(toHexString(fakedOneDriveItemValues)), + "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), + "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), + "fileSystemInfo": JSONValue([ + "createdDateTime": mtime.toISOExtString(), + "lastModifiedDateTime": mtime.toISOExtString() + ]), + "name": JSONValue(baseName(path)), + "parentReference": JSONValue([ + "driveId": JSONValue(fakeDriveId), + "driveType": JSONValue(appConfig.accountType), + "id": JSONValue(fakeRootId) + ]), + "file": JSONValue([ + "hashes":JSONValue([ + "quickXorHash": JSONValue(quickXorHash) + ]) + + ]) + ]; + } + + addLogEntry("Generated Fake OneDrive Response: " ~ to!string(fakeResponse), ["debug"]); + return fakeResponse; + } + + // Save JSON item details into the item database + void saveItem(JSONValue jsonItem) { + + // jsonItem has to be a valid object + if (jsonItem.type() == JSONType.object) { + // Check if the response JSON has an 'id', otherwise makeItem() fails 
with 'Key not found: id' + if (hasId(jsonItem)) { + // Are we in a --upload-only & --remove-source-files scenario? + // We do not want to add the item to the database in this situation as there is no local reference to the file post file deletion + // If the item is a directory, we need to add this to the DB, if this is a file, we dont add this, the parent path is not in DB, thus any new files in this directory are not added + if ((uploadOnly) && (localDeleteAfterUpload) && (isItemFile(jsonItem))) { + // Log that we skipping adding item to the local DB and the reason why + addLogEntry("Skipping adding to database as --upload-only & --remove-source-files configured", ["debug"]); + } else { + // What is the JSON item we are trying to create a DB record with? + addLogEntry("saveItem - creating DB item from this JSON: " ~ to!string(jsonItem), ["debug"]); + + // Takes a JSON input and formats to an item which can be used by the database + Item item = makeItem(jsonItem); + + // Is this JSON item a 'root' item? + if ((isItemRoot(jsonItem)) && (item.name == "root")) { + addLogEntry("Updating DB Item object with correct values as this is a 'root' object", ["debug"]); + item.parentId = null; // ensures that this database entry has no parent + // Check for parentReference + if (hasParentReference(jsonItem)) { + // Set the correct item.driveId + addLogEntry("ROOT JSON Item HAS parentReference .... setting item.driveId = jsonItem['parentReference']['driveId'].str", ["debug"]); + item.driveId = jsonItem["parentReference"]["driveId"].str; + } + + // We only should be adding our account 'root' to the database, not shared folder 'root' items + if (item.driveId != appConfig.defaultDriveId) { + // Shared Folder drive 'root' object .. we dont want this item + addLogEntry("NOT adding 'remote root' object to database: " ~ to!string(item), ["debug"]); return; } } - // Check that the filename that is returned is actually the file we wish to upload - // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file - // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, - // even though some file systems (such as a POSIX-compliant file system) may consider them as different. - // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior. + // Add to the local database + addLogEntry("Adding to database: " ~ to!string(item), ["debug"]); + itemDB.upsert(item); - // fileDetailsFromOneDrive has to be a valid object - if (fileDetailsFromOneDrive.type() == JSONType.object){ - // fileDetailsFromOneDrive = onedrive.getPathDetails(path) returned a valid JSON, meaning the file exists on OneDrive - // Check that 'name' is in the JSON response (validates data) and that 'name' == the path we are looking for - if (("name" in fileDetailsFromOneDrive) && (fileDetailsFromOneDrive["name"].str == baseName(path))) { - // OneDrive 'name' matches local path name - log.vlog("Requested file to upload exists on OneDrive - local database is out of sync for this file: ", path); - - // Is the local file newer than the uploaded file? 
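The removed comments above explain why the 'name' returned by the OneDrive API has to be compared case-sensitively: the API matches names case-insensitively, while a POSIX filesystem treats OSCAR, Oscar and oscar as different files. As a minimal standalone sketch of that comparison (not the client's performPosixTest helper; the file names are hypothetical):

import std.stdio : writeln;
import std.uni : icmp;

// OneDrive matches names case-insensitively, but POSIX filesystems do not,
// so the API's returned 'name' must equal the local basename exactly.
bool posixNameMatch(string localName, string apiName) {
    return localName == apiName;
}

void main() {
    // A case-insensitive comparison would call these equal - exactly the trap described above
    writeln(icmp("Oscar.txt", "oscar.txt") == 0);      // true
    writeln(posixNameMatch("Oscar.txt", "Oscar.txt")); // true  - same item
    writeln(posixNameMatch("Oscar.txt", "oscar.txt")); // false - case-only clash, not the same file
}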
- SysTime localFileModifiedTime = timeLastModified(path).toUTC(); - SysTime remoteFileModifiedTime = SysTime.fromISOExtString(fileDetailsFromOneDrive["fileSystemInfo"]["lastModifiedDateTime"].str); - localFileModifiedTime.fracSecs = Duration.zero; - - if (localFileModifiedTime > remoteFileModifiedTime){ - // local file is newer - log.vlog("Requested file to upload is newer than existing file on OneDrive"); - write("Uploading modified file ", path, " ... "); - JSONValue response; - - if (!dryRun) { - if (accountType == "personal"){ - // OneDrive Personal account upload handling - if (thisFileSize <= thresholdFileSize) { - try { - response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); - writeln("done."); - } catch (OneDriveException e) { - log.vdebug("response = onedrive.simpleUpload(path, parent.driveId, parent.id, baseName(path)); generated a OneDriveException"); - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request as a session"); - // Try upload as a session - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - writeln("done."); - } catch (OneDriveException e) { - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // error uploading file - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... 
skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } else { - // File larger than threshold - use a session to upload - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - writeln("done."); - } catch (OneDriveException e) { - log.vdebug("response = session.upload(path, parent.driveId, parent.id, baseName(path)); generated a OneDriveException"); - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // error uploading file - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } - - // response from OneDrive has to be a valid JSON object - if (response.type() == JSONType.object){ - // response is a valid JSON object - string id = response["id"].str; - string cTag; - - // Is there a valid cTag in the response? - if ("cTag" in response) { - // use the cTag instead of the eTag because Onedrive may update the metadata of files AFTER they have been uploaded - cTag = response["cTag"].str; - } else { - // Is there an eTag in the response? - if ("eTag" in response) { - // use the eTag from the response as there was no cTag - cTag = response["eTag"].str; - } else { - // no tag available - set to nothing - cTag = ""; - } - } - // validate if path exists so mtime can be calculated - if (exists(path)) { - SysTime mtime = timeLastModified(path).toUTC(); - uploadLastModifiedTime(parent.driveId, id, cTag, mtime); - } else { - // will be removed in different event! 
- log.log("File disappeared after upload: ", path); - } - } else { - // Log that an invalid JSON object was returned - log.vdebug("onedrive.simpleUpload or session.upload call returned an invalid JSON Object"); - return; - } - } else { - // OneDrive Business account modified file upload handling - if (accountType == "business"){ - // OneDrive Business Account - if ((!syncBusinessFolders) || (parent.driveId == defaultDriveId)) { - // If we are not syncing Shared Business Folders, or this change is going to the 'users' default drive, handle normally - // For logging consistency - writeln(""); - try { - response = session.upload(path, parent.driveId, parent.id, baseName(path), fileDetailsFromOneDrive["eTag"].str); - } catch (OneDriveException e) { - log.vdebug("response = session.upload(path, parent.driveId, parent.id, baseName(path), fileDetailsFromOneDrive['eTag'].str); generated a OneDriveException"); - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return; - } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling uploadNewFile(path);"); - uploadNewFile(path); - // return back to original call - return; - } - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying upload request"); - // Retry original request by calling function again to avoid replicating any further error handling - uploadNewFile(path); - // return back to original call - return; - } else { - // error uploading file - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return; - } - // upload complete - writeln("done."); - saveItem(response); - } else { - // If we are uploading to a shared business folder, there are a couple of corner cases here: - // 1. Shared Folder is a 'users' folder - // 2. 
Shared Folder is a 'SharePoint Library' folder, meaning we get hit by this stupidity: https://github.com/OneDrive/onedrive-api-docs/issues/935 - - // Need try{} & catch (OneDriveException e) { & catch (FileException e) { handler for this query - response = handleSharePointMetadataAdditionBugReplaceFile(fileDetailsFromOneDrive, parent, path); - if (!uploadFailed){ - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - } - - // OneDrive SharePoint account modified file upload handling - if (accountType == "documentLibrary"){ - // Depending on the file size, this will depend on how best to handle the modified local file - // as if too large, the following error will be generated by OneDrive: - // HTTP request returned status code 413 (Request Entity Too Large) - // We also cant use a session to upload the file, we have to use simpleUploadReplace - - // Need try{} & catch (OneDriveException e) { & catch (FileException e) { handler for this query - response = handleSharePointMetadataAdditionBugReplaceFile(fileDetailsFromOneDrive, parent, path); - if (!uploadFailed){ - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } else { - // uploadFailed, return - return; - } - } - } - - // Log action to log file - log.fileOnly("Uploading modified file ", path, " ... done."); - - // update free space tracking if this is our drive id - if (parent.driveId == defaultDriveId) { - // how much space is left on OneDrive after upload? - remainingFreeSpace = (remainingFreeSpace - thisFileSize); - log.vlog("Remaining free space on OneDrive: ", remainingFreeSpace); - } - } else { - // we are --dry-run - simulate the file upload - writeln("done."); - response = createFakeResponse(path); - // Log action to log file - log.fileOnly("Uploading modified file ", path, " ... done."); - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - return; - } + // If we have a remote drive ID, add this to our list of known drive id's + if (!item.remoteDriveId.empty) { + // Keep the driveIDsArray with unique entries only + if (!canFind(driveIDsArray, item.remoteDriveId)) { + // Add this drive id to the array to search with + driveIDsArray ~= item.remoteDriveId; + } + } + } + } else { + // log error + addLogEntry("ERROR: OneDrive response missing required 'id' element"); + addLogEntry("ERROR: " ~ to!string(jsonItem)); + } + } else { + // log error + addLogEntry("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object"); + addLogEntry("ERROR: Increase logging verbosity to assist determining why."); + } + } + + // Wrapper function for makeDatabaseItem so we can check to ensure that the item has the required hashes + Item makeItem(JSONValue onedriveJSONItem) { + + // Make the DB Item from the JSON data provided + Item newDatabaseItem = makeDatabaseItem(onedriveJSONItem); + + // Is this a 'file' item that has not been deleted? Deleted items have no hash + if ((newDatabaseItem.type == ItemType.file) && (!isItemDeleted(onedriveJSONItem))) { + // Does this item have a file size attribute? + if (hasFileSize(onedriveJSONItem)) { + // Is the file size greater than 0? + if (onedriveJSONItem["size"].integer > 0) { + // Does the DB item have any hashes as per the API provided JSON data? + if ((newDatabaseItem.quickXorHash.empty) && (newDatabaseItem.sha256Hash.empty)) { + // Odd .. 
there is no hash for this item .. why is that? + // Is there a 'file' JSON element? + if ("file" in onedriveJSONItem) { + // Microsoft OneDrive OneNote objects will report as files but have 'application/msonenote' and 'application/octet-stream' as mime types + if ((isMicrosoftOneNoteMimeType1(onedriveJSONItem)) || (isMicrosoftOneNoteMimeType2(onedriveJSONItem))) { + // Debug log output that this is a potential OneNote object + addLogEntry("This item is potentially an associated Microsoft OneNote Object Item", ["debug"]); } else { - // Save the details of the file that we got from OneDrive - // --dry-run safe - log.vlog("Updating the local database with details for this file: ", path); - if (!dryRun) { - // use the live data - saveItem(fileDetailsFromOneDrive); + // Not a Microsoft OneNote Mime Type Object .. + string apiWarningMessage = "WARNING: OneDrive API inconsistency - this file does not have any hash: "; + // This is computationally expensive .. but we are only doing this if there are no hashses provided + bool parentInDatabase = itemDB.idInLocalDatabase(newDatabaseItem.driveId, newDatabaseItem.parentId); + // Is the parent id in the database? + if (parentInDatabase) { + // This is again computationally expensive .. calculate this item path to advise the user the actual path of this item that has no hash + string newItemPath = computeItemPath(newDatabaseItem.driveId, newDatabaseItem.parentId) ~ "/" ~ newDatabaseItem.name; + addLogEntry(apiWarningMessage ~ newItemPath); } else { - // need to fake this data - auto fakeResponse = createFakeResponse(path); - saveItem(fakeResponse); + // Parent is not in the database .. why? + // Check if the parent item had been skipped .. + if (newDatabaseItem.parentId in skippedItems) { + addLogEntry(apiWarningMessage ~ "newDatabaseItem.parentId listed within skippedItems", ["debug"]); + } else { + // Use the item ID .. there is no other reference available, parent is not being skipped, so we should have been able to calculate this - but we could not + addLogEntry(apiWarningMessage ~ newDatabaseItem.id); + } } } - } else { - // The files are the "same" name wise but different in case sensitivity - log.error("ERROR: A local file has the same name as another local file."); - log.error("ERROR: To resolve, rename this local file: ", buildNormalizedPath(absolutePath(path))); - log.log("Skipping uploading this new file: ", buildNormalizedPath(absolutePath(path))); - } - } else { - // fileDetailsFromOneDrive is not valid JSON, an error was returned from OneDrive - log.error("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object"); - log.error("ERROR: Increase logging verbosity to assist determining why."); - uploadFailed = true; - return; + } } } else { - // Skip file - too large - log.log("Skipping uploading this new file as it exceeds the maximum size allowed by OneDrive: ", path); - uploadFailed = true; - return; + // zero file size + addLogEntry("This item file is zero size - potentially no hash provided by the OneDrive API", ["debug"]); } - } else { - // unable to read local file - log.log("Skipping uploading this file as it cannot be read (file permissions or file corruption): ", path); } - } else { - // Upload of the new file did not occur .. why? 
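The makeItem() wrapper above treats a non-zero-size file with neither a quickXorHash nor a sha256Hash as either a OneNote object (the comments name 'application/msonenote' and 'application/octet-stream' as the mime types involved) or an API inconsistency worth warning about. A rough standalone sketch of that classification over a hypothetical driveItem JSON (not the client's isMicrosoftOneNoteMimeType helpers):

import std.json;
import std.stdio : writeln;

// Rough classification of a driveItem JSON that has a non-zero size but no hashes.
string classifyHashlessFile(JSONValue item) {
    if ("file" in item && "mimeType" in item["file"]) {
        string mime = item["file"]["mimeType"].str;
        if (mime == "application/msonenote" || mime == "application/octet-stream")
            return "probable OneNote object - no hash expected";
    }
    return "WARNING: OneDrive API inconsistency - this file does not have any hash";
}

void main() {
    auto oneNoteItem = parseJSON(`{"name":"Notebook.one","size":1024,"file":{"mimeType":"application/msonenote"}}`);
    auto oddItem     = parseJSON(`{"name":"report.pdf","size":2048,"file":{"mimeType":"application/pdf"}}`);
    writeln(classifyHashlessFile(oneNoteItem));
    writeln(classifyHashlessFile(oddItem));
}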
- if (!parentPathFoundInDB) { - // Parent path was not found - log.log("Skipping uploading this new file as parent path is not in the database: ", path); - uploadFailed = true; - return; + } + + // Return the new database item + return newDatabaseItem; + } + + // Print the fileDownloadFailures and fileUploadFailures arrays if they are not empty + void displaySyncFailures() { + + // Were there any file download failures? + if (!fileDownloadFailures.empty) { + // There are download failures ... + addLogEntry(); + addLogEntry("Failed items to download from OneDrive: " ~ to!string(fileDownloadFailures.length)); + foreach(failedFileToDownload; fileDownloadFailures) { + // List the detail of the item that failed to download + addLogEntry("Failed to download: " ~ failedFileToDownload, ["info", "notify"]); + + // Is this failed item in the DB? It should not be .. + Item downloadDBItem; + // Need to check all driveid's we know about, not just the defaultDriveId + foreach (searchDriveId; driveIDsArray) { + if (itemDB.selectByPath(failedFileToDownload, searchDriveId, downloadDBItem)) { + // item was found in the DB + addLogEntry("ERROR: Failed Download Path found in database, must delete this item from the database .. it should not be in there if it failed to download"); + // Process the database entry removal. In a --dry-run scenario, this is being done against a DB copy + itemDB.deleteById(downloadDBItem.driveId, downloadDBItem.id); + if (downloadDBItem.remoteDriveId != null) { + // delete the linked remote folder + itemDB.deleteById(downloadDBItem.remoteDriveId, downloadDBItem.remoteId); + } + } + } } - if (!quotaAvailable) { - // Not enough free space - log.log("Skipping item '", path, "' due to insufficient free space available on OneDrive"); - uploadFailed = true; - return; + // Set the flag + syncFailures = true; + } + + // Were there any file upload failures? + if (!fileUploadFailures.empty) { + // There are download failures ... + addLogEntry(); + addLogEntry("Failed items to upload to OneDrive: " ~ to!string(fileUploadFailures.length)); + foreach(failedFileToUpload; fileUploadFailures) { + // List the path of the item that failed to upload + addLogEntry("Failed to upload: " ~ failedFileToUpload, ["info", "notify"]); + + // Is this failed item in the DB? It should not be .. + Item uploadDBItem; + // Need to check all driveid's we know about, not just the defaultDriveId + foreach (searchDriveId; driveIDsArray) { + if (itemDB.selectByPath(failedFileToUpload, searchDriveId, uploadDBItem)) { + // item was found in the DB + addLogEntry("ERROR: Failed Upload Path found in database, must delete this item from the database .. it should not be in there if it failed to upload"); + // Process the database entry removal. 
In a --dry-run scenario, this is being done against a DB copy + itemDB.deleteById(uploadDBItem.driveId, uploadDBItem.id); + if (uploadDBItem.remoteDriveId != null) { + // delete the linked remote folder + itemDB.deleteById(uploadDBItem.remoteDriveId, uploadDBItem.remoteId); + } + } + } } + // Set the flag + syncFailures = true; } } - - private JSONValue handleSharePointMetadataAdditionBugReplaceFile(JSONValue fileDetailsFromOneDrive, const ref Item parent, const(string) path) - { - // Explicit function for handling https://github.com/OneDrive/onedrive-api-docs/issues/935 - // Replace existing file - JSONValue response; + + // Generate a /delta compatible response - for use when we cant actually use /delta + // This is required when the application is configured to use National Azure AD deployments as these do not support /delta queries + // The same technique can also be used when we are using --single-directory. The parent objects up to the single directory target can be added, + // then once the target of the --single-directory request is hit, all of the children of that path can be queried, giving a much more focused + // JSON response which can then be processed, negating the need to continuously traverse the tree and 'exclude' items + JSONValue generateDeltaResponse(string pathToQuery = null) { + + // JSON value which will be responded with + JSONValue selfGeneratedDeltaResponse; + + // Function variables + Item searchItem; + JSONValue rootData; + JSONValue driveData; + JSONValue pathData; + JSONValue topLevelChildren; + JSONValue[] childrenData; + string nextLink; - // Depending on the file size, this will depend on how best to handle the modified local file - // as if too large, the following error will be generated by OneDrive: - // HTTP request returned status code 413 (Request Entity Too Large) - // We also cant use a session to upload the file, we have to use simpleUploadReplace + // Was a path to query passed in? + if (pathToQuery.empty) { + // Will query for the 'root' + pathToQuery = "."; + } - // Calculate existing hash for this file - string existingFileHash = computeQuickXorHash(path); + // Create new OneDrive API Instance + OneDriveApi generateDeltaResponseOneDriveApiInstance; + generateDeltaResponseOneDriveApiInstance = new OneDriveApi(appConfig); + generateDeltaResponseOneDriveApiInstance.initialise(); - if (getSize(path) <= thresholdFileSize) { - // Upload file via simpleUploadReplace as below threshold size + if (!singleDirectoryScope) { + // In a --resync scenario, there is no DB data to query, so we have to query the OneDrive API here to get relevant details try { - response = onedrive.simpleUploadReplace(path, fileDetailsFromOneDrive["parentReference"]["driveId"].str, fileDetailsFromOneDrive["id"].str, fileDetailsFromOneDrive["eTag"].str); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return response; + // Query the OneDrive API + pathData = generateDeltaResponseOneDriveApiInstance.getPathDetails(pathToQuery); + // Is the path on OneDrive local or remote to our account drive id? 
+ if (isItemRemote(pathData)) { + // The path we are seeking is remote to our account drive id + searchItem.driveId = pathData["remoteItem"]["parentReference"]["driveId"].str; + searchItem.id = pathData["remoteItem"]["id"].str; } else { - // display what the error is - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; + // The path we are seeking is local to our account drive id + searchItem.driveId = pathData["parentReference"]["driveId"].str; + searchItem.id = pathData["id"].str; } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading modified file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; + } catch (OneDriveException e) { + // Display error message + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + // Must exit here + generateDeltaResponseOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(generateDeltaResponseOneDriveApiInstance); + // Must force exit here, allow logging to be done + forceExit(); + } + } else { + // When setSingleDirectoryScope() was called, the following were set to the correct items, even if the path was remote: + // - singleDirectoryScopeDriveId + // - singleDirectoryScopeItemId + // Reuse these prior set values + searchItem.driveId = singleDirectoryScopeDriveId; + searchItem.id = singleDirectoryScopeItemId; + } + + // Before we get any data from the OneDrive API, flag any child object in the database as out-of-sync for this driveId & and object id + // Downgrade ONLY files associated with this driveId and idToQuery + addLogEntry("Downgrading all children for this searchItem.driveId (" ~ searchItem.driveId ~ ") and searchItem.id (" ~ searchItem.id ~ ") to an out-of-sync state", ["debug"]); + + auto drivePathChildren = getChildren(searchItem.driveId, searchItem.id); + if (count(drivePathChildren) > 0) { + // Children to process and flag as out-of-sync + foreach (drivePathChild; drivePathChildren) { + // Flag any object in the database as out-of-sync for this driveId & and object id + addLogEntry("Downgrading item as out-of-sync: " ~ drivePathChild.id, ["debug"]); + itemDB.downgradeSyncStatusFlag(drivePathChild.driveId, drivePathChild.id); + } + } + + // Get drive details for the provided driveId + try { + driveData = generateDeltaResponseOneDriveApiInstance.getPathDetailsById(searchItem.driveId, searchItem.id); + } catch (OneDriveException exception) { + addLogEntry("driveData = generateDeltaResponseOneDriveApiInstance.getPathDetailsById(searchItem.driveId, searchItem.id) generated a OneDriveException", ["debug"]); + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(generateDeltaResponseOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to query path details on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + generateDeltaResponse(pathToQuery); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + + // Was a valid JSON response for 'driveData' provided? + if (driveData.type() == JSONType.object) { + + // Dynamic output for a non-verbose run so that the user knows something is happening + if (appConfig.verbosityCount == 0) { + if (!appConfig.surpressLoggingOutput) { + addLogEntry("Fetching items from the OneDrive API for Drive ID: " ~ searchItem.driveId, ["logFileOnly"]); + + // Use the dots to show the application is 'doing something' + addLogEntry("Fetching items from the OneDrive API for Drive ID: " ~ searchItem.driveId ~ " .", ["consoleOnlyNoNewLine"]); + } + } else { + addLogEntry("Generating a /delta response from the OneDrive API for Drive ID: " ~ searchItem.driveId, ["verbose"]); + } + + // Process this initial JSON response + if (!isItemRoot(driveData)) { + // Get root details for the provided driveId + try { + rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId); + } catch (OneDriveException exception) { + addLogEntry("rootData = onedrive.getDriveIdRoot(searchItem.driveId) generated a OneDriveException", ["debug"]); + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
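The 408/429/503/504 handling above (honour the throttle via handleOneDriveThrottleRequest() for 429, sleep 30 seconds for the timeouts, then retry) is repeated around most API calls in the new code. A generic, hypothetical wrapper for that pattern, only to make the control flow explicit; the exception type is a stand-in, not the client's OneDriveException:

import core.thread : Thread;
import core.time : dur;
import std.stdio : writeln;

// Stand-in for an HTTP error carrying the status code of the failed request.
class HttpStatusException : Exception {
    int httpStatusCode;
    this(int code, string msg) { super(msg); httpStatusCode = code; }
}

// Retry a request when the failure is transient (408, 429, 503, 504),
// sleeping 30 seconds between attempts; rethrow anything else.
T retryOnTransientError(T)(T delegate() request, int maxAttempts = 3) {
    foreach (attempt; 0 .. maxAttempts) {
        try {
            return request();
        } catch (HttpStatusException e) {
            bool transient = e.httpStatusCode == 408 || e.httpStatusCode == 429
                          || e.httpStatusCode == 503 || e.httpStatusCode == 504;
            if (!transient || attempt + 1 == maxAttempts) throw e;
            writeln("HTTP ", e.httpStatusCode, " - retrying in 30 seconds");
            Thread.sleep(dur!"seconds"(30));
        }
    }
    assert(0, "unreachable");
}

void main() {
    int calls = 0;
    // Fails once with 503, then succeeds (the demo really does wait 30 seconds).
    auto result = retryOnTransientError(delegate int() {
        calls++;
        if (calls == 1) throw new HttpStatusException(503, "Service Unavailable");
        return 42;
    });
    writeln(result); // 42
}

The client itself performs this inline at each call site and re-enters the surrounding function instead of using a wrapper, which is why the same block appears around getPathDetailsById, getDriveIdRoot and listChildren.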
+ handleOneDriveThrottleRequest(generateDeltaResponseOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to query drive root details on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Query: rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId)"); + rootData = generateDeltaResponseOneDriveApiInstance.getDriveIdRoot(searchItem.driveId); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + // Add driveData JSON data to array + addLogEntry("Adding OneDrive root details for processing", ["verbose"]); + childrenData ~= rootData; } + + // Add driveData JSON data to array + addLogEntry("Adding OneDrive folder details for processing", ["verbose"]); + childrenData ~= driveData; } else { - // Have to upload via a session, however we have to delete the file first otherwise this will generate a 404 error post session upload - // Remove the existing file - onedrive.deleteById(fileDetailsFromOneDrive["parentReference"]["driveId"].str, fileDetailsFromOneDrive["id"].str, fileDetailsFromOneDrive["eTag"].str); - // Upload as a session, as a new file - writeln(""); + // driveData is an invalid JSON object + writeln("CODING TO DO: The query of OneDrive API to getPathDetailsById generated an invalid JSON response - thus we cant build our own /delta simulated response ... how to handle?"); + // Must exit here + generateDeltaResponseOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(generateDeltaResponseOneDriveApiInstance); + // Must force exit here, allow logging to be done + forceExit(); + } + + // For each child object, query the OneDrive API + for (;;) { + // query top level children try { - response = session.upload(path, parent.driveId, parent.id, baseName(path)); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // OneDrive returned a 'HTTP/1.1 401 Unauthorized Error' - file failed to be uploaded - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... 
skipped."); - log.vlog("OneDrive returned a 'HTTP 401 - Unauthorized' - gracefully handling error"); - uploadFailed = true; - return response; + topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink); + } catch (OneDriveException exception) { + // OneDrive threw an error + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Query Error: topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink)", ["debug"]); + addLogEntry("driveId: " ~ searchItem.driveId, ["debug"]); + addLogEntry("idToQuery: " ~ searchItem.id, ["debug"]); + addLogEntry("nextLink: " ~ nextLink, ["debug"]); + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(generateDeltaResponseOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry topLevelChildren = generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink)", ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to query OneDrive top level drive children on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry("generateDeltaResponseOneDriveApiInstance.listChildren(searchItem.driveId, searchItem.id, nextLink) previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + generateDeltaResponse(pathToQuery); } else { + // Default operation if not 408,429,503,504 errors // display what the error is - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + + // process top level children + addLogEntry("Adding " ~ to!string(count(topLevelChildren["value"].array)) ~ " OneDrive items for processing from the OneDrive 'root' folder", ["verbose"]); + + foreach (child; topLevelChildren["value"].array) { + // Check for any Client Side Filtering here ... 
we should skip querying the OneDrive API for 'folders' that we are going to just process and skip anyway. + // This avoids needless calls to the OneDrive API, and potentially speeds up this process. + if (!checkJSONAgainstClientSideFiltering(child)) { + // add this child to the array of objects + childrenData ~= child; + // is this child a folder? + if (isItemFolder(child)) { + // We have to query this folders children if childCount > 0 + if (child["folder"]["childCount"].integer > 0){ + // This child folder has children + string childIdToQuery = child["id"].str; + string childDriveToQuery = child["parentReference"]["driveId"].str; + auto childParentPath = child["parentReference"]["path"].str.split(":"); + string folderPathToScan = childParentPath[1] ~ "/" ~ child["name"].str; + + string pathForLogging; + // Are we in a --single-directory situation? If we are, the path we are using for logging needs to use the input path as a base + if (singleDirectoryScope) { + pathForLogging = appConfig.getValueString("single_directory") ~ "/" ~ child["name"].str; + } else { + pathForLogging = child["name"].str; + } + + // Query the children of this item + JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, pathForLogging); + foreach (grandChild; grandChildrenData.array) { + // add the grandchild to the array + childrenData ~= grandChild; + } + } + } } - } catch (FileException e) { - // display the error message - writeln("skipped."); - log.fileOnly("Uploading new file ", path, " ... skipped."); - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - uploadFailed = true; - return response; } + // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response + // to indicate more items are available and provide the request URL for the next page of items. + if ("@odata.nextLink" in topLevelChildren) { + // Update nextLink to next changeSet bundle + addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + nextLink = topLevelChildren["@odata.nextLink"].str; + } else break; } - writeln("done."); - // Due to https://github.com/OneDrive/onedrive-api-docs/issues/935 Microsoft modifies all PDF, MS Office & HTML files with added XML content. It is a 'feature' of SharePoint. - // So - now the 'local' and 'remote' file is technically DIFFERENT ... thanks Microsoft .. 
NO way to disable this stupidity - string uploadNewFileHash; - if (hasQuickXorHash(response)) { - // use the response json hash detail to compare - uploadNewFileHash = response["file"]["hashes"]["quickXorHash"].str; + + if (appConfig.verbosityCount == 0) { + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.surpressLoggingOutput) { + addLogEntry("\n", ["consoleOnlyNoNewLine"]); + } } - if (existingFileHash != uploadNewFileHash) { - // file was modified by Microsoft post upload to SharePoint site - log.vdebug("Existing Local File Hash: ", existingFileHash); - log.vdebug("New Remote File Hash: ", uploadNewFileHash); + // Craft response from all returned JSON elements + selfGeneratedDeltaResponse = [ + "@odata.context": JSONValue("https://graph.microsoft.com/v1.0/$metadata#Collection(driveItem)"), + "value": JSONValue(childrenData.array) + ]; + + // Shutdown API + generateDeltaResponseOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(generateDeltaResponseOneDriveApiInstance); + + // Return the generated JSON response + return selfGeneratedDeltaResponse; + } + + // Query the OneDrive API for the specified child id for any children objects + JSONValue[] queryForChildren(string driveId, string idToQuery, string childParentPath, string pathForLogging) { + + // function variables + JSONValue thisLevelChildren; + JSONValue[] thisLevelChildrenData; + string nextLink; + + // Create new OneDrive API Instance + OneDriveApi queryChildrenOneDriveApiInstance; + queryChildrenOneDriveApiInstance = new OneDriveApi(appConfig); + queryChildrenOneDriveApiInstance.initialise(); + + for (;;) { + // query this level children + try { + thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance); + } catch (OneDriveException exception) { + + writeln("CODING TO DO: EXCEPTION HANDLING NEEDED: thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance)"); + + } + + if (appConfig.verbosityCount == 0) { + // Dynamic output for a non-verbose run so that the user knows something is happening + if (!appConfig.surpressLoggingOutput) { + addLogEntry(".", ["consoleOnlyNoNewLine"]); + } + } + + // Was a valid JSON response for 'thisLevelChildren' provided? + if (thisLevelChildren.type() == JSONType.object) { + // process this level children + if (!childParentPath.empty) { + // We dont use childParentPath to log, as this poses an information leak risk. + // The full parent path of the child, as per the JSON might be: + // /Level 1/Level 2/Level 3/Child Shared Folder/some folder/another folder + // But 'Child Shared Folder' is what is shared, thus '/Level 1/Level 2/Level 3/' is a potential information leak if logged. + // Plus, the application output now shows accuratly what is being shared - so that is a good thing. + addLogEntry("Adding " ~ to!string(count(thisLevelChildren["value"].array)) ~ " OneDrive items for processing from " ~ pathForLogging, ["verbose"]); + } + foreach (child; thisLevelChildren["value"].array) { + // Check for any Client Side Filtering here ... we should skip querying the OneDrive API for 'folders' that we are going to just process and skip anyway. + // This avoids needless calls to the OneDrive API, and potentially speeds up this process. + if (!checkJSONAgainstClientSideFiltering(child)) { + // add this child to the array of objects + thisLevelChildrenData ~= child; + // is this child a folder? 
+ if (isItemFolder(child)){ + // We have to query this folders children if childCount > 0 + if (child["folder"]["childCount"].integer > 0){ + // This child folder has children + string childIdToQuery = child["id"].str; + string childDriveToQuery = child["parentReference"]["driveId"].str; + auto grandchildParentPath = child["parentReference"]["path"].str.split(":"); + string folderPathToScan = grandchildParentPath[1] ~ "/" ~ child["name"].str; + string newLoggingPath = pathForLogging ~ "/" ~ child["name"].str; + JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, newLoggingPath); + foreach (grandChild; grandChildrenData.array) { + // add the grandchild to the array + thisLevelChildrenData ~= grandChild; + } + } + } + } + } + // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response + // to indicate more items are available and provide the request URL for the next page of items. + if ("@odata.nextLink" in thisLevelChildren) { + // Update nextLink to next changeSet bundle + nextLink = thisLevelChildren["@odata.nextLink"].str; + addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + } else break; - if(!uploadOnly){ - // Download the Microsoft 'modified' file so 'local' is now in sync - log.vlog("Due to Microsoft Sharepoint 'enrichment' of files, downloading 'enriched' file to ensure local file is in-sync"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); - auto fileSize = response["size"].integer; - onedrive.downloadById(response["parentReference"]["driveId"].str, response["id"].str, path, fileSize); } else { - // we are not downloading a file, warn that file differences will exist - log.vlog("WARNING: Due to Microsoft Sharepoint 'enrichment' of files, this file is now technically different to your local copy"); - log.vlog("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details"); + // Invalid JSON response when querying this level children + addLogEntry("INVALID JSON response when attempting a retry of parent function - queryForChildren(driveId, idToQuery, childParentPath, pathForLogging)", ["debug"]); + + // retry thisLevelChildren = queryThisLevelChildren + addLogEntry("Thread sleeping for an additional 30 seconds", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + addLogEntry("Retry this call thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance)", ["debug"]); + thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance); } } - // return a JSON response so that it can be used and saved - return response; + // Shutdown API instance + queryChildrenOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(queryChildrenOneDriveApiInstance); + + // return response + return thisLevelChildrenData; } - - // delete an item on OneDrive - private void uploadDeleteItem(Item item, const(string) path) - { - log.log("Deleting item from OneDrive: ", path); - bool flagAsBigDelete = false; + + // Query the OneDrive API for the child objects for this element + JSONValue queryThisLevelChildren(string driveId, string idToQuery, string nextLink, OneDriveApi queryChildrenOneDriveApiInstance) { - // query the database - how many objects will this remove? 
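Both generateDeltaResponse() and queryForChildren() above page through children the same way: keep calling listChildren() and follow '@odata.nextLink' until the API stops returning one (the default page size is 200 items). A compact sketch of that loop over canned JSON pages; fetchPage() and the page contents are hypothetical:

import std.json;
import std.stdio : writeln;

// Two hypothetical pages: the first carries an @odata.nextLink, the second does not.
JSONValue fetchPage(string nextLink) {
    if (nextLink.length == 0)
        return parseJSON(`{"value":[{"name":"a.txt"},{"name":"b.txt"}],"@odata.nextLink":"https://graph.microsoft.com/v1.0/next-page"}`);
    return parseJSON(`{"value":[{"name":"c.txt"}]}`);
}

void main() {
    JSONValue[] childrenData;
    string nextLink;
    for (;;) {
        auto page = fetchPage(nextLink);
        foreach (child; page["value"].array)
            childrenData ~= child;
        if ("@odata.nextLink" in page)
            nextLink = page["@odata.nextLink"].str; // more items available - request the next page
        else
            break;
    }
    writeln(childrenData.length); // 3
}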
- auto children = getChildren(item.driveId, item.id); - long itemsToDelete = count(children); - log.vdebug("Number of items to delete: ", itemsToDelete); + // function variables + JSONValue thisLevelChildren; - // Are we running in monitor mode? A local delete of a file will issue a inotify event, which will trigger the local & remote data immediately - if (!cfg.getValueBool("monitor")) { - // not running in monitor mode - if (itemsToDelete > cfg.getValueLong("classify_as_big_delete")) { - // A big delete detected - flagAsBigDelete = true; - if (!cfg.getValueBool("force")) { - log.error("ERROR: An attempt to remove a large volume of data from OneDrive has been detected. Exiting client to preserve data on OneDrive"); - log.error("ERROR: To delete a large volume of data use --force or increase the config value 'classify_as_big_delete' to a larger value"); - // Must exit here to preserve data on OneDrive - onedrive.shutdown(); - exit(-1); + // query children + try { + // attempt API call + addLogEntry("Attempting Query: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)", ["debug"]); + thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink); + addLogEntry("Query 'thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)' performed successfully", ["debug"]); + } catch (OneDriveException exception) { + // OneDrive threw an error + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Query Error: thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink)", ["debug"]); + addLogEntry("driveId: " ~ driveId, ["debug"]); + addLogEntry("idToQuery: " ~ idToQuery, ["debug"]); + addLogEntry("nextLink: " ~ nextLink, ["debug"]); + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(queryChildrenOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to query OneDrive drive item children - retrying applicable request in 30 seconds"); + addLogEntry("thisLevelChildren = queryChildrenOneDriveApiInstance.listChildren(driveId, idToQuery, nextLink) previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + queryThisLevelChildren(driveId, idToQuery, nextLink, queryChildrenOneDriveApiInstance); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); } } + + // return response + return thisLevelChildren; + } + + // Traverses the provided path online, via the OneDrive API, following correct parent driveId and itemId elements across the account + // to find if this full path exists. If this path exists online, the last item in the object path will be returned as a full JSON item. + // + // If the createPathIfMissing = false + no path exists online, a null invalid JSON item will be returned. + // If the createPathIfMissing = true + no path exists online, the requested path will be created in the correct location online. The resulting + // response to the directory creation will then be returned. + // + // This function also ensures that each path in the requested path actually matches the requested element to ensure that the OneDrive API response + // is not falsely matching a 'case insensitive' match to the actual request which is a POSIX compliance issue. + JSONValue queryOneDriveForSpecificPathAndCreateIfMissing(string thisNewPathToSearch, bool createPathIfMissing) { - if (!dryRun) { - // we are not in a --dry-run situation, process deletion to OneDrive - if ((item.driveId == "") && (item.id == "") && (item.eTag == "")){ - // These are empty ... we cannot delete if this is empty .... - log.vdebug("item.driveId, item.id & item.eTag are empty ... need to query OneDrive for values"); - log.vdebug("Checking OneDrive for path: ", path); - JSONValue onedrivePathDetails = onedrive.getPathDetails(path); // Returns a JSON String for the OneDrive Path - log.vdebug("OneDrive path details: ", onedrivePathDetails); - item.driveId = onedrivePathDetails["parentReference"]["driveId"].str; // Should give something like 12345abcde1234a1 - item.id = onedrivePathDetails["id"].str; // This item's ID. Should give something like 12345ABCDE1234A1!101 - item.eTag = onedrivePathDetails["eTag"].str; // Should be something like aNjM2NjJFRUVGQjY2NjJFMSE5MzUuMA - } - - // do the delete - try { - // what item are we trying to delete? - log.vdebug("Attempting to delete item from drive: ", item.driveId); - log.vdebug("Attempting to delete this item id: ", item.id); - // perform the delete via the API - onedrive.deleteById(item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - // item.id, item.eTag could not be found on driveId - log.vlog("OneDrive reported: The resource could not be found."); - } else { - // Not a 404 response .. is this a 401 response due to some sort of OneDrive Business security policy? - if ((e.httpStatusCode == 401) && (accountType != "personal")) { - log.vdebug("onedrive.deleteById generated a 401 error response when attempting to delete object by item id"); - auto errorArray = splitLines(e.msg); - JSONValue errorMessage = parseJSON(replace(e.msg, errorArray[0], "")); - if (errorMessage["error"]["message"].str == "Access denied. 
You do not have permission to perform this action or access this resource.") { - // Issue #1041 - Unable to delete OneDrive content when permissions prevent deletion - try { - log.vdebug("Attempting a reverse delete of all child objects from OneDrive"); - foreach_reverse (Item child; children) { - log.vdebug("Delete child item from drive: ", child.driveId); - log.vdebug("Delete this child item id: ", child.id); - onedrive.deleteById(child.driveId, child.id, child.eTag); - // delete the child reference in the local database - itemdb.deleteById(child.driveId, child.id); + // function variables + JSONValue getPathDetailsAPIResponse; + string currentPathTree; + Item parentDetails; + JSONValue topLevelChildren; + string nextLink; + bool directoryFoundOnline = false; + bool posixIssue = false; + + // Create a new API Instance for this thread and initialise it + OneDriveApi queryOneDriveForSpecificPath; + queryOneDriveForSpecificPath = new OneDriveApi(appConfig); + queryOneDriveForSpecificPath.initialise(); + + foreach (thisFolderName; pathSplitter(thisNewPathToSearch)) { + addLogEntry("Testing for the existance online of this folder path: " ~ thisFolderName, ["debug"]); + directoryFoundOnline = false; + + // If this is '.' this is the account root + if (thisFolderName == ".") { + currentPathTree = thisFolderName; + } else { + currentPathTree = currentPathTree ~ "/" ~ thisFolderName; + } + + addLogEntry("Attempting to query OneDrive for this path: " ~ currentPathTree, ["debug"]); + + // What query do we use? + if (thisFolderName == ".") { + // Query the root, set the right details + try { + getPathDetailsAPIResponse = queryOneDriveForSpecificPath.getPathDetails(currentPathTree); + parentDetails = makeItem(getPathDetailsAPIResponse); + // Save item to the database + saveItem(getPathDetailsAPIResponse); + directoryFoundOnline = true; + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(queryOneDriveForSpecificPath); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to query path on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + queryOneDriveForSpecificPathAndCreateIfMissing(thisNewPathToSearch, createPathIfMissing); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + } else { + // Ensure we have a valid driveId to search here + if (parentDetails.driveId.empty) { + parentDetails.driveId = appConfig.defaultDriveId; + } + + // If the prior JSON 'getPathDetailsAPIResponse' is on this account driveId .. then continue to use getPathDetails + if (parentDetails.driveId == appConfig.defaultDriveId) { + + try { + // Query OneDrive API for this path + getPathDetailsAPIResponse = queryOneDriveForSpecificPath.getPathDetails(currentPathTree); + + // Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API + if (hasName(getPathDetailsAPIResponse)) { + performPosixTest(thisFolderName, getPathDetailsAPIResponse["name"].str); + } else { + throw new jsonResponseException("Unable to perform POSIX test as the OneDrive API request generated an invalid JSON response"); + } + + // No POSIX issue with requested path element + parentDetails = makeItem(getPathDetailsAPIResponse); + // Save item to the database + saveItem(getPathDetailsAPIResponse); + directoryFoundOnline = true; + + // Is this JSON a remote object + addLogEntry("Testing if this is a remote Shared Folder", ["debug"]); + if (isItemRemote(getPathDetailsAPIResponse)) { + // Remote Directory .. need a DB Tie Item + addLogEntry("Creating a DB Tie for this Shared Folder", ["debug"]); + + // New DB Tie Item to bind the 'remote' path to our parent path + Item tieDBItem; + // Set the name + tieDBItem.name = parentDetails.name; + // Set the correct item type + tieDBItem.type = ItemType.dir; + // Set the right elements using the 'remote' of the parent as the 'actual' for this DB Tie + tieDBItem.driveId = parentDetails.remoteDriveId; + tieDBItem.id = parentDetails.remoteId; + // Set the correct mtime + tieDBItem.mtime = parentDetails.mtime; + // Add tie DB record to the local database + addLogEntry("Adding DB Tie record to database: " ~ to!string(tieDBItem), ["debug"]); + itemDB.upsert(tieDBItem); + // Update parentDetails to use the DB Tie record + parentDetails = tieDBItem; + } + } catch (OneDriveException exception) { + if (exception.httpStatusCode == 404) { + directoryFoundOnline = false; + } else { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
+ handleOneDriveThrottleRequest(queryOneDriveForSpecificPath); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); } - log.vdebug("Delete parent item from drive: ", item.driveId); - log.vdebug("Delete this parent item id: ", item.id); - onedrive.deleteById(item.driveId, item.id, item.eTag); - } catch (OneDriveException e) { + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to query path on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + queryOneDriveForSpecificPathAndCreateIfMissing(thisNewPathToSearch, createPathIfMissing); + } else { + // Default operation if not 408,429,503,504 errors // display what the error is - log.vdebug("A further error was generated when attempting a reverse delete of objects from OneDrive"); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; + displayOneDriveErrorMessage(exception.msg, thisFunctionName); } } + } catch (jsonResponseException e) { + addLogEntry(e.msg, ["debug"]); } - - // Not a 404 response .. is this a 403 response due to OneDrive Business Retention Policy being enabled? - if ((e.httpStatusCode == 403) && (accountType != "personal")) { - log.vdebug("onedrive.deleteById generated a 403 error response when attempting to delete object by item id"); - auto errorArray = splitLines(e.msg); - JSONValue errorMessage = parseJSON(replace(e.msg, errorArray[0], "")); - if (errorMessage["error"]["message"].str == "Request was cancelled by event received. 
If attempting to delete a non-empty folder, it's possible that it's on hold") {
- // Issue #338 - Unable to delete OneDrive content when OneDrive Business Retention Policy is enabled
- try {
- log.vdebug("Attempting a reverse delete of all child objects from OneDrive");
- foreach_reverse (Item child; children) {
- log.vdebug("Delete child item from drive: ", child.driveId);
- log.vdebug("Delete this child item id: ", child.id);
- onedrive.deleteById(child.driveId, child.id, child.eTag);
- // delete the child reference in the local database
- itemdb.deleteById(child.driveId, child.id);
+ } else {
+ // parentDetails.driveId is not the account drive id - thus will be a remote shared item
+ addLogEntry("This parent directory is a remote object - the next path element will be on a remote drive", ["debug"]);
+
+ // For this parentDetails.driveId, parentDetails.id object, query the OneDrive API for its children
+ for (;;) {
+ // Query this remote object for its children
+ topLevelChildren = queryOneDriveForSpecificPath.listChildren(parentDetails.driveId, parentDetails.id, nextLink);
+ // Process each child
+ foreach (child; topLevelChildren["value"].array) {
+ // Is this child a folder?
+ if (isItemFolder(child)) {
+ // Is this the child folder we are looking for, and is a POSIX match?
+ if (child["name"].str == thisFolderName) {
+ // EXACT MATCH including case sensitivity: Flag that we found the folder online
+ directoryFoundOnline = true;
+ // Use these details for the next entry path
+ getPathDetailsAPIResponse = child;
+ parentDetails = makeItem(getPathDetailsAPIResponse);
+ // Save item to the database
+ saveItem(getPathDetailsAPIResponse);
+ // No need to continue searching
+ break;
+ } else {
+ string childAsLower = toLower(child["name"].str);
+ string thisFolderNameAsLower = toLower(thisFolderName);
+ if (childAsLower == thisFolderNameAsLower) {
+ // This is a POSIX 'case-insensitive match'
+ // Local item name has a 'case-insensitive match' to an existing item on OneDrive
+ posixIssue = true;
+ throw new posixException(thisFolderName, child["name"].str);
+ }
 }
- log.vdebug("Delete parent item from drive: ", item.driveId);
- log.vdebug("Delete this parent item id: ", item.id);
- onedrive.deleteById(item.driveId, item.id, item.eTag);
- } catch (OneDriveException e) {
- // display what the error is
- log.vdebug("A further error was generated when attempting a reverse delete of objects from OneDrive");
- displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
- return;
 }
 }
- } else {
- // Not a 403 response & OneDrive Business Account / O365 Shared Folder / Library
- log.vdebug("onedrive.deleteById generated an error response when attempting to delete object by item id");
- // display what the error is
- displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
- return;
+
+ if (directoryFoundOnline) {
+ // We found the folder, no need to continue searching nextLink data
+ break;
+ }
+
+ // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response
+ // to indicate more items are available and provide the request URL for the next page of items.
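+ // Keep following @odata.nextLink until the requested folder is found or there are no more pages of children to process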
+ if ("@odata.nextLink" in topLevelChildren) { + // Update nextLink to next changeSet bundle + addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + nextLink = topLevelChildren["@odata.nextLink"].str; + } else break; } } } - // delete the reference in the local database - itemdb.deleteById(item.driveId, item.id); - if (item.remoteId != null) { - // If the item is a remote item, delete the reference in the local database - itemdb.deleteById(item.remoteDriveId, item.remoteId); - } - } - } - - // get the children of an item id from the database - private Item[] getChildren(string driveId, string id) - { - Item[] children; - children ~= itemdb.selectChildren(driveId, id); - foreach (Item child; children) { - if (child.type != ItemType.file) { - // recursively get the children of this child - children ~= getChildren(child.driveId, child.id); - } - } - return children; - } - - // update the item's last modified time - private void uploadLastModifiedTime(const(char)[] driveId, const(char)[] id, const(char)[] eTag, SysTime mtime) - { - string itemModifiedTime; - itemModifiedTime = mtime.toISOExtString(); - JSONValue data = [ - "fileSystemInfo": JSONValue([ - "lastModifiedDateTime": itemModifiedTime - ]) - ]; - - JSONValue response; - try { - response = onedrive.updateById(driveId, id, data, eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 412) { - // OneDrive threw a 412 error, most likely: ETag does not match current item's value - // Retry without eTag - log.vdebug("File Metadata Update Failed - OneDrive eTag / cTag match issue"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting file time stamp update - gracefully handling error"); - string nullTag = null; - response = onedrive.updateById(driveId, id, data, nullTag); - } - } - // save the updated response from OneDrive in the database - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } - - // save item details into database - private void saveItem(JSONValue jsonItem) - { - // jsonItem has to be a valid object - if (jsonItem.type() == JSONType.object){ - // Check if the response JSON has an 'id', otherwise makeItem() fails with 'Key not found: id' - if (hasId(jsonItem)) { - // Are we in a --upload-only & --remove-source-files scenario? - // We do not want to add the item to the database in this situation as there is no local reference to the file post file deletion - // If the item is a directory, we need to add this to the DB, if this is a file, we dont add this, the parent path is not in DB, thus any new files in this directory are not added - if ((uploadOnly) && (localDeleteAfterUpload) && (isItemFile(jsonItem))) { - // Log that we skipping adding item to the local DB and the reason why - log.vdebug("Skipping adding to database as --upload-only & --remove-source-files configured"); - } else { - // What is the JSON item we are trying to create a DB record with? - log.vdebug("Creating DB item from this JSON: ", jsonItem); - // Takes a JSON input and formats to an item which can be used by the database - Item item = makeItem(jsonItem); - // Add to the local database - log.vdebug("Adding to database: ", item); - itemdb.upsert(item); + // If we did not find the folder, we need to create this folder + if (!directoryFoundOnline) { + // Folder not found online + // Set any response to be an invalid JSON item + getPathDetailsAPIResponse = null; + // Was there a POSIX issue? 
+ if (!posixIssue) { + // No POSIX issue + if (createPathIfMissing) { + // Create this path as it is missing on OneDrive online and there is no POSIX issue with a 'case-insensitive match' + addLogEntry("FOLDER NOT FOUND ONLINE AND WE ARE REQUESTED TO CREATE IT", ["debug"]); + addLogEntry("Create folder on this drive: " ~ parentDetails.driveId, ["debug"]); + addLogEntry("Create folder as a child on this object: " ~ parentDetails.id, ["debug"]); + addLogEntry("Create this folder name: " ~ thisFolderName, ["debug"]); + + // Generate the JSON needed to create the folder online + JSONValue newDriveItem = [ + "name": JSONValue(thisFolderName), + "folder": parseJSON("{}") + ]; - // If we have a remote drive ID, add this to our list of known drive id's - if (!item.remoteDriveId.empty) { - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, item.remoteDriveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= item.remoteDriveId; + JSONValue createByIdAPIResponse; + // Submit the creation request + // Fix for https://github.com/skilion/onedrive/issues/356 + if (!dryRun) { + try { + // Attempt to create a new folder on the configured parent driveId & parent id + createByIdAPIResponse = queryOneDriveForSpecificPath.createById(parentDetails.driveId, parentDetails.id, newDriveItem); + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(createByIdAPIResponse); + // Set getPathDetailsAPIResponse to createByIdAPIResponse + getPathDetailsAPIResponse = createByIdAPIResponse; + } catch (OneDriveException e) { + // 409 - API Race Condition + if (e.httpStatusCode == 409) { + // When we attempted to create it, OneDrive responded that it now already exists + addLogEntry("OneDrive reported that " ~ thisFolderName ~ " already exists .. OneDrive API race condition", ["verbose"]); + } else { + // some other error from OneDrive was returned - display what it is + addLogEntry("OneDrive generated an error when creating this path: " ~ thisFolderName); + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } + } + } else { + // Simulate a successful 'directory create' & save it to the dryRun database copy + // The simulated response has to pass 'makeItem' as part of saveItem + auto fakeResponse = createFakeResponse(thisNewPathToSearch); + // Save item to the database + saveItem(fakeResponse); } } } - } else { - // log error - log.error("ERROR: OneDrive response missing required 'id' element"); - log.error("ERROR: ", jsonItem); - } - } else { - // log error - log.error("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object"); - log.error("ERROR: Increase logging verbosity to assist determining why."); - } - } - - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_move - // This function is only called in monitor mode when an move event is coming from - // inotify and we try to move the item. - void uploadMoveItem(string from, string to) - { - log.log("Moving ", from, " to ", to); - - // 'to' file validation .. is the 'to' file valid for upload? 
- if (isSymlink(to)) { - // if config says so we skip all symlinked items - if (cfg.getValueBool("skip_symlinks")) { - log.vlog("Skipping item - skip symbolic links configured: ", to); - return; - - } - // skip unexisting symbolic links - else if (!exists(readLink(to))) { - log.logAndNotify("Skipping item - invalid symbolic link: ", to); - return; } } - // Check against Microsoft OneDrive restriction and limitations about Windows naming files - if (!isValidName(to)) { - log.logAndNotify("Skipping item - invalid name (Microsoft Naming Convention): ", to); - return; - } + // Shutdown API instance + queryOneDriveForSpecificPath.shutdown(); + // Free object and memory + object.destroy(queryOneDriveForSpecificPath); - // Check for bad whitespace items - if (!containsBadWhiteSpace(to)) { - log.logAndNotify("Skipping item - invalid name (Contains an invalid whitespace item): ", to); - return; - } + // Output our search results + addLogEntry("queryOneDriveForSpecificPathAndCreateIfMissing.getPathDetailsAPIResponse = " ~ to!string(getPathDetailsAPIResponse), ["debug"]); + return getPathDetailsAPIResponse; + } + + // Delete an item by it's path + // This function is only used in --monitor mode and --remove-directory directive + void deleteByPath(string path) { - // Check for HTML ASCII Codes as part of file name - if (!containsASCIIHTMLCodes(to)) { - log.logAndNotify("Skipping item - invalid name (Contains HTML ASCII Code): ", to); - return; - } + // function variables + Item dbItem; - // 'to' file has passed file validation - Item fromItem, toItem, parentItem; - if (!itemdb.selectByPath(from, defaultDriveId, fromItem)) { - if (cfg.getValueBool("skip_dotfiles") && isDotFile(to)){ - log.log("Skipping upload due to skip_dotfile = true"); - return; - } else { - uploadNewFile(to); - return; - } - } - if (fromItem.parentId == null) { - // the item is a remote folder, need to do the operation on the parent - enforce(itemdb.selectByPathWithoutRemote(from, defaultDriveId, fromItem)); - } - if (itemdb.selectByPath(to, defaultDriveId, toItem)) { - // the destination has been overwritten - uploadDeleteItem(toItem, to); - } - if (!itemdb.selectByPath(dirName(to), defaultDriveId, parentItem)) { - // the parent item is not in the database - - // is the destination a .folder that is being skipped? - if (cfg.getValueBool("skip_dotfiles")) { - if (isDotFile(dirName(to))) { - // target location is a .folder - log.vdebug("Target location is excluded from sync due to skip_dotfiles = true"); - // item will have been moved locally, but as this is now to a location that is not synced, needs to be removed from OneDrive - log.log("Item has been moved to a location that is excluded from sync operations. 
Removing item from OneDrive"); - uploadDeleteItem(fromItem, from); - return; - } - } - - // some other error - throw new SyncException("Can't move an item to an unsynced directory"); - } - if (cfg.getValueBool("skip_dotfiles") && isDotFile(to)){ - log.log("Removing item from OneDrive due to skip_dotfiles = true"); - uploadDeleteItem(fromItem, from); - return; - } - if (fromItem.driveId != parentItem.driveId) { - // items cannot be moved between drives - uploadDeleteItem(fromItem, from); - uploadNewFile(to); - } else { - if (!exists(to)) { - log.vlog("uploadMoveItem target has disappeared: ", to); - return; - } - SysTime mtime = timeLastModified(to).toUTC(); - JSONValue diff = [ - "name": JSONValue(baseName(to)), - "parentReference": JSONValue([ - "id": parentItem.id - ]), - "fileSystemInfo": JSONValue([ - "lastModifiedDateTime": mtime.toISOExtString() - ]) - ]; - - // Perform the move operation on OneDrive - JSONValue response; - try { - response = onedrive.updateById(fromItem.driveId, fromItem.id, diff, fromItem.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 412) { - // OneDrive threw a 412 error, most likely: ETag does not match current item's value - // Retry without eTag - log.vdebug("File Move Failed - OneDrive eTag / cTag match issue"); - log.vlog("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting to move the file - gracefully handling error"); - string nullTag = null; - // move the file but without the eTag - response = onedrive.updateById(fromItem.driveId, fromItem.id, diff, nullTag); - } - } - // save the move response from OneDrive in the database - // Is the response a valid JSON object - validation checking done in saveItem - saveItem(response); - } - } - - // delete an item by it's path - void deleteByPath(const(string) path) - { - Item item; // Need to check all driveid's we know about, not just the defaultDriveId bool itemInDB = false; foreach (searchDriveId; driveIDsArray) { - if (itemdb.selectByPath(path, searchDriveId, item)) { + if (itemDB.selectByPath(path, searchDriveId, dbItem)) { // item was found in the DB itemInDB = true; break; } } + + // Was the item found in the database? if (!itemInDB) { - throw new SyncException("The item to delete is not in the local database"); + // path to delete is not in the local database .. + // was this a --remove-directory attempt? + if (!appConfig.getValueBool("monitor")) { + // --remove-directory deletion attempt + addLogEntry("The item to delete is not in the local database - unable to delete online"); + return; + } else { + // normal use .. 
--monitor being used
+ throw new SyncException("The item to delete is not in the local database");
+ }
 }
- if (item.parentId == null) {
+ // This needs to be enforced as we have to know the parent id of the object being deleted
+ if (dbItem.parentId == null) {
 // the item is a remote folder, need to do the operation on the parent
- enforce(itemdb.selectByPathWithoutRemote(path, defaultDriveId, item));
+ enforce(itemDB.selectByPathIncludingRemoteItems(path, appConfig.defaultDriveId, dbItem));
 }
+
 try {
 if (noRemoteDelete) {
 // do not process remote delete
- log.vlog("Skipping remote delete as --upload-only & --no-remote-delete configured");
+ addLogEntry("Skipping remote delete as --upload-only & --no-remote-delete configured", ["verbose"]);
 } else {
- uploadDeleteItem(item, path);
+ uploadDeletedItem(dbItem, path);
 }
 } catch (OneDriveException e) {
 if (e.httpStatusCode == 404) {
- log.log(e.msg);
+ addLogEntry(e.msg);
 } else {
 // display what the error is
 displayOneDriveErrorMessage(e.msg, getFunctionName!({}));
@@ -6178,35 +6529,210 @@
 }
 }
 
- // move a OneDrive folder from one name to another
- void moveByPath(const(string) source, const(string) destination)
- {
- log.vlog("Moving remote folder: ", source, " -> ", destination);
-
- // Source and Destination are relative to ~/OneDrive
- string sourcePath = source;
- string destinationBasePath = dirName(destination).idup;
-
- // if destinationBasePath == '.' then destinationBasePath needs to be ""
- if (destinationBasePath == ".") {
- destinationBasePath = "";
+ // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_move
+ // This function is only called in monitor mode when a move event is coming from
+ // inotify and we try to move the item.
+ void uploadMoveItem(string oldPath, string newPath) {
+ // Log that we are doing a move
+ addLogEntry("Moving " ~ oldPath ~ " to " ~ newPath);
+ // Is this move unwanted?
+ bool unwanted = false;
+ // Item variables
+ Item oldItem, newItem, parentItem;
+
+ // This is not a Client Side Filtering check, nor a Microsoft Check, but is a sanity check that the path provided is UTF encoded correctly
+ // Check the std.encoding of the path against: Unicode 5.0, ASCII, ISO-8859-1, ISO-8859-2, WINDOWS-1250, WINDOWS-1251, WINDOWS-1252
+ if (!unwanted) {
+ if(!isValid(newPath)) {
+ // Path is not valid according to https://dlang.org/phobos/std_encoding.html
+ addLogEntry("Skipping item - invalid character encoding sequence: " ~ newPath, ["info", "notify"]);
+ unwanted = true;
+ }
+ }
+
+ // Check this path against the Client Side Filtering Rules
+ // - check_nosync
+ // - skip_dotfiles
+ // - skip_symlinks
+ // - skip_file
+ // - skip_dir
+ // - sync_list
+ // - skip_size
+ if (!unwanted) {
+ unwanted = checkPathAgainstClientSideFiltering(newPath);
+ }
+
+ // Check this path against the Microsoft Naming Conventions & Restrictions
+ // - Check path against Microsoft OneDrive restriction and limitations about Windows naming for files and folders
+ // - Check path for bad whitespace items
+ // - Check path for HTML ASCII Codes
+ // - Check path for ASCII Control Codes
+ if (!unwanted) {
+ unwanted = checkPathAgainstMicrosoftNamingRestrictions(newPath);
+ }
+
+ // 'newPath' has passed client side filtering validation
+ if (!unwanted) {
+
+ if (!itemDB.selectByPath(oldPath, appConfig.defaultDriveId, oldItem)) {
+ // The old path|item is not synced with the database, upload as a new file
+ addLogEntry("Moved local item was not in-sync with local database - uploading as new item");
+ uploadNewFile(newPath);
+ return;
+ }
+
+ if (oldItem.parentId == null) {
+ // the item is a remote folder, need to do the operation on the parent
+ enforce(itemDB.selectByPathIncludingRemoteItems(oldPath, appConfig.defaultDriveId, oldItem));
+ }
+
+ if (itemDB.selectByPath(newPath, appConfig.defaultDriveId, newItem)) {
+ // the destination has been overwritten
+ addLogEntry("Moved local item overwrote an existing item - deleting old online item");
+ uploadDeletedItem(newItem, newPath);
+ }
+
+ if (!itemDB.selectByPath(dirName(newPath), appConfig.defaultDriveId, parentItem)) {
+ // the parent item is not in the database
+ throw new SyncException("Can't move an item to an unsynced directory");
+ }
+
+ if (oldItem.driveId != parentItem.driveId) {
+ // items cannot be moved between drives
+ uploadDeletedItem(oldItem, oldPath);
+
+ // what sort of move is this?
+ if (isFile(newPath)) {
+ // newPath is a file
+ uploadNewFile(newPath);
+ } else {
+ // newPath is a directory
+ scanLocalFilesystemPathForNewData(newPath);
+ }
+ } else {
+ if (!exists(newPath)) {
+ // is this --monitor use?
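+ // In --monitor mode the move target may have already been renamed or removed locally before this event is processed, so a missing newPath is logged and the move is skipped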
+ if (appConfig.getValueBool("monitor")) { + addLogEntry("uploadMoveItem target has disappeared: " ~ newPath, ["verbose"]); + return; + } + } + + // Configure the modification JSON item + SysTime mtime; + if (appConfig.getValueBool("monitor")) { + // Use the newPath modified timestamp + mtime = timeLastModified(newPath).toUTC(); + } else { + // Use the current system time + mtime = Clock.currTime().toUTC(); + } + + JSONValue data = [ + "name": JSONValue(baseName(newPath)), + "parentReference": JSONValue([ + "id": parentItem.id + ]), + "fileSystemInfo": JSONValue([ + "lastModifiedDateTime": mtime.toISOExtString() + ]) + ]; + + // Perform the move operation on OneDrive + JSONValue response; + + // Create a new API Instance for this thread and initialise it + OneDriveApi movePathOnlineApiInstance; + movePathOnlineApiInstance = new OneDriveApi(appConfig); + movePathOnlineApiInstance.initialise(); + + try { + response = movePathOnlineApiInstance.updateById(oldItem.driveId, oldItem.id, data, oldItem.eTag); + } catch (OneDriveException e) { + if (e.httpStatusCode == 412) { + // OneDrive threw a 412 error, most likely: ETag does not match current item's value + // Retry without eTag + addLogEntry("File Move Failed - OneDrive eTag / cTag match issue", ["debug"]); + addLogEntry("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting to move the file - gracefully handling error", ["verbose"]); + string nullTag = null; + // move the file but without the eTag + response = movePathOnlineApiInstance.updateById(oldItem.driveId, oldItem.id, data, nullTag); + } + } + // Shutdown API instance + movePathOnlineApiInstance.shutdown(); + // Free object and memory + object.destroy(movePathOnlineApiInstance); + + // save the move response from OneDrive in the database + // Is the response a valid JSON object - validation checking done in saveItem + saveItem(response); + } + } else { + // Moved item is unwanted + addLogEntry("Item has been moved to a location that is excluded from sync operations. Removing item from OneDrive"); + uploadDeletedItem(oldItem, oldPath); + } + } + + // Perform integrity validation of the file that was uploaded + bool performUploadIntegrityValidationChecks(JSONValue uploadResponse, string localFilePath, ulong localFileSize) { + + bool integrityValid = false; + + if (!disableUploadValidation) { + // Integrity validation has not been disabled (this is the default so we are always integrity checking our uploads) + if (uploadResponse.type() == JSONType.object) { + // Provided JSON is a valid JSON + ulong uploadFileSize = uploadResponse["size"].integer; + string uploadFileHash = uploadResponse["file"]["hashes"]["quickXorHash"].str; + string localFileHash = computeQuickXorHash(localFilePath); + + if ((localFileSize == uploadFileSize) && (localFileHash == uploadFileHash)) { + // Uploaded file integrity intact + addLogEntry("Uploaded local file matches reported online size and hash values", ["debug"]); + integrityValid = true; + } else { + // Upload integrity failure .. what failed? + // There are 2 scenarios where this happens: + // 1. Failed Transfer + // 2. Upload file is going to a SharePoint Site, where Microsoft enriches the file with additional metadata with no way to disable + addLogEntry("WARNING: Uploaded file integrity failure for: " ~ localFilePath, ["info", "notify"]); + + // What integrity failed - size? 
+ if (localFileSize != uploadFileSize) { + addLogEntry("WARNING: Uploaded file integrity failure - Size Mismatch", ["verbose"]); + } + // What integrity failed - hash? + if (localFileHash != uploadFileHash) { + addLogEntry("WARNING: Uploaded file integrity failure - Hash Mismatch", ["verbose"]); + } + + // What account type is this? + if (appConfig.accountType != "personal") { + // Not a personal account, thus the integrity failure is most likely due to SharePoint + addLogEntry("CAUTION: Microsoft OneDrive when using SharePoint as a backend enhances files after you upload them, which means this file may now have technical differences from your local copy, resulting in a data integrity issue.", ["verbose"]); + addLogEntry("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details", ["verbose"]); + } + // How can this be disabled? + addLogEntry("To disable the integrity checking of uploaded files use --disable-upload-validation"); + } + } else { + addLogEntry("Upload file validation unable to be performed: input JSON was invalid"); + addLogEntry("WARNING: Skipping upload integrity check for: " ~ localFilePath); + } + } else { + // We are bypassing integrity checks due to --disable-upload-validation + addLogEntry("Upload file validation disabled due to --disable-upload-validation", ["debug"]); + addLogEntry("WARNING: Skipping upload integrity check for: " ~ localFilePath, ["info", "notify"]); } - string newFolderName = baseName(destination).idup; - string destinationPathString = "/drive/root:/" ~ destinationBasePath; - - // Build up the JSON changes - JSONValue moveData = ["name": newFolderName]; - JSONValue destinationPath = ["path": destinationPathString]; - moveData["parentReference"] = destinationPath; - - // Make the change on OneDrive - auto res = onedrive.moveByPath(sourcePath, moveData); + // Is the file integrity online valid? + return integrityValid; } - // Query Office 365 SharePoint Shared Library site to obtain it's Drive ID - void querySiteCollectionForDriveID(string o365SharedLibraryName) - { + // Query Office 365 SharePoint Shared Library site name to obtain it's Drive ID + void querySiteCollectionForDriveID(string sharepointLibraryNameToQuery) { // Steps to get the ID: // 1. Query https://graph.microsoft.com/v1.0/sites?search= with the name entered // 2. Evaluate the response. A valid response will contain the description and the id. If the response comes back with nothing, the site name cannot be found or no access @@ -6221,62 +6747,69 @@ final class SyncEngine string nextLink; string[] siteSearchResults; + // Create a new API Instance for this thread and initialise it + OneDriveApi querySharePointLibraryNameApiInstance; + querySharePointLibraryNameApiInstance = new OneDriveApi(appConfig); + querySharePointLibraryNameApiInstance.initialise(); + // The account type must not be a personal account type - if (accountType == "personal"){ - log.error("ERROR: A OneDrive Personal Account cannot be used with --get-O365-drive-id. Please re-authenticate your client using a OneDrive Business Account."); + if (appConfig.accountType == "personal") { + addLogEntry("ERROR: A OneDrive Personal Account cannot be used with --get-sharepoint-drive-id. Please re-authenticate your client using a OneDrive Business Account."); return; } // What query are we performing? 
- log.log("Office 365 Library Name Query: ", o365SharedLibraryName); + addLogEntry(); + addLogEntry("Office 365 Library Name Query: " ~ sharepointLibraryNameToQuery); for (;;) { try { - siteQuery = onedrive.o365SiteSearch(nextLink); + siteQuery = querySharePointLibraryNameApiInstance.o365SiteSearch(nextLink); } catch (OneDriveException e) { - log.error("ERROR: Query of OneDrive for Office 365 Library Name failed"); + addLogEntry("ERROR: Query of OneDrive for Office 365 Library Name failed"); // Forbidden - most likely authentication scope needs to be updated if (e.httpStatusCode == 403) { - log.error("ERROR: Authentication scope needs to be updated. Use --reauth and re-authenticate client."); + addLogEntry("ERROR: Authentication scope needs to be updated. Use --reauth and re-authenticate client."); return; } // Requested resource cannot be found if (e.httpStatusCode == 404) { string siteSearchUrl; if (nextLink.empty) { - siteSearchUrl = onedrive.getSiteSearchUrl(); + siteSearchUrl = querySharePointLibraryNameApiInstance.getSiteSearchUrl(); } else { siteSearchUrl = nextLink; } // log the error - log.error("ERROR: Your OneDrive Account and Authentication Scope cannot access this OneDrive API: ", siteSearchUrl); - log.error("ERROR: To resolve, please discuss this issue with whomever supports your OneDrive and SharePoint environment."); + addLogEntry("ERROR: Your OneDrive Account and Authentication Scope cannot access this OneDrive API: " ~ siteSearchUrl); + addLogEntry("ERROR: To resolve, please discuss this issue with whomever supports your OneDrive and SharePoint environment."); return; } // HTTP request returned status code 429 (Too Many Requests) if (e.httpStatusCode == 429) { // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive children"); + handleOneDriveThrottleRequest(querySharePointLibraryNameApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive children", ["debug"]); } // HTTP request returned status code 504 (Gateway Timeout) or 429 retry if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { // re-try the specific changes queries if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query Sharepoint Sites - retrying applicable request"); - log.vdebug("siteQuery = onedrive.o365SiteSearch(nextLink) previously threw an error - retrying"); + addLogEntry("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query Sharepoint Sites - retrying applicable request"); + addLogEntry("siteQuery = onedrive.o365SiteSearch(nextLink) previously threw an error - retrying", ["debug"]); + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); Thread.sleep(dur!"seconds"(30)); } // re-try original request - retried for 429 and 504 try { - log.vdebug("Retrying Query: siteQuery = onedrive.o365SiteSearch(nextLink)"); - siteQuery = onedrive.o365SiteSearch(nextLink); - log.vdebug("Query 'siteQuery = onedrive.o365SiteSearch(nextLink)' performed successfully on re-try"); + addLogEntry("Retrying Query: siteQuery = onedrive.o365SiteSearch(nextLink)", ["debug"]); + siteQuery = querySharePointLibraryNameApiInstance.o365SiteSearch(nextLink); + addLogEntry("Query 'siteQuery = onedrive.o365SiteSearch(nextLink)' performed successfully on re-try", ["debug"]); } catch (OneDriveException e) { // display what the error is - log.vdebug("Query Error: siteQuery = onedrive.o365SiteSearch(nextLink) on re-try after delay"); + addLogEntry("Query Error: siteQuery = onedrive.o365SiteSearch(nextLink) on re-try after delay", ["debug"]); // error was not a 504 this time displayOneDriveErrorMessage(e.msg, getFunctionName!({})); return; @@ -6291,23 +6824,23 @@ final class SyncEngine // is siteQuery a valid JSON object & contain data we can use? if ((siteQuery.type() == JSONType.object) && ("value" in siteQuery)) { // valid JSON object - log.vdebug("O365 Query Response: ", siteQuery); + addLogEntry("O365 Query Response: " ~ to!string(siteQuery), ["debug"]); foreach (searchResult; siteQuery["value"].array) { - // Need an 'exclusive' match here with o365SharedLibraryName as entered - log.vdebug("Found O365 Site: ", searchResult); + // Need an 'exclusive' match here with sharepointLibraryNameToQuery as entered + addLogEntry("Found O365 Site: " ~ to!string(searchResult), ["debug"]); // 'displayName' and 'id' have to be present in the search result record in order to query the site if (("displayName" in searchResult) && ("id" in searchResult)) { - if (o365SharedLibraryName == searchResult["displayName"].str){ + if (sharepointLibraryNameToQuery == searchResult["displayName"].str){ // 'displayName' matches search request site_id = searchResult["id"].str; JSONValue siteDriveQuery; try { - siteDriveQuery = onedrive.o365SiteDrives(site_id); + siteDriveQuery = querySharePointLibraryNameApiInstance.o365SiteDrives(site_id); } catch (OneDriveException e) { - log.error("ERROR: Query of OneDrive for Office Site ID failed"); + addLogEntry("ERROR: Query of OneDrive for Office Site ID failed"); // display what the error is displayOneDriveErrorMessage(e.msg, getFunctionName!({})); return; @@ -6319,7 +6852,7 @@ final class SyncEngine foreach (driveResult; siteDriveQuery["value"].array) { // Display results writeln("-----------------------------------------------"); - log.vdebug("Site Details: ", driveResult); + addLogEntry("Site Details: " ~ to!string(driveResult), ["debug"]); found = true; writeln("Site Name: ", searchResult["displayName"].str); writeln("Library Name: ", driveResult["name"].str); @@ -6330,8 +6863,8 @@ final class SyncEngine writeln("-----------------------------------------------"); } else { // not a valid JSON object - log.error("ERROR: There was an error performing this operation on OneDrive"); - log.error("ERROR: Increase logging verbosity to assist determining why."); + addLogEntry("ERROR: There was 
an error performing this operation on Microsoft OneDrive"); + addLogEntry("ERROR: Increase logging verbosity to assist determining why."); return; } } @@ -6345,13 +6878,13 @@ final class SyncEngine if ("id" in searchResult) idAvailable = true; // Display error details for this site data - writeln(); - log.error("ERROR: SharePoint Site details not provided for: ", siteNameAvailable); - log.error("ERROR: The SharePoint Site results returned from OneDrive API do not contain the required items to match. Please check your permissions with your site administrator."); - log.error("ERROR: Your site security settings is preventing the following details from being accessed: 'displayName' or 'id'"); - log.vlog(" - Is 'displayName' available = ", displayNameAvailable); - log.vlog(" - Is 'id' available = ", idAvailable); - log.error("ERROR: To debug this further, please increase verbosity (--verbose or --verbose --verbose) to provide further insight as to what details are actually being returned."); + addLogEntry(); + addLogEntry("ERROR: SharePoint Site details not provided for: " ~ siteNameAvailable); + addLogEntry("ERROR: The SharePoint Site results returned from OneDrive API do not contain the required items to match. Please check your permissions with your site administrator."); + addLogEntry("ERROR: Your site security settings is preventing the following details from being accessed: 'displayName' or 'id'"); + addLogEntry(" - Is 'displayName' available = " ~ to!string(displayNameAvailable), ["verbose"]); + addLogEntry(" - Is 'id' available = " ~ to!string(idAvailable), ["verbose"]); + addLogEntry("ERROR: To debug this further, please increase verbosity (--verbose or --verbose --verbose) to provide further insight as to what details are actually being returned."); } } @@ -6372,15 +6905,15 @@ final class SyncEngine siteSearchResults ~= siteSearchResultsEntry; } else { // displayName and id unavailable, display in debug log the entry - log.vdebug("Bad SharePoint Data for site: ", searchResult); + addLogEntry("Bad SharePoint Data for site: " ~ to!string(searchResult), ["debug"]); } } } } } else { // not a valid JSON object - log.error("ERROR: There was an error performing this operation on OneDrive"); - log.error("ERROR: Increase logging verbosity to assist determining why."); + addLogEntry("ERROR: There was an error performing this operation on Microsoft OneDrive"); + addLogEntry("ERROR: Increase logging verbosity to assist determining why."); return; } @@ -6389,914 +6922,623 @@ final class SyncEngine if ("@odata.nextLink" in siteQuery) { // Update nextLink to next set of SharePoint library names nextLink = siteQuery["@odata.nextLink"].str; - log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink); + addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); } else break; } // Was the intended target found? if(!found) { - writeln(); - log.error("ERROR: The requested SharePoint site could not be found. Please check it's name and your permissions to access the site."); + + // Was the search a wildcard? + if (sharepointLibraryNameToQuery != "*") { + // Only print this out if the search was not a wildcard + addLogEntry(); + addLogEntry("ERROR: The requested SharePoint site could not be found. 
Please check it's name and your permissions to access the site."); + } // List all sites returned to assist user - writeln(); - log.log("The following SharePoint site names were returned:"); + addLogEntry(); + addLogEntry("The following SharePoint site names were returned:"); foreach (searchResultEntry; siteSearchResults) { // list the display name that we use to match against the user query - log.log(searchResultEntry); + addLogEntry(searchResultEntry); } } - } - - // Create an anonymous read-only shareable link for an existing file on OneDrive - void createShareableLinkForFile(string filePath, bool writeablePermissions) - { - JSONValue onedrivePathDetails; - JSONValue createShareableLinkResponse; - string driveId; - string itemId; - string fileShareLink; - - // Get the path details from OneDrive - try { - onedrivePathDetails = onedrive.getPathDetails(filePath); // Returns a JSON String for the OneDrive Path - } catch (OneDriveException e) { - log.vdebug("onedrivePathDetails = onedrive.getPathDetails(filePath); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // Requested path could not be found - log.error("ERROR: The requested path to query was not found on OneDrive"); - log.error("ERROR: Cannot create a shareable link for a file that does not exist on OneDrive"); - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling queryDriveForChanges(path);"); - createShareableLinkForFile(filePath, writeablePermissions); - // return back to original call - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying request"); - // Retry original request by calling function again to avoid replicating any further error handling - createShareableLinkForFile(filePath, writeablePermissions); - // return back to original call - return; - } else { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } - // Was a valid JSON response received? - if (onedrivePathDetails.type() == JSONType.object) { - // valid JSON response for the file was received - // Configure the required variables - driveId = onedrivePathDetails["parentReference"]["driveId"].str; - itemId = onedrivePathDetails["id"].str; - - // What sort of shareable link is required? 
- JSONValue accessScope; - if (writeablePermissions) { - // configure the read-write access scope - accessScope = [ - "type": "edit", - "scope": "anonymous" - ]; - } else { - // configure the read-only access scope (default) - accessScope = [ - "type": "view", - "scope": "anonymous" - ]; - } - - // Create the shareable file link - createShareableLinkResponse = onedrive.createShareableLink(driveId, itemId, accessScope); - if ((createShareableLinkResponse.type() == JSONType.object) && ("link" in createShareableLinkResponse)) { - // Extract the file share link from the JSON response - fileShareLink = createShareableLinkResponse["link"]["webUrl"].str; - writeln("File Shareable Link: ", fileShareLink); - if (writeablePermissions) { - writeln("Shareable Link has read-write permissions - use and provide with caution"); - } - - } else { - // not a valid JSON object - log.error("ERROR: There was an error performing this operation on OneDrive"); - log.error("ERROR: Increase logging verbosity to assist determining why."); - return; - } - } else { - // not a valid JSON object - log.error("ERROR: There was an error performing this operation on OneDrive"); - log.error("ERROR: Increase logging verbosity to assist determining why."); - return; - } + // Shutdown API instance + querySharePointLibraryNameApiInstance.shutdown(); + // Free object and memory + object.destroy(querySharePointLibraryNameApiInstance); } - // Query OneDrive for file details of a given path - void queryOneDriveForFileDetails(string localFilePath, string syncDir, string outputType) - { - // Query if file is valid locally - if (exists(localFilePath)) { - // File exists locally, does it exist in the database - // Path needs to be relative to sync_dir path - Item item; - string[] distinctDriveIds = itemdb.selectDistinctDriveIds(); - string relativePath = relativePath(localFilePath, syncDir); - bool fileInDB = false; - foreach (searchDriveId; distinctDriveIds) { - if (itemdb.selectByPath(relativePath, searchDriveId, item)) { - // File is in the local database cache - fileInDB = true; - JSONValue fileDetails; - try { - fileDetails = onedrive.getFileDetails(item.driveId, item.id); - } catch (OneDriveException e) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - - // debug output of response - log.vdebug("API Response: ", fileDetails); - - // What sort of response to we generate - // --get-file-link response - if (outputType == "URL") { - if ((fileDetails.type() == JSONType.object) && ("webUrl" in fileDetails)) { - // Valid JSON object - writeln(fileDetails["webUrl"].str); - } - } - - // --modified-by response - if (outputType == "ModifiedBy") { - if ((fileDetails.type() == JSONType.object) && ("lastModifiedBy" in fileDetails)) { - // Valid JSON object - writeln("Last modified: ", fileDetails["lastModifiedDateTime"].str); - writeln("Last modified by: ", fileDetails["lastModifiedBy"]["user"]["displayName"].str); - // if 'email' provided, add this to the output - if ("email" in fileDetails["lastModifiedBy"]["user"]) { - writeln("Email Address: ", fileDetails["lastModifiedBy"]["user"]["email"].str); - } - } - } - } - } - // was path found? 
- if (!fileInDB) { - // File has not been synced with OneDrive - log.error("Path has not been synced with OneDrive: ", localFilePath); - } - } else { - // File does not exist locally - log.error("Path not found on local system: ", localFilePath); - } - } + // Query the sync status of the client and the local system + void queryOneDriveForSyncStatus(string pathToQueryStatusOn) { - // Query the OneDrive 'drive' to determine if we are 'in sync' or if there are pending changes - void queryDriveForChanges(const(string) path) - { + // Query the account driveId and rootId to get the /delta JSON information + // Process that JSON data for relevancy // Function variables - int validChanges = 0; - long downloadSize = 0; - string driveId; - string folderId; - string deltaLink; - string thisItemId; - string thisItemParentPath; - string syncFolderName; - string syncFolderPath; - string syncFolderChildPath; - JSONValue changes; - JSONValue onedrivePathDetails; - - // Get the path details from OneDrive - try { - onedrivePathDetails = onedrive.getPathDetails(path); // Returns a JSON String for the OneDrive Path - } catch (OneDriveException e) { - log.vdebug("onedrivePathDetails = onedrive.getPathDetails(path); generated a OneDriveException"); - if (e.httpStatusCode == 404) { - // Requested path could not be found - log.error("ERROR: The requested path to query was not found on OneDrive"); - return; - } - - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling queryDriveForChanges(path);"); - queryDriveForChanges(path); - // return back to original call - return; - } - - if (e.httpStatusCode == 504) { - // HTTP request returned status code 504 (Gateway Timeout) - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' - retrying request"); - // Retry original request by calling function again to avoid replicating any further error handling - queryDriveForChanges(path); - // return back to original call - return; - } else { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } + ulong downloadSize = 0; + string deltaLink = null; + string driveIdToQuery = appConfig.defaultDriveId; + string itemIdToQuery = appConfig.defaultRootId; + JSONValue deltaChanges; - if(isItemRemote(onedrivePathDetails)){ - // remote changes - driveId = onedrivePathDetails["remoteItem"]["parentReference"]["driveId"].str; // Should give something like 66d53be8a5056eca - folderId = onedrivePathDetails["remoteItem"]["id"].str; // Should give something like BC7D88EC1F539DCF!107 - syncFolderName = onedrivePathDetails["name"].str; - // A remote drive item will not have ["parentReference"]["path"] - syncFolderPath = ""; - syncFolderChildPath = ""; - } else { - driveId = defaultDriveId; - folderId = onedrivePathDetails["id"].str; // Should give something like 12345ABCDE1234A1!101 - syncFolderName = onedrivePathDetails["name"].str; - if (hasParentReferencePath(onedrivePathDetails)) { - syncFolderPath = onedrivePathDetails["parentReference"]["path"].str; - syncFolderChildPath = syncFolderPath ~ "/" ~ syncFolderName ~ "/"; - } else { - // root drive item will not have 
["parentReference"]["path"] - syncFolderPath = ""; - syncFolderChildPath = ""; - } - } + // Array of JSON items + JSONValue[] jsonItemsArray; - // Query Database for the deltaLink - deltaLink = itemdb.getDeltaLink(driveId, folderId); + // Query Database for a potential deltaLink starting point + deltaLink = itemDB.getDeltaLink(driveIdToQuery, itemIdToQuery); - const(char)[] idToQuery; - if (driveId == defaultDriveId) { - // The drive id matches our users default drive id - idToQuery = defaultRootId.dup; - } else { - // The drive id does not match our users default drive id - // Potentially the 'path id' we are requesting the details of is a Shared Folder (remote item) - // Use folderId - idToQuery = folderId; - } + // Log what we are doing + addLogEntry("Querying the change status of Drive ID: " ~ driveIdToQuery ~ " .", ["consoleOnlyNoNewLine"]); - // Query OneDrive changes - try { - changes = onedrive.viewChangesByItemId(driveId, idToQuery, deltaLink); - } catch (OneDriveException e) { - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - calling queryDriveForChanges(path);"); - queryDriveForChanges(path); - // return back to original call - return; - } else { - // OneDrive threw an error - log.vdebug("Error query: changes = onedrive.viewChangesById(driveId, idToQuery, deltaLink)"); - log.vdebug("OneDrive threw an error when querying for these changes:"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - log.vdebug("Previous deltaLink: ", deltaLink); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return; - } - } + // Query the OenDrive API using the applicable details, following nextLink if applicable + // Create a new API Instance for querying /delta and initialise it + OneDriveApi getDeltaQueryOneDriveApiInstance; + getDeltaQueryOneDriveApiInstance = new OneDriveApi(appConfig); + getDeltaQueryOneDriveApiInstance.initialise(); - // Are there any changes on OneDrive? - if (count(changes["value"].array) != 0) { - // Were we given a remote path to check if we are in sync for, or the root? - if (path != "/") { - // we were given a directory to check, we need to validate the list of changes against this path only - foreach (item; changes["value"].array) { - // Is this change valid for the 'path' we are checking? - if (hasParentReferencePath(item)) { - thisItemId = item["parentReference"]["id"].str; - thisItemParentPath = item["parentReference"]["path"].str; - } else { - thisItemId = item["id"].str; - // Is the defaultDriveId == driveId - if (driveId == defaultDriveId){ - // 'root' items will not have ["parentReference"]["path"] - if (isItemRoot(item)){ - thisItemParentPath = ""; + for (;;) { + // Add a processing '.' + addLogEntry(".", ["consoleOnlyNoNewLine"]); + + // Get the /delta changes via the OneDrive API + // getDeltaChangesByItemId has the re-try logic for transient errors + deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, deltaLink, getDeltaQueryOneDriveApiInstance); + + // If the initial deltaChanges response is an invalid JSON object, keep trying .. 
+ if (deltaChanges.type() != JSONType.object) { + while (deltaChanges.type() != JSONType.object) { + // Handle the invalid JSON response adn retry + addLogEntry("ERROR: Query of the OneDrive API via deltaChanges = getDeltaChangesByItemId() returned an invalid JSON response", ["debug"]); + deltaChanges = getDeltaChangesByItemId(driveIdToQuery, itemIdToQuery, deltaLink, getDeltaQueryOneDriveApiInstance); + } + } + + // We have a valid deltaChanges JSON array. This means we have at least 200+ JSON items to process. + // The API response however cannot be run in parallel as the OneDrive API sends the JSON items in the order in which they must be processed + foreach (onedriveJSONItem; deltaChanges["value"].array) { + // is the JSON a root object - we dont want to count this + if (!isItemRoot(onedriveJSONItem)) { + // Files are the only item that we want to calculate + if (isItemFile(onedriveJSONItem)) { + // JSON item is a file + // Is the item filtered out due to client side filtering rules? + if (!checkJSONAgainstClientSideFiltering(onedriveJSONItem)) { + // Is the path of this JSON item 'in-scope' or 'out-of-scope' ? + if (pathToQueryStatusOn != "/") { + // We need to check the path of this item against pathToQueryStatusOn + string thisItemPath = ""; + if (("path" in onedriveJSONItem["parentReference"]) != null) { + // If there is a parent reference path, try and use it + string selfBuiltPath = onedriveJSONItem["parentReference"]["path"].str ~ "/" ~ onedriveJSONItem["name"].str; + + // Check for ':' and split if present + auto splitIndex = selfBuiltPath.indexOf(":"); + if (splitIndex != -1) { + // Keep only the part after ':' + selfBuiltPath = selfBuiltPath[splitIndex + 1 .. $]; + } + + // Set thisItemPath to the self built path + thisItemPath = selfBuiltPath; + } else { + // no parent reference path available + thisItemPath = onedriveJSONItem["name"].str; + } + // can we find 'pathToQueryStatusOn' in 'thisItemPath' ? + if (canFind(thisItemPath, pathToQueryStatusOn)) { + // Add this to the array for processing + jsonItemsArray ~= onedriveJSONItem; + } } else { - thisItemParentPath = item["parentReference"]["path"].str; + // We are not doing a --single-directory check + // Add this to the array for processing + jsonItemsArray ~= onedriveJSONItem; } - } else { - // A remote drive item will not have ["parentReference"]["path"] - thisItemParentPath = ""; - } - } - - if ( (thisItemId == folderId) || (canFind(thisItemParentPath, syncFolderChildPath)) || (canFind(thisItemParentPath, folderId)) ){ - // This is a change we want count - validChanges++; - if ((isItemFile(item)) && (hasFileSize(item))) { - downloadSize = downloadSize + item["size"].integer; } } } - // Are there any valid changes? 
- if (validChanges != 0){ - writeln("Selected directory is out of sync with OneDrive"); - if (downloadSize > 0){ - downloadSize = downloadSize / 1000; - writeln("Approximate data to download from OneDrive: ", downloadSize, " KB"); - } - } else { - writeln("No pending remote changes - selected directory is in sync"); - } - } else { - writeln("Local directory is out of sync with OneDrive"); - foreach (item; changes["value"].array) { - if ((isItemFile(item)) && (hasFileSize(item))) { - downloadSize = downloadSize + item["size"].integer; - } - } - if (downloadSize > 0){ - downloadSize = downloadSize / 1000; - writeln("Approximate data to download from OneDrive: ", downloadSize, " KB"); - } } - } else { - writeln("No pending remote changes - in sync"); - } - } - - // Create a fake OneDrive response suitable for use with saveItem - JSONValue createFakeResponse(const(string) path) - { - import std.digest.sha; - // Generate a simulated JSON response which can be used - // At a minimum we need: - // 1. eTag - // 2. cTag - // 3. fileSystemInfo - // 4. file or folder. if file, hash of file - // 5. id - // 6. name - // 7. parent reference - - string fakeDriveId = defaultDriveId; - string fakeRootId = defaultRootId; - SysTime mtime = timeLastModified(path).toUTC(); - - // Need to update the 'fakeDriveId' & 'fakeRootId' with elements from the --dry-run database - // Otherwise some calls to validate objects will fail as the actual driveId being used is invalid - string parentPath = dirName(path); - Item databaseItem; - - if (parentPath != ".") { - // Not a 'root' parent - // For each driveid in the existing driveIDsArray - foreach (searchDriveId; driveIDsArray) { - log.vdebug("FakeResponse: searching database for: ", searchDriveId, " ", parentPath); - if (itemdb.selectByPath(parentPath, searchDriveId, databaseItem)) { - log.vdebug("FakeResponse: Found Database Item: ", databaseItem); - fakeDriveId = databaseItem.driveId; - fakeRootId = databaseItem.id; - } + + // The response may contain either @odata.deltaLink or @odata.nextLink + if ("@odata.deltaLink" in deltaChanges) { + deltaLink = deltaChanges["@odata.deltaLink"].str; + addLogEntry("Setting next deltaLink to (@odata.deltaLink): " ~ deltaLink, ["debug"]); } + + // Update deltaLink to next changeSet bundle + if ("@odata.nextLink" in deltaChanges) { + deltaLink = deltaChanges["@odata.nextLink"].str; + addLogEntry("Setting next deltaLink to (@odata.nextLink): " ~ deltaLink, ["debug"]); + } + else break; } + // Needed after printing out '....' 
when fetching changes from OneDrive API + addLogEntry("\n", ["consoleOnlyNoNewLine"]); - // real id / eTag / cTag are different format for personal / business account - auto sha1 = new SHA1Digest(); - ubyte[] fakedOneDriveItemValues = sha1.digest(path); - - JSONValue fakeResponse; - - if (isDir(path)) { - // path is a directory - fakeResponse = [ - "id": JSONValue(toHexString(fakedOneDriveItemValues)), - "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "fileSystemInfo": JSONValue([ - "createdDateTime": mtime.toISOExtString(), - "lastModifiedDateTime": mtime.toISOExtString() - ]), - "name": JSONValue(baseName(path)), - "parentReference": JSONValue([ - "driveId": JSONValue(fakeDriveId), - "driveType": JSONValue(accountType), - "id": JSONValue(fakeRootId) - ]), - "folder": JSONValue("") - ]; - } else { - // path is a file - // compute file hash - both business and personal responses use quickXorHash - string quickXorHash = computeQuickXorHash(path); - - fakeResponse = [ - "id": JSONValue(toHexString(fakedOneDriveItemValues)), - "cTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "eTag": JSONValue(toHexString(fakedOneDriveItemValues)), - "fileSystemInfo": JSONValue([ - "createdDateTime": mtime.toISOExtString(), - "lastModifiedDateTime": mtime.toISOExtString() - ]), - "name": JSONValue(baseName(path)), - "parentReference": JSONValue([ - "driveId": JSONValue(fakeDriveId), - "driveType": JSONValue(accountType), - "id": JSONValue(fakeRootId) - ]), - "file": JSONValue([ - "hashes":JSONValue([ - "quickXorHash": JSONValue(quickXorHash) - ]) - - ]) - ]; - } - - log.vdebug("Generated Fake OneDrive Response: ", fakeResponse); - return fakeResponse; - } - - void handleOneDriveThrottleRequest() - { - // If OneDrive sends a status code 429 then this function will be used to process the Retry-After response header which contains the value by which we need to wait - log.vdebug("Handling a OneDrive HTTP 429 Response Code (Too Many Requests)"); - // Read in the Retry-After HTTP header as set and delay as per this value before retrying the request - auto retryAfterValue = onedrive.getRetryAfterValue(); - log.vdebug("Using Retry-After Value = ", retryAfterValue); - - // HTTP request returned status code 429 (Too Many Requests) - // https://github.com/abraunegg/onedrive/issues/133 - // https://github.com/abraunegg/onedrive/issues/815 - - ulong delayBeforeRetry = 0; - if (retryAfterValue != 0) { - // Use the HTTP Response Header Value - delayBeforeRetry = retryAfterValue; - } else { - // Use a 120 second delay as a default given header value was zero - // This value is based on log files and data when determining correct process for 429 response handling - delayBeforeRetry = 120; - // Update that we are over-riding the provided value with a default - log.vdebug("HTTP Response Header retry-after value was 0 - Using a preconfigured default of: ", delayBeforeRetry); - } - - // Sleep thread as per request - log.log("Thread sleeping due to 'HTTP request returned status code 429' - The request has been throttled"); - log.log("Sleeping for ", delayBeforeRetry, " seconds"); - Thread.sleep(dur!"seconds"(delayBeforeRetry)); - - // Reset retry-after value to zero as we have used this value now and it may be changed in the future to a different value - onedrive.resetRetryAfterValue(); - } - - // Generage a /delta compatible response when using National Azure AD deployments that do not support /delta queries - // see: 
https://docs.microsoft.com/en-us/graph/deployments#supported-features - JSONValue generateDeltaResponse(const(char)[] driveId, const(char)[] idToQuery) - { - // JSON value which will be responded with - JSONValue deltaResponse; - // initial data - JSONValue rootData; - JSONValue driveData; - JSONValue topLevelChildren; - JSONValue[] childrenData; - string nextLink; - - // Get drive details for the provided driveId - try { - driveData = onedrive.getPathDetailsById(driveId, idToQuery); - } catch (OneDriveException e) { - log.vdebug("driveData = onedrive.getPathDetailsById(driveId, idToQuery) generated a OneDriveException"); - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - if (e.httpStatusCode == 429) { - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - retrying applicable request"); - handleOneDriveThrottleRequest(); + // Are there any JSON items to process? + if (count(jsonItemsArray) != 0) { + // There are items to process + foreach (onedriveJSONItem; jsonItemsArray.array) { + + // variables we need + string thisItemParentDriveId; + string thisItemId; + string thisItemHash; + bool existingDBEntry = false; + + // Is this file a remote item (on a shared folder) ? + if (isItemRemote(onedriveJSONItem)) { + // remote drive item + thisItemParentDriveId = onedriveJSONItem["remoteItem"]["parentReference"]["driveId"].str; + thisItemId = onedriveJSONItem["id"].str; + } else { + // standard drive item + thisItemParentDriveId = onedriveJSONItem["parentReference"]["driveId"].str; + thisItemId = onedriveJSONItem["id"].str; } - if (e.httpStatusCode == 504) { - log.vdebug("Retrying original request that generated the HTTP 504 (Gateway Timeout) - retrying applicable request"); - Thread.sleep(dur!"seconds"(30)); + + // Get the file hash + if (hasHashes(onedriveJSONItem)) { + thisItemHash = onedriveJSONItem["file"]["hashes"]["quickXorHash"].str; + + // Check if the item has been seen before + Item existingDatabaseItem; + existingDBEntry = itemDB.selectById(thisItemParentDriveId, thisItemId, existingDatabaseItem); + + if (existingDBEntry) { + // item exists in database .. do the database details match the JSON record? + if (existingDatabaseItem.quickXorHash != thisItemHash) { + // file hash is different, this will trigger a download event + downloadSize = downloadSize + onedriveJSONItem["size"].integer; + } + } else { + // item does not exist in the database + // this item has already passed client side filtering rules (skip_dir, skip_file, sync_list) + // this will trigger a download event + downloadSize = downloadSize + onedriveJSONItem["size"].integer; + } } - // Retry original request by calling function again to avoid replicating any further error handling - driveData = onedrive.getPathDetailsById(driveId, idToQuery); + } + } + + // Was anything detected that would constitute a download? 
+ if (downloadSize > 0) { + // we have something to download + if (pathToQueryStatusOn != "/") { + writeln("The selected local directory via --single-directory is out of sync with Microsoft OneDrive"); } else { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + writeln("The configured local 'sync_dir' directory is out of sync with Microsoft OneDrive"); } + writeln("Approximate data to download from Microsoft OneDrive: ", (downloadSize/1024), " KB"); + } else { + // No changes were returned + writeln("There are no pending changes from Microsoft OneDrive; your local directory matches the data online."); } + } + + // Query OneDrive for file details of a given path, returning either the 'webURL' or 'lastModifiedBy' JSON facet + void queryOneDriveForFileDetails(string inputFilePath, string runtimePath, string outputType) { + + // Calculate the full local file path + string fullLocalFilePath = buildNormalizedPath(buildPath(runtimePath, inputFilePath)); - if (!isItemRoot(driveData)) { - // Get root details for the provided driveId - try { - rootData = onedrive.getDriveIdRoot(driveId); - } catch (OneDriveException e) { - log.vdebug("rootData = onedrive.getDriveIdRoot(driveId) generated a OneDriveException"); - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - if (e.httpStatusCode == 429) { - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - retrying applicable request"); - handleOneDriveThrottleRequest(); + // Query if file is valid locally + if (exists(fullLocalFilePath)) { + // search drive_id list + string[] distinctDriveIds = itemDB.selectDistinctDriveIds(); + bool pathInDB = false; + Item dbItem; + + foreach (searchDriveId; distinctDriveIds) { + // Does this path exist in the database, use the 'inputFilePath' + if (itemDB.selectByPath(inputFilePath, searchDriveId, dbItem)) { + // item is in the database + pathInDB = true; + JSONValue fileDetailsFromOneDrive; + + // Create a new API Instance for this thread and initialise it + OneDriveApi queryOneDriveForFileDetailsApiInstance; + queryOneDriveForFileDetailsApiInstance = new OneDriveApi(appConfig); + queryOneDriveForFileDetailsApiInstance.initialise(); + + try { + fileDetailsFromOneDrive = queryOneDriveForFileDetailsApiInstance.getPathDetailsById(dbItem.driveId, dbItem.id); + } catch (OneDriveException exception) { + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + return; } - if (e.httpStatusCode == 504) { - log.vdebug("Retrying original request that generated the HTTP 504 (Gateway Timeout) - retrying applicable request"); - Thread.sleep(dur!"seconds"(30)); + + // Is the API response a valid JSON file? 
+ if (fileDetailsFromOneDrive.type() == JSONType.object) { + + // debug output of response + addLogEntry("API Response: " ~ to!string(fileDetailsFromOneDrive), ["debug"]); + + // What sort of response to we generate + // --get-file-link response + if (outputType == "URL") { + if ((fileDetailsFromOneDrive.type() == JSONType.object) && ("webUrl" in fileDetailsFromOneDrive)) { + // Valid JSON object + addLogEntry(); + writeln("WebURL: ", fileDetailsFromOneDrive["webUrl"].str); + } + } + + // --modified-by response + if (outputType == "ModifiedBy") { + if ((fileDetailsFromOneDrive.type() == JSONType.object) && ("lastModifiedBy" in fileDetailsFromOneDrive)) { + // Valid JSON object + writeln(); + writeln("Last modified: ", fileDetailsFromOneDrive["lastModifiedDateTime"].str); + writeln("Last modified by: ", fileDetailsFromOneDrive["lastModifiedBy"]["user"]["displayName"].str); + // if 'email' provided, add this to the output + if ("email" in fileDetailsFromOneDrive["lastModifiedBy"]["user"]) { + writeln("Email Address: ", fileDetailsFromOneDrive["lastModifiedBy"]["user"]["email"].str); + } + } + } + + // --create-share-link response + if (outputType == "ShareableLink") { + + JSONValue accessScope; + JSONValue createShareableLinkResponse; + string thisDriveId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; + string thisItemId = fileDetailsFromOneDrive["id"].str; + string fileShareLink; + bool writeablePermissions = appConfig.getValueBool("with_editing_perms"); + + // What sort of shareable link is required? + if (writeablePermissions) { + // configure the read-write access scope + accessScope = [ + "type": "edit", + "scope": "anonymous" + ]; + } else { + // configure the read-only access scope (default) + accessScope = [ + "type": "view", + "scope": "anonymous" + ]; + } + + // Try and create the shareable file link + try { + createShareableLinkResponse = queryOneDriveForFileDetailsApiInstance.createShareableLink(thisDriveId, thisItemId, accessScope); + } catch (OneDriveException exception) { + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + return; + } + + // Is the API response a valid JSON file? + if ((createShareableLinkResponse.type() == JSONType.object) && ("link" in createShareableLinkResponse)) { + // Extract the file share link from the JSON response + fileShareLink = createShareableLinkResponse["link"]["webUrl"].str; + writeln("File Shareable Link: ", fileShareLink); + if (writeablePermissions) { + writeln("Shareable Link has read-write permissions - use and provide with caution"); + } + } + } } - // Retry original request by calling function again to avoid replicating any further error handling - rootData = onedrive.getDriveIdRoot(driveId); - } else { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + // Shutdown the API access + queryOneDriveForFileDetailsApiInstance.shutdown(); + // Free object and memory + object.destroy(queryOneDriveForFileDetailsApiInstance); } } - // Add driveData JSON data to array - log.vlog("Adding OneDrive root details for processing"); - childrenData ~= rootData; + + // was path found? 
+ if (!pathInDB) { + // File has not been synced with OneDrive + addLogEntry("Selected path has not been synced with OneDrive: " ~ inputFilePath); + } + } else { + // File does not exist locally + addLogEntry("Selected path not found on local system: " ~ inputFilePath); + } + } + + // Query OneDrive for the quota details + void queryOneDriveForQuotaDetails() { + // This function is similar to getRemainingFreeSpace() but is different in data being analysed and output method + JSONValue currentDriveQuota; + string driveId; + + if (appConfig.getValueString("drive_id").length) { + driveId = appConfig.getValueString("drive_id"); + } else { + driveId = appConfig.defaultDriveId; } - // Add driveData JSON data to array - log.vlog("Adding OneDrive folder details for processing"); - childrenData ~= driveData; + try { + // Create a new OneDrive API instance + OneDriveApi getCurrentDriveQuotaApiInstance; + getCurrentDriveQuotaApiInstance = new OneDriveApi(appConfig); + getCurrentDriveQuotaApiInstance.initialise(); + addLogEntry("Seeking available quota for this drive id: " ~ driveId, ["debug"]); + currentDriveQuota = getCurrentDriveQuotaApiInstance.getDriveQuota(driveId); + // Shut this API instance down + getCurrentDriveQuotaApiInstance.shutdown(); + // Free object and memory + object.destroy(getCurrentDriveQuotaApiInstance); + } catch (OneDriveException e) { + addLogEntry("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException", ["debug"]); + } - for (;;) { - // query top level children - try { - topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink); - } catch (OneDriveException e) { - // OneDrive threw an error - log.vdebug("------------------------------------------------------------------"); - log.vdebug("Query Error: topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)"); - log.vdebug("driveId: ", driveId); - log.vdebug("idToQuery: ", idToQuery); - log.vdebug("nextLink: ", nextLink); + // validate that currentDriveQuota is a JSON value + if (currentDriveQuota.type() == JSONType.object) { + // was 'quota' in response? + if ("quota" in currentDriveQuota) { + + // debug output of response + addLogEntry("currentDriveQuota: " ~ to!string(currentDriveQuota), ["debug"]); - // HTTP request returned status code 404 (Not Found) - if (e.httpStatusCode == 404) { - // Stop application - log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'"); - log.log("The item id to query was not found on OneDrive"); - log.log("\nRemove your '", cfg.databaseFilePath, "' file and try to sync again\n"); + // human readable output of response + string deletedValue = "Not Provided"; + string remainingValue = "Not Provided"; + string stateValue = "Not Provided"; + string totalValue = "Not Provided"; + string usedValue = "Not Provided"; + + // Update values + if ("deleted" in currentDriveQuota["quota"]) { + deletedValue = byteToGibiByte(currentDriveQuota["quota"]["deleted"].integer); } - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
- handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive children"); + if ("remaining" in currentDriveQuota["quota"]) { + remainingValue = byteToGibiByte(currentDriveQuota["quota"]["remaining"].integer); } - // HTTP request returned status code 500 (Internal Server Error) - if (e.httpStatusCode == 500) { - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + if ("state" in currentDriveQuota["quota"]) { + stateValue = currentDriveQuota["quota"]["state"].str; } - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query OneDrive drive children - retrying applicable request"); - log.vdebug("topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. - log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429 and 504 - try { - log.vdebug("Retrying Query: topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)"); - topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink); - log.vdebug("Query 'topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)' performed successfully on re-try"); - } catch (OneDriveException e) { - // display what the error is - log.vdebug("Query Error: topLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink) on re-try after delay"); - // error was not a 504 this time - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - } else { - // Default operation if not 404, 410, 429, 500 or 504 errors - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + if ("total" in currentDriveQuota["quota"]) { + totalValue = byteToGibiByte(currentDriveQuota["quota"]["total"].integer); } - } - - // process top level children - log.vlog("Adding ", count(topLevelChildren["value"].array), " OneDrive items for processing from OneDrive folder"); - foreach (child; topLevelChildren["value"].array) { - // add this child to the array of objects - childrenData ~= child; - // is this child a folder? 
- if (isItemFolder(child)){ - // We have to query this folders children if childCount > 0 - if (child["folder"]["childCount"].integer > 0){ - // This child folder has children - string childIdToQuery = child["id"].str; - string childDriveToQuery = child["parentReference"]["driveId"].str; - auto childParentPath = child["parentReference"]["path"].str.split(":"); - string folderPathToScan = childParentPath[1] ~ "/" ~ child["name"].str; - string pathForLogging = "/" ~ driveData["name"].str ~ "/" ~ child["name"].str; - JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, pathForLogging); - foreach (grandChild; grandChildrenData.array) { - // add the grandchild to the array - childrenData ~= grandChild; - } - } + + if ("used" in currentDriveQuota["quota"]) { + usedValue = byteToGibiByte(currentDriveQuota["quota"]["used"].integer); } + + writeln("Microsoft OneDrive quota information as reported for this Drive ID: ", driveId); + writeln(); + writeln("Deleted: ", deletedValue, " GB (", currentDriveQuota["quota"]["deleted"].integer, " bytes)"); + writeln("Remaining: ", remainingValue, " GB (", currentDriveQuota["quota"]["remaining"].integer, " bytes)"); + writeln("State: ", stateValue); + writeln("Total: ", totalValue, " GB (", currentDriveQuota["quota"]["total"].integer, " bytes)"); + writeln("Used: ", usedValue, " GB (", currentDriveQuota["quota"]["used"].integer, " bytes)"); + writeln(); + } else { + writeln("Microsoft OneDrive quota information is being restricted for this Drive ID: ", driveId); } - // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response - // to indicate more items are available and provide the request URL for the next page of items. 
- if ("@odata.nextLink" in topLevelChildren) { - // Update nextLink to next changeSet bundle - log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink); - nextLink = topLevelChildren["@odata.nextLink"].str; - } else break; + + } + } + + // Query the system for session_upload.* files + bool checkForInterruptedSessionUploads() { + + bool interruptedUploads = false; + ulong interruptedUploadsCount; + + // Scan the filesystem for the files we are interested in, build up interruptedUploadsSessionFiles array + foreach (sessionFile; dirEntries(appConfig.configDirName, "session_upload.*", SpanMode.shallow)) { + // calculate the full path + string tempPath = buildNormalizedPath(buildPath(appConfig.configDirName, sessionFile)); + // add to array + interruptedUploadsSessionFiles ~= [tempPath]; } - // craft response from all returned elements - deltaResponse = [ - "@odata.context": JSONValue("https://graph.microsoft.com/v1.0/$metadata#Collection(driveItem)"), - "value": JSONValue(childrenData.array) - ]; + // Count all 'session_upload' files in appConfig.configDirName + //interruptedUploadsCount = count(dirEntries(appConfig.configDirName, "session_upload.*", SpanMode.shallow)); + interruptedUploadsCount = count(interruptedUploadsSessionFiles); + if (interruptedUploadsCount != 0) { + interruptedUploads = true; + } - // return the generated JSON response - return deltaResponse; + // return if there are interrupted uploads to process + return interruptedUploads; } - // query child for children - JSONValue[] queryForChildren(const(char)[] driveId, const(char)[] idToQuery, const(char)[] childParentPath, string pathForLogging) - { - // function variables - JSONValue thisLevelChildren; - JSONValue[] thisLevelChildrenData; - string nextLink; - - for (;;) { - // query children - thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink); - - // process this level children - if (!childParentPath.empty) { - // We dont use childParentPath to log, as this poses an information leak risk. - // The full parent path of the child, as per the JSON might be: - // /Level 1/Level 2/Level 3/Child Shared Folder/some folder/another folder - // But 'Child Shared Folder' is what is shared, thus '/Level 1/Level 2/Level 3/' is a potential information leak if logged. - // Plus, the application output now shows accuratly what is being shared - so that is a good thing. - log.vlog("Adding ", count(thisLevelChildren["value"].array), " OneDrive items for processing from ", pathForLogging); - } - foreach (child; thisLevelChildren["value"].array) { - // add this child to the array of objects - thisLevelChildrenData ~= child; - // is this child a folder? 
-				if (isItemFolder(child)){
-					// We have to query this folders children if childCount > 0
-					if (child["folder"]["childCount"].integer > 0){
-						// This child folder has children
-						string childIdToQuery = child["id"].str;
-						string childDriveToQuery = child["parentReference"]["driveId"].str;
-						auto grandchildParentPath = child["parentReference"]["path"].str.split(":");
-						string folderPathToScan = grandchildParentPath[1] ~ "/" ~ child["name"].str;
-						string newLoggingPath = pathForLogging ~ "/" ~ child["name"].str;
-						JSONValue[] grandChildrenData = queryForChildren(childDriveToQuery, childIdToQuery, folderPathToScan, newLoggingPath);
-						foreach (grandChild; grandChildrenData.array) {
-							// add the grandchild to the array
-							thisLevelChildrenData ~= grandChild;
-						}
+	// Process interrupted 'session_upload' files
+	void processForInterruptedSessionUploads() {
+		// For each upload_session file that has been found, process the data to ensure it is still valid
+		foreach (sessionFilePath; interruptedUploadsSessionFiles) {
+			if (!validateUploadSessionFileData(sessionFilePath)) {
+				// Remove upload_session file as it is invalid
+				// upload_session file contains an error - cannot resume this session
+				addLogEntry("Restore file upload session failed - cleaning up resumable session data file: " ~ sessionFilePath, ["verbose"]);
+				
+				// cleanup session path
+				if (exists(sessionFilePath)) {
+					if (!dryRun) {
+						remove(sessionFilePath);
+					}
+				}
+			}
 				}
 			}
-			// If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response
-			// to indicate more items are available and provide the request URL for the next page of items.
-			if ("@odata.nextLink" in thisLevelChildren) {
-				// Update nextLink to next changeSet bundle
-				nextLink = thisLevelChildren["@odata.nextLink"].str;
-				log.vdebug("Setting nextLink to (@odata.nextLink): ", nextLink);
-			} else break;
 		}
-		// return response
-		return thisLevelChildrenData;
-	}
-	
-	// Query from OneDrive the child objects for this element
-	JSONValue queryThisLevelChildren(const(char)[] driveId, const(char)[] idToQuery, string nextLink)
-	{
-		JSONValue thisLevelChildren;
-		
-		// query children
-		try {
-			// attempt API call
-			log.vdebug("Attempting Query: thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)");
-			thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink);
-			log.vdebug("Query 'thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)' performed successfully");
-		} catch (OneDriveException e) {
-			// OneDrive threw an error
-			log.vdebug("------------------------------------------------------------------");
-			log.vdebug("Query Error: thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink)");
-			log.vdebug("driveId: ", driveId);
-			log.vdebug("idToQuery: ", idToQuery);
-			log.vdebug("nextLink: ", nextLink);
+		// At this point we should have an array of JSON items to resume uploading
+		if (count(jsonItemsToResumeUpload) > 0) {
+			// there are valid items to resume upload
+			
+			// Let's deal with all the JSON items that need to be resumed for upload in a batch process
+			ulong batchSize = appConfig.concurrentThreads;
+			ulong batchCount = (jsonItemsToResumeUpload.length + batchSize - 1) / batchSize;
+			ulong batchesProcessed = 0;
 			
-			// HTTP request returned status code 404 (Not Found)
-			if (e.httpStatusCode == 404) {
-				// Stop application
-				log.log("\n\nOneDrive returned a 'HTTP 404 - Item not found'");
-				log.log("The item id to query was not found on OneDrive");
-				log.log("\nRemove your '", 
cfg.databaseFilePath, "' file and try to sync again\n"); - } - - // HTTP request returned status code 429 (Too Many Requests) - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to query OneDrive drive children"); - } - - // HTTP request returned status code 504 (Gateway Timeout) or 429 retry - if ((e.httpStatusCode == 429) || (e.httpStatusCode == 504)) { - // re-try the specific changes queries - if (e.httpStatusCode == 504) { - // transient error - try again in 30 seconds - log.log("OneDrive returned a 'HTTP 504 - Gateway Timeout' when attempting to query OneDrive drive children - retrying applicable request"); - log.vdebug("thisLevelChildren = onedrive.listChildren(driveId, idToQuery, nextLink) previously threw an error - retrying"); - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. - log.vdebug("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request"); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429 and 504 - but loop back calling this function - log.vdebug("Retrying Query: thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink)"); - thisLevelChildren = queryThisLevelChildren(driveId, idToQuery, nextLink); - } else { - // Default operation if not 404, 429 or 504 errors - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + foreach (chunk; jsonItemsToResumeUpload.chunks(batchSize)) { + // send an array containing 'appConfig.concurrentThreads' (16) JSON items to resume upload + resumeSessionUploadsInParallel(chunk); } } - // return response - return thisLevelChildren; } - // OneDrive Business Shared Folder support - void listOneDriveBusinessSharedFolders() - { - // List OneDrive Business Shared Folders - log.log("\nListing available OneDrive Business Shared Folders:"); - // Query the GET /me/drive/sharedWithMe API - JSONValue graphQuery; + bool validateUploadSessionFileData(string sessionFilePath) { + + JSONValue sessionFileData; + + // Try and read the text from the session file as a JSON array try { - graphQuery = onedrive.getSharedWithMe(); - } catch (OneDriveException e) { - if (e.httpStatusCode == 401) { - // HTTP request returned status code 401 (Unauthorized) - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - handleClientUnauthorised(); + sessionFileData = readText(sessionFilePath).parseJSON(); + } catch (JSONException e) { + addLogEntry("SESSION-RESUME: Invalid JSON data in: " ~ sessionFilePath, ["debug"]); + return false; + } + + // Does the file we wish to resume uploading exist locally still? + if ("localPath" in sessionFileData) { + string sessionLocalFilePath = sessionFileData["localPath"].str; + addLogEntry("SESSION-RESUME: sessionLocalFilePath: " ~ sessionLocalFilePath, ["debug"]); + + // Does the file exist? + if (!exists(sessionLocalFilePath)) { + addLogEntry("The local file to upload does not exist locally anymore", ["verbose"]); + return false; } - if (e.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). 
We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(); - // Retry original request by calling function again to avoid replicating any further error handling - log.vdebug("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - graphQuery = onedrive.getSharedWithMe();"); - graphQuery = onedrive.getSharedWithMe(); + + // Can we read the file? + if (!readLocalFile(sessionLocalFilePath)) { + // filesystem error already returned if unable to read + return false; } - if (e.httpStatusCode >= 500) { - // There was a HTTP 5xx Server Side Error - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // Must exit here - onedrive.shutdown(); - exit(-1); + + } else { + addLogEntry("SESSION-RESUME: No localPath data in: " ~ sessionFilePath, ["debug"]); + return false; + } + + // Check the session data for expirationDateTime + if ("expirationDateTime" in sessionFileData) { + auto expiration = SysTime.fromISOExtString(sessionFileData["expirationDateTime"].str); + if (expiration < Clock.currTime()) { + addLogEntry("The upload session has expired for: " ~ sessionFilePath, ["verbose"]); + return false; } + } else { + addLogEntry("SESSION-RESUME: No expirationDateTime data in: " ~ sessionFilePath, ["debug"]); + return false; } - if (graphQuery.type() == JSONType.object) { - if (count(graphQuery["value"].array) == 0) { - // no shared folders returned - write("\nNo OneDrive Business Shared Folders were returned\n"); - } else { - // shared folders were returned - log.vdebug("onedrive.getSharedWithMe API Response: ", graphQuery); - foreach (searchResult; graphQuery["value"].array) { - // loop variables - string sharedFolderName; - string sharedByName; - string sharedByEmail; - // is the shared item with us a 'folder' ? 
- // we only handle folders, not files or other items - if (isItemFolder(searchResult)) { - // Debug response output - log.vdebug("shared folder entry: ", searchResult); - sharedFolderName = searchResult["name"].str; - - // configure who this was shared by - if ("sharedBy" in searchResult["remoteItem"]["shared"]) { - // we have shared by details we can use - if ("displayName" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByName = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["displayName"].str; - } - if ("email" in searchResult["remoteItem"]["shared"]["sharedBy"]["user"]) { - sharedByEmail = searchResult["remoteItem"]["shared"]["sharedBy"]["user"]["email"].str; - } - } - // Output query result - log.log("---------------------------------------"); - log.log("Shared Folder: ", sharedFolderName); - if ((sharedByName != "") && (sharedByEmail != "")) { - log.log("Shared By: ", sharedByName, " (", sharedByEmail, ")"); - } else { - if (sharedByName != "") { - log.log("Shared By: ", sharedByName); - } - } - log.vlog("Item Id: ", searchResult["remoteItem"]["id"].str); - log.vlog("Parent Drive Id: ", searchResult["remoteItem"]["parentReference"]["driveId"].str); - if ("id" in searchResult["remoteItem"]["parentReference"]) { - log.vlog("Parent Item Id: ", searchResult["remoteItem"]["parentReference"]["id"].str); - } + // Check the online upload status, using the uloadURL in sessionFileData + if ("uploadUrl" in sessionFileData) { + JSONValue response; + + // Create a new OneDrive API instance + OneDriveApi validateUploadSessionFileDataApiInstance; + validateUploadSessionFileDataApiInstance = new OneDriveApi(appConfig); + validateUploadSessionFileDataApiInstance.initialise(); + + try { + response = validateUploadSessionFileDataApiInstance.requestUploadStatus(sessionFileData["uploadUrl"].str); + } catch (OneDriveException e) { + // handle any onedrive error response as invalid + addLogEntry("SESSION-RESUME: Invalid response when using uploadUrl in: " ~ sessionFilePath, ["debug"]); + return false; + } + + // Shutdown API instance + validateUploadSessionFileDataApiInstance.shutdown(); + // Free object and memory + object.destroy(validateUploadSessionFileDataApiInstance); + + // Do we have a valid response from OneDrive? 
+ if (response.type() == JSONType.object) { + // Valid JSON object was returned + if (("expirationDateTime" in response) && ("nextExpectedRanges" in response)) { + // The 'uploadUrl' is valid, and the response contains elements we need + sessionFileData["expirationDateTime"] = response["expirationDateTime"]; + sessionFileData["nextExpectedRanges"] = response["nextExpectedRanges"]; + + if (sessionFileData["nextExpectedRanges"].array.length == 0) { + addLogEntry("The upload session was already completed", ["verbose"]); + return false; } + } else { + addLogEntry("SESSION-RESUME: No expirationDateTime & nextExpectedRanges data in Microsoft OneDrive API response: " ~ to!string(response), ["debug"]); + return false; } + } else { + // not a JSON object + addLogEntry("Restore file upload session failed - invalid response from Microsoft OneDrive", ["verbose"]); + return false; } - write("\n"); } else { - // Log that an invalid JSON object was returned - log.error("ERROR: onedrive.getSharedWithMe call returned an invalid JSON Object"); - } - } - - // Query itemdb.computePath() and catch potential assert when DB consistency issue occurs - string computeItemPath(string thisDriveId, string thisItemId) - { - static import core.exception; - string calculatedPath; - log.vdebug("Attempting to calculate local filesystem path for ", thisDriveId, " and ", thisItemId); - try { - calculatedPath = itemdb.computePath(thisDriveId, thisItemId); - } catch (core.exception.AssertError) { - // broken tree in the database, we cant compute the path for this item id, exit - log.error("ERROR: A database consistency issue has been caught. A --resync is needed to rebuild the database."); - // Must exit here to preserve data - onedrive.shutdown(); - exit(-1); + addLogEntry("SESSION-RESUME: No uploadUrl data in: " ~ sessionFilePath, ["debug"]); + return false; } - // return calculated path as string - return calculatedPath; - } - - void handleClientUnauthorised() - { - // common code for handling when a client is unauthorised - writeln(); - log.errorAndNotify("ERROR: Check your configuration as your refresh_token may be empty or invalid. You may need to issue a --reauth and re-authorise this client."); - writeln(); - // Must exit here - onedrive.shutdown(); - exit(-1); + // Add 'sessionFilePath' to 'sessionFileData' so that it can be used when we re-use the JSON data to resume the upload + sessionFileData["sessionFilePath"] = sessionFilePath; + + // Add sessionFileData to jsonItemsToResumeUpload as it is now valid + jsonItemsToResumeUpload ~= sessionFileData; + return true; } - // Wrapper function for makeDatabaseItem so we can check if the item, if a file, has any hashes - private Item makeItem(JSONValue onedriveJSONItem) - { - Item newDatabaseItem = makeDatabaseItem(onedriveJSONItem); - - // Check for hashes in this DB item - if (newDatabaseItem.type == ItemType.file) { - // Does this file have a size greater than 0 - zero size files will potentially not have a hash - if (hasFileSize(onedriveJSONItem)) { - if (onedriveJSONItem["size"].integer > 0) { - // Does the item have any hashes? - if ((newDatabaseItem.quickXorHash.empty) && (newDatabaseItem.sha256Hash.empty)) { - // Odd .. no hash ...... - string apiMessage = "WARNING: OneDrive API inconsistency - this file does not have any hash: "; - // This is computationally expensive .. 
but we are only doing this if there are no hashses provided: - bool parentInDatabase = itemdb.idInLocalDatabase(newDatabaseItem.driveId, newDatabaseItem.parentId); - if (parentInDatabase) { - // Calculate this item path - string newItemPath = computeItemPath(newDatabaseItem.driveId, newDatabaseItem.parentId) ~ "/" ~ newDatabaseItem.name; - log.log(apiMessage, newItemPath); - } else { - // Use the item ID - log.log(apiMessage, newDatabaseItem.id); + void resumeSessionUploadsInParallel(JSONValue[] array) { + // This function recieved an array of 16 JSON items to resume upload + foreach (i, jsonItemToResume; taskPool.parallel(array)) { + // Take each JSON item and resume upload using the JSON data + + JSONValue uploadResponse; + OneDriveApi uploadFileOneDriveApiInstance; + uploadFileOneDriveApiInstance = new OneDriveApi(appConfig); + uploadFileOneDriveApiInstance.initialise(); + + // Pull out data from this JSON element + string threadUploadSessionFilePath = jsonItemToResume["sessionFilePath"].str; + ulong thisFileSizeLocal = getSize(jsonItemToResume["localPath"].str); + + // Try to resume the session upload using the provided data + try { + uploadResponse = performSessionFileUpload(uploadFileOneDriveApiInstance, thisFileSizeLocal, jsonItemToResume, threadUploadSessionFilePath); + } catch (OneDriveException exception) { + writeln("CODING TO DO: Handle an exception when performing a resume session upload"); + } + + // Was the response from the OneDrive API a valid JSON item? + if (uploadResponse.type() == JSONType.object) { + // A valid JSON object was returned - session resumption upload sucessful + + // Are we in an --upload-only & --remove-source-files scenario? + // Use actual config values as we are doing an upload session recovery + if (localDeleteAfterUpload) { + // Log that we are deleting a local item + addLogEntry("Removing local file as --upload-only & --remove-source-files configured"); + // are we in a --dry-run scenario? + if (!dryRun) { + // No --dry-run ... process local file delete + // Only perform the delete if we have a valid file path + if (exists(jsonItemToResume["localPath"].str)) { + // file exists + addLogEntry("Removing local file: " ~ jsonItemToResume["localPath"].str, ["debug"]); + safeRemove(jsonItemToResume["localPath"].str); } } + // as file is removed, we have nothing to add to the local database + addLogEntry("Skipping adding to database as --upload-only & --remove-source-files configured", ["debug"]); + } else { + // Save JSON item in database + saveItem(uploadResponse); } + } else { + // No valid response was returned + addLogEntry("CODING TO DO: what to do when session upload resumption JSON data is not valid ... nothing ? 
error message ?"); } + + // Shutdown API instance + uploadFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(uploadFileOneDriveApiInstance); } - return newDatabaseItem; } - -} +} \ No newline at end of file diff --git a/src/upload.d b/src/upload.d deleted file mode 100644 index 012598a05..000000000 --- a/src/upload.d +++ /dev/null @@ -1,302 +0,0 @@ -import std.algorithm, std.conv, std.datetime, std.file, std.json; -import std.stdio, core.thread, std.string; -import progress, onedrive, util; -static import log; - -private long fragmentSize = 10 * 2^^20; // 10 MiB - -struct UploadSession -{ - private OneDriveApi onedrive; - private bool verbose; - // https://dev.onedrive.com/resources/uploadSession.htm - private JSONValue session; - // path where to save the session - private string sessionFilePath; - - this(OneDriveApi onedrive, string sessionFilePath) - { - assert(onedrive); - this.onedrive = onedrive; - this.sessionFilePath = sessionFilePath; - this.verbose = verbose; - } - - JSONValue upload(string localPath, const(char)[] parentDriveId, const(char)[] parentId, const(char)[] filename, const(char)[] eTag = null) - { - // Fix https://github.com/abraunegg/onedrive/issues/2 - // More Details https://github.com/OneDrive/onedrive-api-docs/issues/778 - - SysTime localFileLastModifiedTime = timeLastModified(localPath).toUTC(); - localFileLastModifiedTime.fracSecs = Duration.zero; - - JSONValue fileSystemInfo = [ - "item": JSONValue([ - "@name.conflictBehavior": JSONValue("replace"), - "fileSystemInfo": JSONValue([ - "lastModifiedDateTime": localFileLastModifiedTime.toISOExtString() - ]) - ]) - ]; - - // Try to create the upload session for this file - session = onedrive.createUploadSession(parentDriveId, parentId, filename, eTag, fileSystemInfo); - - if ("uploadUrl" in session){ - session["localPath"] = localPath; - save(); - return upload(); - } else { - // there was an error - log.vlog("Create file upload session failed ... skipping file upload"); - // return upload() will return a JSONValue response, create an empty JSONValue response to return - JSONValue response; - return response; - } - } - - /* Restore the previous upload session. - * Returns true if the session is valid. Call upload() to resume it. - * Returns false if there is no session or the session is expired. 
*/ - bool restore() - { - if (exists(sessionFilePath)) { - log.vlog("Trying to restore the upload session ..."); - // We cant use JSONType.object check, as this is currently a string - // We cant use a try & catch block, as it does not catch std.json.JSONException - auto sessionFileText = readText(sessionFilePath); - if(canFind(sessionFileText,"@odata.context")) { - session = readText(sessionFilePath).parseJSON(); - } else { - log.vlog("Upload session resume data is invalid"); - remove(sessionFilePath); - return false; - } - - // Check the session resume file for expirationDateTime - if ("expirationDateTime" in session){ - // expirationDateTime in the file - auto expiration = SysTime.fromISOExtString(session["expirationDateTime"].str); - if (expiration < Clock.currTime()) { - log.vlog("The upload session is expired"); - return false; - } - if (!exists(session["localPath"].str)) { - log.vlog("The file does not exist anymore"); - return false; - } - // Can we read the file - as a permissions issue or file corruption will cause a failure on resume - // https://github.com/abraunegg/onedrive/issues/113 - if (readLocalFile(session["localPath"].str)){ - // able to read the file - // request the session status - JSONValue response; - try { - response = onedrive.requestUploadStatus(session["uploadUrl"].str); - } catch (OneDriveException e) { - // handle any onedrive error response - if (e.httpStatusCode == 400) { - log.vlog("Upload session not found"); - return false; - } - } - - // do we have a valid response from OneDrive? - if (response.type() == JSONType.object){ - // JSON object - if (("expirationDateTime" in response) && ("nextExpectedRanges" in response)){ - // has the elements we need - session["expirationDateTime"] = response["expirationDateTime"]; - session["nextExpectedRanges"] = response["nextExpectedRanges"]; - if (session["nextExpectedRanges"].array.length == 0) { - log.vlog("The upload session is completed"); - return false; - } - } else { - // bad data - log.vlog("Restore file upload session failed - invalid data response from OneDrive"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - } else { - // not a JSON object - log.vlog("Restore file upload session failed - invalid response from OneDrive"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - return true; - } else { - // unable to read the local file - log.vlog("Restore file upload session failed - unable to read the local file"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - } else { - // session file contains an error - cant resume - log.vlog("Restore file upload session failed - cleaning up session resume"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return false; - } - } - return false; - } - - JSONValue upload() - { - // Response for upload - JSONValue response; - - // session JSON needs to contain valid elements - long offset; - long fileSize; - - if ("nextExpectedRanges" in session){ - offset = session["nextExpectedRanges"][0].str.splitter('-').front.to!long; - } - - if ("localPath" in session){ - fileSize = getSize(session["localPath"].str); - } - - if ("uploadUrl" in session){ - // Upload file via session created - // Upload Progress Bar - size_t iteration = (roundTo!int(double(fileSize)/double(fragmentSize)))+1; - Progress p = new Progress(iteration); - p.title = "Uploading"; - long fragmentCount = 0; - long fragSize = 0; - - // Initialise the download bar at 0% - p.next(); - - while 
(true) { - fragmentCount++; - log.vdebugNewLine("Fragment: ", fragmentCount, " of ", iteration); - p.next(); - log.vdebugNewLine("fragmentSize: ", fragmentSize, "offset: ", offset, " fileSize: ", fileSize ); - fragSize = fragmentSize < fileSize - offset ? fragmentSize : fileSize - offset; - log.vdebugNewLine("Using fragSize: ", fragSize); - - // fragSize must not be a negative value - if (fragSize < 0) { - // Session upload will fail - // not a JSON object - fragment upload failed - log.vlog("File upload session failed - invalid calculation of fragment size"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - // set response to null as error - response = null; - return response; - } - - // If the resume upload fails, we need to check for a return code here - try { - response = onedrive.uploadFragment( - session["uploadUrl"].str, - session["localPath"].str, - offset, - fragSize, - fileSize - ); - } catch (OneDriveException e) { - // if a 100 response is generated, continue - if (e.httpStatusCode == 100) { - continue; - } - // there was an error response from OneDrive when uploading the file fragment - // handle 'HTTP request returned status code 429 (Too Many Requests)' first - if (e.httpStatusCode == 429) { - auto retryAfterValue = onedrive.getRetryAfterValue(); - log.vdebug("Fragment upload failed - received throttle request response from OneDrive"); - log.vdebug("Using Retry-After Value = ", retryAfterValue); - // Sleep thread as per request - log.log("\nThread sleeping due to 'HTTP request returned status code 429' - The request has been throttled"); - log.log("Sleeping for ", retryAfterValue, " seconds"); - Thread.sleep(dur!"seconds"(retryAfterValue)); - log.log("Retrying fragment upload"); - } else { - // insert a new line as well, so that the below error is inserted on the console in the right location - log.vlog("\nFragment upload failed - received an exception response from OneDrive"); - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // retry fragment upload in case error is transient - log.vlog("Retrying fragment upload"); - } - - try { - response = onedrive.uploadFragment( - session["uploadUrl"].str, - session["localPath"].str, - offset, - fragSize, - fileSize - ); - } catch (OneDriveException e) { - // OneDrive threw another error on retry - log.vlog("Retry to upload fragment failed"); - // display what the error is - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - // set response to null as the fragment upload was in error twice - response = null; - } - } - // was the fragment uploaded without issue? - if (response.type() == JSONType.object){ - offset += fragmentSize; - if (offset >= fileSize) break; - // update the session details - session["expirationDateTime"] = response["expirationDateTime"]; - session["nextExpectedRanges"] = response["nextExpectedRanges"]; - save(); - } else { - // not a JSON object - fragment upload failed - log.vlog("File upload session failed - invalid response from OneDrive"); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - // set response to null as error - response = null; - return response; - } - } - // upload complete - p.next(); - writeln(); - if (exists(sessionFilePath)) { - remove(sessionFilePath); - } - return response; - } else { - // session elements were not present - log.vlog("Session has no valid upload URL ... 
skipping this file upload"); - // return an empty JSON response - response = null; - return response; - } - } - - string getUploadSessionLocalFilePath() { - // return the session file path - string localPath = ""; - if ("localPath" in session){ - localPath = session["localPath"].str; - } - return localPath; - } - - // save session details to temp file - private void save() - { - std.file.write(sessionFilePath, session.toString()); - } -} diff --git a/src/util.d b/src/util.d index cbaa5b8ef..684981128 100644 --- a/src/util.d +++ b/src/util.d @@ -1,6 +1,12 @@ +// What is this module called? +module util; + +// What does this module require to function? +import core.stdc.stdlib: EXIT_SUCCESS, EXIT_FAILURE, exit; import std.base64; import std.conv; -import std.digest.crc, std.digest.sha; +import std.digest.crc; +import std.digest.sha; import std.net.curl; import std.datetime; import std.file; @@ -13,222 +19,345 @@ import std.algorithm; import std.uri; import std.json; import std.traits; -import qxor; import core.stdc.stdlib; +import core.thread; +import core.memory; +import std.math; +import std.format; +import std.random; +import std.array; +import std.ascii; +import std.range; +import std.exception; +import core.sys.posix.pwd; +import core.sys.posix.unistd; +import core.stdc.string; +// What other modules that we have created do we need to import? import log; import config; +import qxor; +import curlEngine; +// module variables shared string deviceName; -static this() -{ +static this() { deviceName = Socket.hostName; } -// gives a new name to the specified file or directory -void safeRename(const(char)[] path) -{ - auto ext = extension(path); - auto newPath = path.chomp(ext) ~ "-" ~ deviceName; +// Creates a safe backup of the given item, and only performs the function if not in a --dry-run scenario +void safeBackup(const(char)[] path, bool dryRun) { + auto ext = extension(path); + auto newPath = path.chomp(ext) ~ "-" ~ deviceName; + int n = 2; + + // Limit to 1000 iterations .. 1000 file backups + while (exists(newPath ~ ext) && n < 1000) { + newPath = newPath.chomp("-" ~ (n - 1).to!string) ~ "-" ~ n.to!string; + n++; + } + + // Check if unique file name was found if (exists(newPath ~ ext)) { - int n = 2; - char[] newPath2; - do { - newPath2 = newPath ~ "-" ~ n.to!string; - n++; - } while (exists(newPath2 ~ ext)); - newPath = newPath2; + // On the 1000th backup of this file, this should be triggered + addLogEntry("Failed to backup " ~ to!string(path) ~ ": Unique file name could not be found after 1000 attempts", ["error"]); + return; // Exit function as a unique file name could not be found } + + // Configure the new name newPath ~= ext; - rename(path, newPath); + + // Log that we are perform the backup by renaming the file + addLogEntry("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: " ~ to!string(path) ~ " -> " ~ to!string(newPath)); + + if (!dryRun) { + // Not a --dry-run scenario - do the file rename + // + // There are 2 options to rename a file + // rename() - https://dlang.org/library/std/file/rename.html + // std.file.copy() - https://dlang.org/library/std/file/copy.html + // + // rename: + // It is not possible to rename a file across different mount points or drives. On POSIX, the operation is atomic. That means, if to already exists there will be no time period during the operation where to is missing. + // + // std.file.copy + // Copy file from to file to. File timestamps are preserved. 
File attributes are preserved, if preserve equals Yes.preserveAttributes + // + // Use rename() as Linux is POSIX compliant, we have an atomic operation where at no point in time the 'to' is missing. + try { + rename(path, newPath); + } catch (Exception e) { + // Handle exceptions, e.g., log error + addLogEntry("Renaming of local file failed for " ~ to!string(path) ~ ": " ~ e.msg, ["error"]); + } + } else { + addLogEntry("DRY-RUN: Skipping renaming local file to preserve existing file and prevent data loss: " ~ to!string(path) ~ " -> " ~ to!string(newPath), ["debug"]); + } } -// deletes the specified file without throwing an exception if it does not exists -void safeRemove(const(char)[] path) -{ +// Deletes the specified file without throwing an exception if it does not exists +void safeRemove(const(char)[] path) { if (exists(path)) remove(path); } -// returns the quickXorHash base64 string of a file -string computeQuickXorHash(string path) -{ +// Returns the SHA1 hash hex string of a file +string computeSha1Hash(string path) { + SHA1 sha; + auto file = File(path, "rb"); + scope(exit) file.close(); // Ensure file is closed post read + foreach (ubyte[] data; chunks(file, 4096)) { + sha.put(data); + } + + // Store the hash in a local variable before converting to string + auto hashResult = sha.finish(); + return toHexString(hashResult).idup; // Convert the hash to a hex string +} + +// Returns the quickXorHash base64 string of a file +string computeQuickXorHash(string path) { QuickXor qxor; auto file = File(path, "rb"); + scope(exit) file.close(); // Ensure file is closed post read foreach (ubyte[] data; chunks(file, 4096)) { qxor.put(data); } - return Base64.encode(qxor.finish()); + + // Store the hash in a local variable before converting to string + auto hashResult = qxor.finish(); + return Base64.encode(hashResult).idup; // Convert the hash to a base64 string } -// returns the SHA256 hex string of a file +// Returns the SHA256 hex string of a file string computeSHA256Hash(string path) { SHA256 sha256; auto file = File(path, "rb"); + scope(exit) file.close(); // Ensure file is closed post read foreach (ubyte[] data; chunks(file, 4096)) { sha256.put(data); } - return sha256.finish().toHexString().dup; -} - -// converts wildcards (*, ?) to regex -Regex!char wild2regex(const(char)[] pattern) -{ - string str; - str.reserve(pattern.length + 2); - str ~= "^"; - foreach (c; pattern) { - switch (c) { - case '*': - str ~= "[^/]*"; - break; - case '.': - str ~= "\\."; - break; - case '?': - str ~= "[^/]"; - break; - case '|': - str ~= "$|^"; - break; - case '+': - str ~= "\\+"; - break; - case ' ': - str ~= "\\s+"; - break; - case '/': - str ~= "\\/"; - break; - case '(': - str ~= "\\("; - break; - case ')': - str ~= "\\)"; - break; - default: - str ~= c; - break; - } - } - str ~= "$"; - return regex(str, "i"); -} - -// returns true if the network connection is available -bool testNetwork(Config cfg) -{ - // Use low level HTTP struct - auto http = HTTP(); - http.url = "https://login.microsoftonline.com"; - // DNS lookup timeout - http.dnsTimeout = (dur!"seconds"(cfg.getValueLong("dns_timeout"))); - // Timeout for connecting - http.connectTimeout = (dur!"seconds"(cfg.getValueLong("connect_timeout"))); - // Data Timeout for HTTPS connections - http.dataTimeout = (dur!"seconds"(cfg.getValueLong("data_timeout"))); - // maximum time any operation is allowed to take - // This includes dns resolution, connecting, data transfer, etc. 
- http.operationTimeout = (dur!"seconds"(cfg.getValueLong("operation_timeout"))); - // What IP protocol version should be used when using Curl - IPv4 & IPv6, IPv4 or IPv6 - http.handle.set(CurlOption.ipresolve,cfg.getValueLong("ip_protocol_version")); // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only - // HTTP connection test method - http.method = HTTP.Method.head; - // Attempt to contact the Microsoft Online Service - try { - log.vdebug("Attempting to contact online service"); - http.perform(); - log.vdebug("Shutting down HTTP engine as successfully reached OneDrive Online Service"); - http.shutdown(); - return true; - } catch (SocketException e) { - // Socket issue - log.vdebug("HTTP Socket Issue"); - log.error("Cannot connect to Microsoft OneDrive Service - Socket Issue"); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return false; - } catch (CurlException e) { - // No network connection to OneDrive Service - log.vdebug("No Network Connection"); - log.error("Cannot connect to Microsoft OneDrive Service - Network Connection Issue"); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - return false; - } + // Store the hash in a local variable before converting to string + auto hashResult = sha256.finish(); + return toHexString(hashResult).idup; // Convert the hash to a hex string } -// Can we read the file - as a permissions issue or file corruption will cause a failure +// Converts wildcards (*, ?) to regex +// The changes here need to be 100% regression tested before full release +Regex!char wild2regex(const(char)[] pattern) { + string str; + str.reserve(pattern.length + 2); + str ~= "^"; + foreach (c; pattern) { + switch (c) { + case '*': + str ~= ".*"; // Changed to match any character. Was: str ~= "[^/]*"; + break; + case '.': + str ~= "\\."; + break; + case '?': + str ~= "."; // Changed to match any single character. Was: str ~= "[^/]"; + break; + case '|': + str ~= "$|^"; + break; + case '+': + str ~= "\\+"; + break; + case ' ': + str ~= "\\s"; // Changed to match exactly one whitespace. 
str ~= "\\s+"; + break; + case '/': + str ~= "\\/"; + break; + case '(': + str ~= "\\("; + break; + case ')': + str ~= "\\)"; + break; + default: + str ~= c; + break; + } + } + str ~= "$"; + return regex(str, "i"); +} + +// Test Internet access to Microsoft OneDrive +bool testInternetReachability(ApplicationConfig appConfig) { + CurlEngine curlEngine; + bool result = false; + try { + // Use preconfigured object with all the correct http values assigned + curlEngine = new CurlEngine(); + curlEngine.initialise(appConfig.getValueLong("dns_timeout"), appConfig.getValueLong("connect_timeout"), appConfig.getValueLong("data_timeout"), appConfig.getValueLong("operation_timeout"), appConfig.defaultMaxRedirects, appConfig.getValueBool("debug_https"), appConfig.getValueString("user_agent"), appConfig.getValueBool("force_http_11"), appConfig.getValueLong("rate_limit"), appConfig.getValueLong("ip_protocol_version")); + + // Configure the remaining items required + // URL to use + // HTTP connection test method + + curlEngine.connect(HTTP.Method.head, "https://login.microsoftonline.com"); + addLogEntry("Attempting to contact Microsoft OneDrive Login Service", ["debug"]); + curlEngine.http.perform(); + addLogEntry("Shutting down HTTP engine as successfully reached OneDrive Login Service", ["debug"]); + result = true; + } catch (SocketException e) { + addLogEntry("HTTP Socket Issue", ["debug"]); + addLogEntry("Cannot connect to Microsoft OneDrive Login Service - Socket Issue"); + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } catch (CurlException e) { + addLogEntry("No Network Connection", ["debug"]); + addLogEntry("Cannot connect to Microsoft OneDrive Login Service - Network Connection Issue"); + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } finally { + if (curlEngine) { + curlEngine.http.shutdown(); + object.destroy(curlEngine); + } + } + + // Return test result + return result; +} + +// Retry Internet access test to Microsoft OneDrive +bool retryInternetConnectivtyTest(ApplicationConfig appConfig) { + int retryAttempts = 0; + int backoffInterval = 1; // initial backoff interval in seconds + int maxBackoffInterval = 3600; // maximum backoff interval in seconds + int maxRetryCount = 100; // max retry attempts, reduced for practicality + bool isOnline = false; + + while (retryAttempts < maxRetryCount && !isOnline) { + if (backoffInterval < maxBackoffInterval) { + backoffInterval = min(backoffInterval * 2, maxBackoffInterval); // exponential increase + } + + addLogEntry(" Retry Attempt: " ~ to!string(retryAttempts + 1), ["debug"]); + addLogEntry(" Retry In (seconds): " ~ to!string(backoffInterval), ["debug"]); + + Thread.sleep(dur!"seconds"(backoffInterval)); + isOnline = testInternetReachability(appConfig); // assuming this function is defined elsewhere + + if (isOnline) { + addLogEntry("Internet connectivity to Microsoft OneDrive service has been restored"); + } + + retryAttempts++; + } + + if (!isOnline) { + addLogEntry("ERROR: Was unable to reconnect to the Microsoft OneDrive service after " ~ to!string(maxRetryCount) ~ " attempts!"); + } + + // Return state + return isOnline; +} + +// Can we read the local file - as a permissions issue or file corruption will cause a failure // https://github.com/abraunegg/onedrive/issues/113 // returns true if file can be accessed -bool readLocalFile(string path) -{ - try { - // attempt to read up to the first 1 byte of the file - // validates we can 'read' the file based on file permissions - read(path,1); - } catch 
(std.file.FileException e) { - // unable to read the new local file - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return false; - } - return true; +bool readLocalFile(string path) { + try { + // Attempt to read up to the first 1 byte of the file + auto data = read(path, 1); + + // Check if the read operation was successful + if (data.length != 1) { + addLogEntry("Failed to read the required amount from the file: " ~ path); + return false; + } + } catch (std.file.FileException e) { + // Unable to read the file, log the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + return false; + } + return true; } -// calls globMatch for each string in pattern separated by '|' -bool multiGlobMatch(const(char)[] path, const(char)[] pattern) -{ - foreach (glob; pattern.split('|')) { - if (globMatch!(std.path.CaseSensitive.yes)(path, glob)) { - return true; - } - } - return false; +// Calls globMatch for each string in pattern separated by '|' +bool multiGlobMatch(const(char)[] path, const(char)[] pattern) { + if (path.length == 0 || pattern.length == 0) { + return false; + } + + if (!pattern.canFind('|')) { + return globMatch!(std.path.CaseSensitive.yes)(path, pattern); + } + + foreach (glob; pattern.split('|')) { + if (globMatch!(std.path.CaseSensitive.yes)(path, glob)) { + return true; + } + } + return false; } -bool isValidName(string path) -{ - // Restriction and limitations about windows naming files +// Does the path pass the Microsoft restriction and limitations about naming files and folders +bool isValidName(string path) { + // Restriction and limitations about windows naming files and folders // https://msdn.microsoft.com/en-us/library/aa365247 // https://support.microsoft.com/en-us/help/3125202/restrictions-and-limitations-when-you-sync-files-and-folders - // allow root item - if (path == ".") { - return true; - } + if (path == ".") { + return true; + } - bool matched = true; - string itemName = baseName(path); - - auto invalidNameReg = - ctRegex!( - // Leading whitespace and trailing whitespace/dot - `^\s.*|^.*[\s\.]$|` ~ - // Invalid characters - `.*[<>:"\|\?*/\\].*|` ~ - // Reserved device name and trailing .~ - `(?:^CON|^PRN|^AUX|^NUL|^COM[0-9]|^LPT[0-9])(?:[.].+)?$` - ); - auto m = match(itemName, invalidNameReg); - matched = m.empty; - - // Additional explicit validation checks - if (itemName == ".lock") {matched = false;} - if (itemName == "desktop.ini") {matched = false;} - // _vti_ cannot appear anywhere in a file or folder name - if(canFind(itemName, "_vti_")){matched = false;} - // Item name cannot equal '~' - if (itemName == "~") {matched = false;} + string itemName = baseName(path).toLower(); // Ensure case-insensitivity + + // Check for explicitly disallowed names + // https://support.microsoft.com/en-us/office/restrictions-and-limitations-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us#invalidfilefoldernames + string[] disallowedNames = [ + ".lock", "desktop.ini", "CON", "PRN", "AUX", "NUL", + "COM0", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9", + "LPT0", "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9" + ]; + + // Creating an associative array for faster lookup + bool[string] disallowedSet; + foreach (name; disallowedNames) { + disallowedSet[name.toLower()] = true; // Normalise to lowercase + } + + if (disallowedSet.get(itemName, false) || itemName.startsWith("~$") || canFind(itemName, "_vti_")) { + return false; + } + + // Regular expression 
for invalid patterns + // https://support.microsoft.com/en-us/office/restrictions-and-limitations-in-onedrive-and-sharepoint-64883a5d-228e-48f5-b3d2-eb39e07630fa?ui=en-us&rs=en-us&ad=us#invalidcharacters + // Leading whitespace and trailing whitespace/dot + // Invalid characters + auto invalidNameReg = ctRegex!(`^\s.*|^.*[\s\.]$|.*[<>:"\|\?*/\\].*`); - // return response - return matched; + auto matchResult = match(itemName, invalidNameReg); + if (!matchResult.empty) { + return false; + } + + // Determine if the path is at the root level, if yes, check that 'forms' is not the first folder + auto segments = pathSplitter(path).array; + if (segments.length <= 2 && segments.back.toLower() == "forms") { // Check only the last segment, convert to lower as OneDrive is not POSIX compliant, easier to compare + return false; + } + + return true; } -bool containsBadWhiteSpace(string path) -{ - // allow root item - if (path == ".") { - return true; - } +// Does the path contain any bad whitespace characters +bool containsBadWhiteSpace(string path) { + // Check for null or empty string + if (path.length == 0) { + return false; + } + + // Check for root item + if (path == ".") { + return false; + } // https://github.com/abraunegg/onedrive/issues/35 // Issue #35 presented an interesting issue where the filename contained a newline item @@ -237,50 +366,92 @@ bool containsBadWhiteSpace(string path) // /v1.0/me/drive/root:/.%2FState-of-the-art%2C%20challenges%2C%20and%20open%20issues%20in%20the%20integration%20of%20Internet%20of%0AThings%20and%20Cloud%20Computing.pdf // The '$'\n'' is translated to %0A which causes the OneDrive query to fail // Check for the presence of '%0A' via regex - - string itemName = encodeComponent(baseName(path)); - auto invalidWhitespaceReg = - ctRegex!( - // Check for \n which is %0A when encoded - `%0A` - ); - auto m = match(itemName, invalidWhitespaceReg); - return m.empty; -} - -bool containsASCIIHTMLCodes(string path) -{ + + string itemName = encodeComponent(baseName(path)); + // Check for encoded newline character + return itemName.indexOf("%0A") != -1; +} + +// Does the path contain any ASCII HTML Codes +bool containsASCIIHTMLCodes(string path) { + // Check for null or empty string + if (path.length == 0) { + return false; + } + + // Check for root item + if (path == ".") { + return false; + } + // https://github.com/abraunegg/onedrive/issues/151 - // If a filename contains ASCII HTML codes, regardless of if it gets encoded, it generates an error + // If a filename contains ASCII HTML codes, it generates an error when attempting to upload this to Microsoft OneDrive // Check if the filename contains an ASCII HTML code sequence - auto invalidASCIICode = - ctRegex!( - // Check to see if &#XXXX is in the filename - `(?:&#|&#[0-9][0-9]|&#[0-9][0-9][0-9]|&#[0-9][0-9][0-9][0-9])` - ); - - auto m = match(path, invalidASCIICode); - return m.empty; + // Check for the pattern &# followed by 1 to 4 digits and a semicolon + auto invalidASCIICode = ctRegex!(`&#[0-9]{1,4};`); + + // Use match to search for ASCII HTML codes in the path + auto matchResult = match(path, invalidASCIICode); + + // Return true if ASCII HTML codes are found + return !matchResult.empty; +} + +// Does the path contain any ASCII Control Codes +bool containsASCIIControlCodes(string path) { + // Check for null or empty string + if (path.length == 0) { + return false; + } + + // Check for root item + if (path == ".") { + return false; + } + + // 
https://github.com/abraunegg/onedrive/discussions/2553#discussioncomment-7995254 + // Define a ctRegex pattern for ASCII control codes + auto controlCodePattern = ctRegex!(`[^\x20-\x7E./]`); + + // Use match to search for ASCII control codes in the path + auto matchResult = match(path, controlCodePattern); + + // Return true if matchResult is not empty (indicating a control code was found) + return !matchResult.empty; +} + +// Does the path contain any HTML URL encoded items (e.g., '%20' for space) +bool containsURLEncodedItems(string path) { + // Check for null or empty string + if (path.length == 0) { + return false; + } + + // Pattern for percent encoding: % followed by two hexadecimal digits + auto urlEncodedPattern = ctRegex!(`%[0-9a-fA-F]{2}`); + + // Search for URL encoded items in the string + auto matchResult = match(path, urlEncodedPattern); + + // Return true if URL encoded items are found + return !matchResult.empty; } // Parse and display error message received from OneDrive -void displayOneDriveErrorMessage(string message, string callingFunction) -{ - writeln(); - log.error("ERROR: Microsoft OneDrive API returned an error with the following message:"); +void displayOneDriveErrorMessage(string message, string callingFunction) { + addLogEntry(); + addLogEntry("ERROR: Microsoft OneDrive API returned an error with the following message:"); auto errorArray = splitLines(message); - log.error(" Error Message: ", errorArray[0]); + addLogEntry(" Error Message: " ~ to!string(errorArray[0])); // Extract 'message' as the reason JSONValue errorMessage = parseJSON(replace(message, errorArray[0], "")); - // extra debug - log.vdebug("Raw Error Data: ", message); - log.vdebug("JSON Message: ", errorMessage); // What is the reason for the error if (errorMessage.type() == JSONType.object) { // configure the error reason string errorReason; + string errorCode; string requestDate; string requestId; @@ -303,11 +474,20 @@ void displayOneDriveErrorMessage(string message, string callingFunction) // Display the error reason if (errorReason.startsWith(" 0 ? 
to!string(errorArray[0]) : "No error message available"; + addLogEntry(" Error Message: " ~ errorMessage); + + // Log the calling function + addLogEntry(" Calling Function: " ~ callingFunction, ["verbose"]); + + try { + // Safely check for disk space + ulong localActualFreeSpace = to!ulong(getAvailableDiskSpace(".")); + if (localActualFreeSpace == 0) { + // Must force exit here, allow logging to be done + Thread.sleep(dur!("msecs")(500)); + exit(EXIT_FAILURE); + } + } catch (Exception e) { + // Handle exceptions from disk space check or type conversion + addLogEntry(" Exception in disk space check: " ~ e.msg); + } +} + +// Display the POSIX Error Message +void displayPosixErrorMessage(string message) { + addLogEntry(); // used rather than writeln + addLogEntry("ERROR: Microsoft OneDrive API returned data that highlights a POSIX compliance issue:"); + addLogEntry(" Error Message: " ~ message); } // Get the function name that is being called to assist with identifying where an error is being generated @@ -368,20 +605,18 @@ JSONValue getLatestReleaseDetails() { string latestTag; string publishedDate; + // Query GitHub for the 'latest' release details try { - content = get("https://api.github.com/repos/abraunegg/onedrive/releases/latest"); - } catch (CurlException e) { - // curl generated an error - meaning we could not query GitHub - log.vdebug("Unable to query GitHub for latest release"); - } + content = get("https://api.github.com/repos/abraunegg/onedrive/releases/latest"); + githubLatest = content.parseJSON(); + } catch (CurlException e) { + addLogEntry("CurlException: Unable to query GitHub for latest release - " ~ e.msg, ["debug"]); + return parseJSON(`{"Error": "CurlException", "message": "` ~ e.msg ~ `"}`); + } catch (JSONException e) { + addLogEntry("JSONException: Unable to parse GitHub JSON response - " ~ e.msg, ["debug"]); + return parseJSON(`{"Error": "JSONException", "message": "` ~ e.msg ~ `"}`); + } - try { - githubLatest = content.parseJSON(); - } catch (JSONException e) { - // unable to parse the content JSON, set to blank JSON - log.vdebug("Unable to parse GitHub JSON response"); - githubLatest = parseJSON("{}"); - } // githubLatest has to be a valid JSON object if (githubLatest.type() == JSONType.object){ @@ -392,7 +627,7 @@ JSONValue getLatestReleaseDetails() { latestTag = strip(githubLatest["tag_name"].str, "v"); } else { // set to latestTag zeros - log.vdebug("'tag_name' unavailable in JSON response. Setting GitHub 'tag_name' release version to 0.0.0"); + addLogEntry("'tag_name' unavailable in JSON response. Setting GitHub 'tag_name' release version to 0.0.0", ["debug"]); latestTag = "0.0.0"; } // use the returned published_at date @@ -401,15 +636,15 @@ JSONValue getLatestReleaseDetails() { publishedDate = githubLatest["published_at"].str; } else { // set to v2.0.0 release date - log.vdebug("'published_at' unavailable in JSON response. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z"); + addLogEntry("'published_at' unavailable in JSON response. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z", ["debug"]); publishedDate = "2018-07-18T18:00:00Z"; } } else { // JSONValue is not an object - log.vdebug("Invalid JSON Object. Setting GitHub 'tag_name' release version to 0.0.0"); + addLogEntry("Invalid JSON Object response from GitHub. Setting GitHub 'tag_name' release version to 0.0.0", ["debug"]); latestTag = "0.0.0"; - log.vdebug("Invalid JSON Object. 
Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z"); - publishedDate = "2018-07-18T18:00:00Z"; + addLogEntry("Invalid JSON Object. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z", ["debug"]); + publishedDate = "2018-07-18T18:00:00Z"; } // return the latest github version and published date as our own JSON @@ -432,29 +667,26 @@ JSONValue getCurrentVersionDetails(string thisVersion) { string versionTag = "v" ~ thisVersion; string publishedDate; + // Query GitHub for the release details to match the running version try { - content = get("https://api.github.com/repos/abraunegg/onedrive/releases"); - } catch (CurlException e) { - // curl generated an error - meaning we could not query GitHub - log.vdebug("Unable to query GitHub for release details"); - } - - try { - githubDetails = content.parseJSON(); - } catch (JSONException e) { - // unable to parse the content JSON, set to blank JSON - log.vdebug("Unable to parse GitHub JSON response"); - githubDetails = parseJSON("{}"); - } + content = get("https://api.github.com/repos/abraunegg/onedrive/releases"); + githubDetails = content.parseJSON(); + } catch (CurlException e) { + addLogEntry("CurlException: Unable to query GitHub for release details - " ~ e.msg, ["debug"]); + return parseJSON(`{"Error": "CurlException", "message": "` ~ e.msg ~ `"}`); + } catch (JSONException e) { + addLogEntry("JSONException: Unable to parse GitHub JSON response - " ~ e.msg, ["debug"]); + return parseJSON(`{"Error": "JSONException", "message": "` ~ e.msg ~ `"}`); + } // githubDetails has to be a valid JSON array if (githubDetails.type() == JSONType.array){ foreach (searchResult; githubDetails.array) { // searchResult["tag_name"].str; if (searchResult["tag_name"].str == versionTag) { - log.vdebug("MATCHED version"); - log.vdebug("tag_name: ", searchResult["tag_name"].str); - log.vdebug("published_at: ", searchResult["published_at"].str); + addLogEntry("MATCHED version", ["debug"]); + addLogEntry("tag_name: " ~ searchResult["tag_name"].str, ["debug"]); + addLogEntry("published_at: " ~ searchResult["published_at"].str, ["debug"]); publishedDate = searchResult["published_at"].str; } } @@ -462,13 +694,13 @@ JSONValue getCurrentVersionDetails(string thisVersion) { if (publishedDate.empty) { // empty .. no version match ? // set to v2.0.0 release date - log.vdebug("'published_at' unavailable in JSON response. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z"); + addLogEntry("'published_at' unavailable in JSON response. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z", ["debug"]); publishedDate = "2018-07-18T18:00:00Z"; } } else { // JSONValue is not an Array - log.vdebug("Invalid JSON Array. Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z"); - publishedDate = "2018-07-18T18:00:00Z"; + addLogEntry("Invalid JSON Array. 
Setting GitHub 'published_at' date to 2018-07-18T18:00:00Z", ["debug"]); + publishedDate = "2018-07-18T18:00:00Z"; } // return the latest github version and published date as our own JSON @@ -502,11 +734,11 @@ void checkApplicationVersion() { string applicationVersion = currentVersionArray[0]; // debug output - log.vdebug("applicationVersion: ", applicationVersion); - log.vdebug("latestVersion: ", latestVersion); - log.vdebug("publishedDate: ", publishedDate); - log.vdebug("currentTime: ", currentTime); - log.vdebug("releaseGracePeriod: ", releaseGracePeriod); + addLogEntry("applicationVersion: " ~ applicationVersion, ["debug"]); + addLogEntry("latestVersion: " ~ latestVersion, ["debug"]); + addLogEntry("publishedDate: " ~ to!string(publishedDate), ["debug"]); + addLogEntry("currentTime: " ~ to!string(currentTime), ["debug"]); + addLogEntry("releaseGracePeriod: " ~ to!string(releaseGracePeriod), ["debug"]); // display details if not current // is application version is older than available on GitHub @@ -520,14 +752,14 @@ void checkApplicationVersion() { JSONValue thisVersionDetails = getCurrentVersionDetails(applicationVersion); SysTime thisVersionPublishedDate = SysTime.fromISOExtString(thisVersionDetails["publishedDate"].str).toUTC(); thisVersionPublishedDate.fracSecs = Duration.zero; - log.vdebug("thisVersionPublishedDate: ", thisVersionPublishedDate); + addLogEntry("thisVersionPublishedDate: " ~ to!string(thisVersionPublishedDate), ["debug"]); // the running version grace period is its release date + 1 month SysTime thisVersionReleaseGracePeriod = thisVersionPublishedDate; thisVersionReleaseGracePeriod = thisVersionReleaseGracePeriod.add!"months"(1); - log.vdebug("thisVersionReleaseGracePeriod: ", thisVersionReleaseGracePeriod); + addLogEntry("thisVersionReleaseGracePeriod: " ~ to!string(thisVersionReleaseGracePeriod), ["debug"]); - // is this running version obsolete ? + // Is this running version obsolete ? if (!displayObsolete) { // if releaseGracePeriod > currentTime // display an information warning that there is a new release available @@ -541,69 +773,278 @@ void checkApplicationVersion() { } // display version response - writeln(); + addLogEntry(); if (!displayObsolete) { // display the new version is available message - log.logAndNotify("INFO: A new onedrive client version is available. Please upgrade your client version when possible."); + addLogEntry("INFO: A new onedrive client version is available. Please upgrade your client version when possible.", ["info", "notify"]); } else { // display the obsolete message - log.logAndNotify("WARNING: Your onedrive client version is now obsolete and unsupported. Please upgrade your client version."); + addLogEntry("WARNING: Your onedrive client version is now obsolete and unsupported. Please upgrade your client version.", ["info", "notify"]); } - log.log("Current Application Version: ", applicationVersion); - log.log("Version Available: ", latestVersion); - writeln(); + addLogEntry("Current Application Version: " ~ applicationVersion); + addLogEntry("Version Available: " ~ latestVersion); + addLogEntry(); } } } -// Unit Tests -unittest -{ - assert(multiGlobMatch(".hidden", ".*")); - assert(multiGlobMatch(".hidden", "file|.*")); - assert(!multiGlobMatch("foo.bar", "foo|bar")); - // that should detect invalid file/directory name. 
- assert(isValidName(".")); - assert(isValidName("./general.file")); - assert(!isValidName("./ leading_white_space")); - assert(!isValidName("./trailing_white_space ")); - assert(!isValidName("./trailing_dot.")); - assert(!isValidName("./includesin the path")); - assert(!isValidName("./includes:in the path")); - assert(!isValidName(`./includes"in the path`)); - assert(!isValidName("./includes|in the path")); - assert(!isValidName("./includes?in the path")); - assert(!isValidName("./includes*in the path")); - assert(!isValidName("./includes / in the path")); - assert(!isValidName(`./includes\ in the path`)); - assert(!isValidName(`./includes\\ in the path`)); - assert(!isValidName(`./includes\\\\ in the path`)); - assert(!isValidName("./includes\\ in the path")); - assert(!isValidName("./includes\\\\ in the path")); - assert(!isValidName("./CON")); - assert(!isValidName("./CON.text")); - assert(!isValidName("./PRN")); - assert(!isValidName("./AUX")); - assert(!isValidName("./NUL")); - assert(!isValidName("./COM0")); - assert(!isValidName("./COM1")); - assert(!isValidName("./COM2")); - assert(!isValidName("./COM3")); - assert(!isValidName("./COM4")); - assert(!isValidName("./COM5")); - assert(!isValidName("./COM6")); - assert(!isValidName("./COM7")); - assert(!isValidName("./COM8")); - assert(!isValidName("./COM9")); - assert(!isValidName("./LPT0")); - assert(!isValidName("./LPT1")); - assert(!isValidName("./LPT2")); - assert(!isValidName("./LPT3")); - assert(!isValidName("./LPT4")); - assert(!isValidName("./LPT5")); - assert(!isValidName("./LPT6")); - assert(!isValidName("./LPT7")); - assert(!isValidName("./LPT8")); - assert(!isValidName("./LPT9")); +bool hasId(JSONValue item) { + return ("id" in item) != null; +} + +bool hasQuota(JSONValue item) { + return ("quota" in item) != null; +} + +bool isItemDeleted(JSONValue item) { + return ("deleted" in item) != null; +} + +bool isItemRoot(JSONValue item) { + return ("root" in item) != null; +} + +bool hasParentReference(const ref JSONValue item) { + return ("parentReference" in item) != null; +} + +bool hasParentReferenceId(JSONValue item) { + return ("id" in item["parentReference"]) != null; } + +bool hasParentReferencePath(JSONValue item) { + return ("path" in item["parentReference"]) != null; +} + +bool isFolderItem(const ref JSONValue item) { + return ("folder" in item) != null; +} + +bool isFileItem(const ref JSONValue item) { + return ("file" in item) != null; +} + +bool isItemRemote(const ref JSONValue item) { + return ("remoteItem" in item) != null; +} + +bool isItemFile(const ref JSONValue item) { + return ("file" in item) != null; +} + +bool isItemFolder(const ref JSONValue item) { + return ("folder" in item) != null; +} + +bool hasFileSize(const ref JSONValue item) { + return ("size" in item) != null; +} + +// Function to determine if the final component of the provided path is a .file or .folder +bool isDotFile(const(string) path) { + // Check for null or empty path + if (path is null || path.length == 0) { + return false; + } + + // Special case for root + if (path == ".") { + return false; + } + + // Extract the last component of the path + auto paths = pathSplitter(buildNormalizedPath(path)); + + // Optimised way to fetch the last component + string lastComponent = paths.empty ? 
"" : paths.back; + + // Check if the last component starts with a dot + return startsWith(lastComponent, "."); +} + +bool isMalware(const ref JSONValue item) { + return ("malware" in item) != null; +} + +bool hasHashes(const ref JSONValue item) { + return ("hashes" in item["file"]) != null; +} + +bool hasQuickXorHash(const ref JSONValue item) { + return ("quickXorHash" in item["file"]["hashes"]) != null; +} + +bool hasSHA256Hash(const ref JSONValue item) { + return ("sha256Hash" in item["file"]["hashes"]) != null; +} + +bool isMicrosoftOneNoteMimeType1(const ref JSONValue item) { + return (item["file"]["mimeType"].str) == "application/msonenote"; +} + +bool isMicrosoftOneNoteMimeType2(const ref JSONValue item) { + return (item["file"]["mimeType"].str) == "application/octet-stream"; +} + +bool hasUploadURL(const ref JSONValue item) { + return ("uploadUrl" in item) != null; +} + +bool hasNextExpectedRanges(const ref JSONValue item) { + return ("nextExpectedRanges" in item) != null; +} + +bool hasLocalPath(const ref JSONValue item) { + return ("localPath" in item) != null; +} + +bool hasETag(const ref JSONValue item) { + return ("eTag" in item) != null; +} + +bool hasSharedElement(const ref JSONValue item) { + return ("shared" in item) != null; +} + +bool hasName(const ref JSONValue item) { + return ("name" in item) != null; +} + +// Convert bytes to GB +string byteToGibiByte(ulong bytes) { + if (bytes == 0) { + return "0.00"; // or handle the zero case as needed + } + + double gib = bytes / 1073741824.0; // 1024^3 for direct conversion + return format("%.2f", gib); // Format to ensure two decimal places +} + +// Test if entrypoint.sh exists on the root filesystem +bool entrypointExists(string basePath = "/") { + try { + // Build the path to the entrypoint.sh file + string entrypointPath = buildNormalizedPath(buildPath(basePath, "entrypoint.sh")); + + // Check if the path exists and return the result + return exists(entrypointPath); + } catch (Exception e) { + // Handle any exceptions (e.g., permission issues, invalid path) + writeln("An error occurred: ", e.msg); + return false; + } +} + +// Generate a random alphanumeric string with specified length +string generateAlphanumericString(size_t length = 16) { + // Ensure length is not zero + if (length == 0) { + throw new Exception("Length must be greater than 0"); + } + + auto asciiLetters = to!(dchar[])(letters); + auto asciiDigits = to!(dchar[])(digits); + dchar[] randomString; + randomString.length = length; + + // Create a random number generator + auto rndGen = Random(unpredictableSeed); + + // Fill the string with random alphanumeric characters + fill(randomString[], randomCover(chain(asciiLetters, asciiDigits), rndGen)); + + return to!string(randomString); +} + +void displayMemoryUsagePreGC() { + // Display memory usage + writeln(); + writeln("Memory Usage pre GC (KB)"); + writeln("------------------------"); + writeMemoryStats(); + writeln(); +} + +void displayMemoryUsagePostGC() { + // Display memory usage + writeln(); + writeln("Memory Usage post GC (KB)"); + writeln("-------------------------"); + writeMemoryStats(); + writeln(); +} + +void writeMemoryStats() { + // write memory stats + writeln("memory usedSize = ", (GC.stats.usedSize/1024)); + writeln("memory freeSize = ", (GC.stats.freeSize/1024)); + writeln("memory allocatedInCurrentThread = ", (GC.stats.allocatedInCurrentThread/1024)); +} + +// Return the username of the UID running the 'onedrive' process +string getUserName() { + // Retrieve the UID of the current user + auto 
uid = getuid(); + + // Retrieve password file entry for the user + auto pw = getpwuid(uid); + enforce(pw !is null, "Failed to retrieve user information for UID: " ~ to!string(uid)); + + // Extract username and convert to immutable string + string userName = to!string(fromStringz(pw.pw_name)); + + // Log User identifiers from process + addLogEntry("Process ID: " ~ to!string(pw), ["debug"]); + addLogEntry("User UID: " ~ to!string(pw.pw_uid), ["debug"]); + addLogEntry("User GID: " ~ to!string(pw.pw_gid), ["debug"]); + + // Check if username is valid + if (!userName.empty) { + addLogEntry("User Name: " ~ userName, ["debug"]); + return userName; + } else { + // Log and return unknown user + addLogEntry("User Name: unknown", ["debug"]); + return "unknown"; + } +} + +// Calculate the ETA for when a 'large file' will be completed (upload & download operations) +int calc_eta(size_t counter, size_t iterations, ulong start_time) { + if (counter == 0) { + return 0; // Avoid division by zero + } + + double ratio = cast(double) counter / iterations; + auto current_time = Clock.currTime.toUnixTime(); + ulong duration = (current_time - start_time); + + // Segments left to download + auto segments_remaining = (iterations > counter) ? (iterations - counter) : 0; + + // Calculate the average time per iteration so far + double avg_time_per_iteration = cast(double) duration / counter; + + // Debug output for the ETA calculation + addLogEntry("counter: " ~ to!string(counter), ["debug"]); + addLogEntry("iterations: " ~ to!string(iterations), ["debug"]); + addLogEntry("segments_remaining: " ~ to!string(segments_remaining), ["debug"]); + addLogEntry("ratio: " ~ format("%.2f", ratio), ["debug"]); + addLogEntry("start_time: " ~ to!string(start_time), ["debug"]); + addLogEntry("current_time: " ~ to!string(current_time), ["debug"]); + addLogEntry("duration: " ~ to!string(duration), ["debug"]); + addLogEntry("avg_time_per_iteration: " ~ format("%.2f", avg_time_per_iteration), ["debug"]); + + // Return the ETA or duration + if (counter != iterations) { + auto eta_sec = avg_time_per_iteration * segments_remaining; + // ETA Debug + addLogEntry("eta_sec: " ~ to!string(eta_sec), ["debug"]); + addLogEntry("estimated_total_time: " ~ to!string(avg_time_per_iteration * iterations), ["debug"]); + // Return ETA + return eta_sec > 0 ? 
cast(int) ceil(eta_sec) : 0; + } else { + // Return the average time per iteration for the last iteration + return cast(int) ceil(avg_time_per_iteration); + } +} \ No newline at end of file From 58598f8076b4f0bb4b939a2d1ffea68938bee1e0 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Tue, 9 Jan 2024 09:23:48 +1100 Subject: [PATCH 002/305] Delete documents again as POSIX rename failure * Delete documents again as POSIX rename failure --- docs/Docker.md | 397 ---------- docs/INSTALL.md | 282 -------- docs/Podman.md | 361 ---------- docs/USAGE.md | 943 ------------------------ docs/advanced-usage.md | 302 -------- docs/application-config-options.md | 1075 ---------------------------- docs/application-security.md | 97 --- docs/build-rpm-howto.md | 379 ---------- docs/business-shared-folders.md | 40 -- docs/known-issues.md | 60 -- docs/national-cloud-deployments.md | 145 ---- docs/privacy-policy.md | 65 -- docs/sharepoint-libraries.md | 228 ------ docs/terms-of-service.md | 54 -- docs/ubuntu-package-install.md | 420 ----------- 15 files changed, 4848 deletions(-) delete mode 100644 docs/Docker.md delete mode 100644 docs/INSTALL.md delete mode 100644 docs/Podman.md delete mode 100644 docs/USAGE.md delete mode 100644 docs/advanced-usage.md delete mode 100644 docs/application-config-options.md delete mode 100644 docs/application-security.md delete mode 100644 docs/build-rpm-howto.md delete mode 100644 docs/business-shared-folders.md delete mode 100644 docs/known-issues.md delete mode 100644 docs/national-cloud-deployments.md delete mode 100644 docs/privacy-policy.md delete mode 100644 docs/sharepoint-libraries.md delete mode 100644 docs/terms-of-service.md delete mode 100644 docs/ubuntu-package-install.md diff --git a/docs/Docker.md b/docs/Docker.md deleted file mode 100644 index 1bf6251ff..000000000 --- a/docs/Docker.md +++ /dev/null @@ -1,397 +0,0 @@ -# Run the OneDrive Client for Linux under Docker -This client can be run as a Docker container, with 3 available container base options for you to choose from: - -| Container Base | Docker Tag | Description | i686 | x86_64 | ARMHF | AARCH64 | -|----------------|-------------|----------------------------------------------------------------|:------:|:------:|:-----:|:-------:| -| Alpine Linux | edge-alpine | Docker container based on Alpine 3.18 using 'master' |❌|✔|❌|✔| -| Alpine Linux | alpine | Docker container based on Alpine 3.18 using latest release |❌|✔|❌|✔| -| Debian | debian | Docker container based on Debian Stable using latest release |✔|✔|✔|✔| -| Debian | edge | Docker container based on Debian Stable using 'master' |✔|✔|✔|✔| -| Debian | edge-debian | Docker container based on Debian Stable using 'master' |✔|✔|✔|✔| -| Debian | latest | Docker container based on Debian Stable using latest release |✔|✔|✔|✔| -| Fedora | edge-fedora | Docker container based on Fedora 38 using 'master' |❌|✔|❌|✔| -| Fedora | fedora | Docker container based on Fedora 38 using latest release |❌|✔|❌|✔| - -These containers offer a simple monitoring-mode service for the OneDrive Client for Linux. - -The instructions below have been validated on: -* Fedora 38 - -The instructions below will utilise the 'edge' tag, however this can be substituted for any of the other docker tags such as 'latest' from the table above if desired. - -The 'edge' Docker Container will align closer to all documentation and features, where as 'latest' is the release version from a static point in time. 
The 'latest' tag however may contain bugs and/or issues that will have been fixed, and those fixes are contained in 'edge'. - -Additionally there are specific version release tags for each release. Refer to https://hub.docker.com/r/driveone/onedrive/tags for any other Docker tags you may be interested in. - -**Note:** The below instructions for docker has been tested and validated when logging into the system as an unprivileged user (non 'root' user). - -## High Level Configuration Steps -1. Install 'docker' as per your distribution platform's instructions if not already installed. -2. Configure 'docker' to allow non-privileged users to run Docker commands -3. Disable 'SELinux' as per your distribution platform's instructions -4. Test 'docker' by running a test container without using `sudo` -5. Prepare the required docker volumes to store the configuration and data -6. Run the 'onedrive' container and perform authorisation -7. Running the 'onedrive' container under 'docker' - -## Configuration Steps - -### 1. Install 'docker' on your platform -Install 'docker' as per your distribution platform's instructions if not already installed. - -### 2. Configure 'docker' to allow non-privileged users to run Docker commands -Read https://docs.docker.com/engine/install/linux-postinstall/ to configure the 'docker' user group with your user account to allow your non 'root' user to run 'docker' commands. - -### 3. Disable SELinux on your platform -In order to run the Docker container, SELinux must be disabled. Without doing this, when the application is authenticated in the steps below, the following error will be presented: -```text -ERROR: The local file system returned an error with the following message: - Error Message: /onedrive/conf/refresh_token: Permission denied - -The database cannot be opened. Please check the permissions of ~/.config/onedrive/items.sqlite3 -``` -The only known work-around for the above problem at present is to disable SELinux. Please refer to your distribution platform's instructions on how to perform this step. - -* Fedora: https://docs.fedoraproject.org/en-US/quick-docs/selinux-changing-states-and-modes/#_disabling_selinux -* Red Hat Enterprise Linux: https://access.redhat.com/solutions/3176 - -Post disabling SELinux and reboot your system, confirm that `getenforce` returns `Disabled`: -```text -$ getenforce -Disabled -``` - -If you are still experiencing permission issues despite disabling SELinux, please read https://www.redhat.com/sysadmin/container-permission-denied-errors - -### 4. Test 'docker' on your platform -Ensure that 'docker' is running as a system service, and is enabled to be activated on system reboot: -```bash -sudo systemctl enable --now docker -``` - -Test that 'docker' is operational for your 'non-root' user, as per below: -```bash -[alex@fedora-38-docker-host ~]$ docker run hello-world -Unable to find image 'hello-world:latest' locally -latest: Pulling from library/hello-world -719385e32844: Pull complete -Digest: sha256:88ec0acaa3ec199d3b7eaf73588f4518c25f9d34f58ce9a0df68429c5af48e8d -Status: Downloaded newer image for hello-world:latest - -Hello from Docker! -This message shows that your installation appears to be working correctly. - -To generate this message, Docker took the following steps: - 1. The Docker client contacted the Docker daemon. - 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. - (amd64) - 3. 
The Docker daemon created a new container from that image which runs the - executable that produces the output you are currently reading. - 4. The Docker daemon streamed that output to the Docker client, which sent it - to your terminal. - -To try something more ambitious, you can run an Ubuntu container with: - $ docker run -it ubuntu bash - -Share images, automate workflows, and more with a free Docker ID: - https://hub.docker.com/ - -For more examples and ideas, visit: - https://docs.docker.com/get-started/ - -[alex@fedora-38-docker-host ~]$ -``` - -### 5. Configure the required docker volumes -The 'onedrive' Docker container requires 2 docker volumes to operate: -* Config Volume -* Data Volume - -The first volume is the configuration volume that stores all the applicable application configuration + current runtime state. In a non-containerised environment, this normally resides in `~/.config/onedrive` - in a containerised environment this is stored in the volume tagged as `/onedrive/conf` - -The second volume is the data volume, where all your data from Microsoft OneDrive is stored locally. This volume is mapped to an actual directory point on your local filesystem and this is stored in the volume tagged as `/onedrive/data` - -#### 5.1 Prepare the 'config' volume -Create the 'config' volume with the following command: -```bash -docker volume create onedrive_conf -``` - -This will create a docker volume labeled `onedrive_conf`, where all configuration of your onedrive account will be stored. You can add a custom config file in this location at a later point in time if required. - -#### 5.2 Prepare the 'data' volume -Create the 'data' volume with the following command: -```bash -docker volume create onedrive_data -``` - -This will create a docker volume labeled `onedrive_data` and will map to a path on your local filesystem. This is where your data from Microsoft OneDrive will be stored. Keep in mind that: - -* The owner of this specified folder must not be root -* The owner of this specified folder must have permissions for its parent directory -* Docker will attempt to change the permissions of the volume to the user the container is configured to run as - -**NOTE:** Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owed by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Docker container will fail to start with the following error message: -```bash -ROOT level privileges prohibited! -``` - -### 6. First run of Docker container under docker and performing authorisation -The 'onedrive' client within the container first needs to be authorised with your Microsoft account. This is achieved by initially running docker in interactive mode. - -Run the docker image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`). - -**Important:** The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the docker container. The script below will create 'ONEDRIVE_DATA_DIR' so that it exists locally for the docker volume mapping to occur. - -It is also a requirement that the container be run using a non-root uid and gid, you must insert a non-root UID and GID (e.g.` export ONEDRIVE_UID=1000` and export `ONEDRIVE_GID=1000`). 
The script below will use `id` to evaluate your system environment to use the correct values. -```bash -export ONEDRIVE_DATA_DIR="${HOME}/OneDrive" -export ONEDRIVE_UID=`id -u` -export ONEDRIVE_GID=`id -g` -mkdir -p ${ONEDRIVE_DATA_DIR} -docker run -it --name onedrive -v onedrive_conf:/onedrive/conf \ - -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" \ - -e "ONEDRIVE_UID=${ONEDRIVE_UID}" \ - -e "ONEDRIVE_GID=${ONEDRIVE_GID}" \ - driveone/onedrive:edge -``` - -When the Docker container successfully starts: -* You will be asked to open a specific link using your web browser -* Login to your Microsoft Account and give the application the permission -* After giving the permission, you will be redirected to a blank page -* Copy the URI of the blank page into the application prompt to authorise the application - -Once the 'onedrive' application is authorised, the client will automatically start monitoring your `ONEDRIVE_DATA_DIR` for data changes to be uploaded to OneDrive. Files stored on OneDrive will be downloaded to this location. - -If the client is working as expected, you can detach from the container with Ctrl+p, Ctrl+q. - -### 7. Running the 'onedrive' container under 'docker' - -#### 7.1 Check if the monitor service is running -```bash -docker ps -f name=onedrive -``` - -#### 7.2 Show 'onedrive' runtime logs -```bash -docker logs onedrive -``` - -#### 7.3 Stop running 'onedrive' container -```bash -docker stop onedrive -``` - -#### 7.4 Start 'onedrive' container -```bash -docker start onedrive -``` - -#### 7.5 Remove 'onedrive' container -```bash -docker rm -f onedrive -``` - -## Advanced Usage - -### How to use Docker-compose -You can utilise `docker-compose` if available on your platform if you are able to use docker compose schemas > 3. - -In the following example it is assumed you have a `ONEDRIVE_DATA_DIR` environment variable and have already created the `onedrive_conf` volume. - -You can also use docker bind mounts for the configuration folder, e.g. `export ONEDRIVE_CONF="${HOME}/OneDriveConfig"`. - -``` -version: "3" -services: - onedrive: - image: driveone/onedrive:edge - restart: unless-stopped - environment: - - ONEDRIVE_UID=${PUID} - - ONEDRIVE_GID=${PGID} - volumes: - - onedrive_conf:/onedrive/conf - - ${ONEDRIVE_DATA_DIR}:/onedrive/data -``` - -Note that you still have to perform step 3: First Run. - -### Editing the running configuration and using a 'config' file -The 'onedrive' client should run in default configuration, however you can change this default configuration by placing a custom config file in the `onedrive_conf` docker volume. First download the default config from [here](https://raw.githubusercontent.com/abraunegg/onedrive/master/config) -Then put it into your onedrive_conf volume path, which can be found with: - -```bash -docker volume inspect onedrive_conf -``` - -Or you can map your own config folder to the config volume. Make sure to copy all files from the docker volume into your mapped folder first. - -The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration) - -### Syncing multiple accounts -There are many ways to do this, the easiest is probably to do the following: -1. Create a second docker config volume (replace `Work` with your desired name): `docker volume create onedrive_conf_Work` -2. 
And start a second docker monitor container (again replace `Work` with your desired name): -``` -export ONEDRIVE_DATA_DIR_WORK="/home/abraunegg/OneDriveWork" -mkdir -p ${ONEDRIVE_DATA_DIR_WORK} -docker run -it --restart unless-stopped --name onedrive_Work -v onedrive_conf_Work:/onedrive/conf -v "${ONEDRIVE_DATA_DIR_WORK}:/onedrive/data" driveone/onedrive:edge -``` - -### Run or update the Docker container with one script -If you are experienced with docker and onedrive, you can use the following script: - -```bash -# Update ONEDRIVE_DATA_DIR with correct OneDrive directory path -ONEDRIVE_DATA_DIR="${HOME}/OneDrive" -# Create directory if non-existant -mkdir -p ${ONEDRIVE_DATA_DIR} - -firstRun='-d' -docker pull driveone/onedrive:edge -docker inspect onedrive_conf > /dev/null 2>&1 || { docker volume create onedrive_conf; firstRun='-it'; } -docker inspect onedrive > /dev/null 2>&1 && docker rm -f onedrive -docker run $firstRun --restart unless-stopped --name onedrive -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge -``` - -## Supported Docker Environment Variables -| Variable | Purpose | Sample Value | -| ---------------- | --------------------------------------------------- |:--------------------------------------------------------------------------------------------------------------------------------:| -| ONEDRIVE_UID | UserID (UID) to run as | 1000 | -| ONEDRIVE_GID | GroupID (GID) to run as | 1000 | -| ONEDRIVE_VERBOSE | Controls "--verbose" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_DEBUG | Controls "--verbose --verbose" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_DEBUG_HTTPS | Controls "--debug-https" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_RESYNC | Controls "--resync" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_DOWNLOADONLY | Controls "--download-only" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_UPLOADONLY | Controls "--upload-only" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_NOREMOTEDELETE | Controls "--no-remote-delete" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_LOGOUT | Controls "--logout" switch. Default is 0 | 1 | -| ONEDRIVE_REAUTH | Controls "--reauth" switch. Default is 0 | 1 | -| ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" | -| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) | -| ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. Default = "" | "mydir" | -| ONEDRIVE_DRYRUN | Controls "--dry-run" option. 
Default is 0 | 1 | - -### Environment Variables Usage Examples -**Verbose Output:** -```bash -docker container run -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge -``` -**Debug Output:** -```bash -docker container run -e ONEDRIVE_DEBUG=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge -``` -**Perform a --resync:** -```bash -docker container run -e ONEDRIVE_RESYNC=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge -``` -**Perform a --resync and --verbose:** -```bash -docker container run -e ONEDRIVE_RESYNC=1 -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge -``` -**Perform a --logout and re-authenticate:** -```bash -docker container run -it -e ONEDRIVE_LOGOUT=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge -``` - -## Building a custom Docker image - -### Build Environment Requirements -* Build environment must have at least 1GB of memory & 2GB swap space - -You can validate your build environment memory status with the following command: -```text -cat /proc/meminfo | grep -E 'MemFree|Swap' -``` -This should result in the following similar output: -```text -MemFree: 3704644 kB -SwapCached: 0 kB -SwapTotal: 8117244 kB -SwapFree: 8117244 kB -``` - -If you do not have enough swap space, you can use the following script to dynamically allocate a swapfile for building the Docker container: - -```bash -cd /var -sudo fallocate -l 1.5G swapfile -sudo chmod 600 swapfile -sudo mkswap swapfile -sudo swapon swapfile -# make swap permanent -sudo nano /etc/fstab -# add "/swapfile swap swap defaults 0 0" at the end of file -# check it has been assigned -swapon -s -free -h -``` - -If you are running a Raspberry Pi, you will need to edit your system configuration to increase your swapfile: - -* Modify the file `/etc/dphys-swapfile` and edit the `CONF_SWAPSIZE`, for example: `CONF_SWAPSIZE=2048`. - -A reboot of your Raspberry Pi is required to make this change effective. - -### Building and running a custom Docker image -You can also build your own image instead of pulling the one from [hub.docker.com](https://hub.docker.com/r/driveone/onedrive): -```bash -git clone https://github.com/abraunegg/onedrive -cd onedrive -docker build . -t local-onedrive -f contrib/docker/Dockerfile -docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive:latest -``` - -There are alternate, smaller images available by using `Dockerfile-debian` or `Dockerfile-alpine`. These [multi-stage builder pattern](https://docs.docker.com/develop/develop-images/multistage-build/) Dockerfiles require Docker version at least 17.05. - -### How to build and run a custom Docker image based on Debian -``` bash -docker build . -t local-ondrive-debian -f contrib/docker/Dockerfile-debian -docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-ondrive-debian:latest -``` - -### How to build and run a custom Docker image based on Alpine Linux -``` bash -docker build . 
-t local-ondrive-alpine -f contrib/docker/Dockerfile-alpine -docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-ondrive-alpine:latest -``` - -### How to build and run a custom Docker image for ARMHF (Raspberry Pi) -Compatible with: -* Raspberry Pi -* Raspberry Pi 2 -* Raspberry Pi Zero -* Raspberry Pi 3 -* Raspberry Pi 4 -``` bash -docker build . -t local-onedrive-armhf -f contrib/docker/Dockerfile-debian -docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive-armhf:latest -``` - -### How to build and run a custom Docker image for AARCH64 Platforms -``` bash -docker build . -t local-onedrive-aarch64 -f contrib/docker/Dockerfile-debian -docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive-aarch64:latest -``` -### How to support double-byte languages -In some geographic regions, you may need to change and/or update the locale specification of the Docker container to better support the local language used for your local filesystem. To do this, follow the example below: -``` -FROM driveone/onedrive - -ENV DEBIAN_FRONTEND noninteractive - -RUN apt-get update -RUN apt-get install -y locales - -RUN echo "ja_JP.UTF-8 UTF-8" > /etc/locale.gen && \ - locale-gen ja_JP.UTF-8 && \ - dpkg-reconfigure locales && \ - /usr/sbin/update-locale LANG=ja_JP.UTF-8 - -ENV LC_ALL ja_JP.UTF-8 -``` -The above example changes the Docker container to support Japanese. To support your local language, change `ja_JP.UTF-8` to the required entry. \ No newline at end of file diff --git a/docs/INSTALL.md b/docs/INSTALL.md deleted file mode 100644 index f5338122d..000000000 --- a/docs/INSTALL.md +++ /dev/null @@ -1,282 +0,0 @@ -# Installing or Upgrading using Distribution Packages or Building the OneDrive Client for Linux from source - -## Installing or Upgrading using Distribution Packages -This project has been packaged for the following Linux distributions as per below. The current client release is: [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) - -Only the current release version or greater is supported. Earlier versions are not supported and should not be installed or used. - -#### Important Note: -Distribution packages may be of an older release when compared to the latest release that is [available](https://github.com/abraunegg/onedrive/releases). If any package version indicator below is 'red' for your distribution, it is recommended that you build from source. Do not install the software from the available distribution package. If a package is out of date, please contact the package maintainer for resolution. 
- -| Distribution | Package Name & Package Link |   PKG_Version   |  i686  | x86_64 | ARMHF | AARCH64 | Extra Details | -|---------------------------------|------------------------------------------------------------------------------|:---------------:|:----:|:------:|:-----:|:-------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Alpine Linux | [onedrive](https://pkgs.alpinelinux.org/packages?name=onedrive&branch=edge) |Alpine Linux Edge package|❌|✔|❌|✔ | | -| Arch Linux

Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |AUR package|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)

**Note:** You must first install 'base-devel', as it is a prerequisite for using the AUR

**Note:** If asked to select a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'

**Note:** System must have at least 1GB of memory & 1GB swap space -| Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |Debian 11 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories

It is recommended that for Debian 11 you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
-| Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |Debian 12 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories

It is recommended that for Debian 12 you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
-| Debian Sid | [onedrive](https://packages.debian.org/sid/onedrive) |Debian Sid package|✔|✔|✔|✔| |
-| Fedora | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |Fedora Rawhide package|✔|✔|✔|✔| |
-| Gentoo | [onedrive](https://gpo.zugaina.org/net-misc/onedrive) | No API Available |✔|✔|❌|❌| |
-| Homebrew | [onedrive](https://formulae.brew.sh/formula/onedrive) | Homebrew package |❌|✔|❌|❌| |
-| Linux Mint 20.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories

It is recommended that for Linux Mint you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
-| Linux Mint 21.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories

It is recommended that for Linux Mint that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | -| NixOS | [onedrive](https://search.nixos.org/packages?channel=20.09&from=0&size=50&sort=relevance&query=onedrive)|nixpkgs unstable package|❌|✔|❌|❌| Use package `onedrive` either by adding it to `configuration.nix` or by using the command `nix-env -iA .onedrive`. This does not install a service. To install a service, use unstable channel (will stabilize in 20.09) and add `services.onedrive.enable=true` in `configuration.nix`. You can also add a custom package using the `services.onedrive.package` option (recommended since package lags upstream). Enabling the service installs a default package too (based on the channel). You can also add multiple onedrive accounts trivially, see [documentation](https://github.com/NixOS/nixpkgs/pull/77734#issuecomment-575874225). | -| OpenSuSE | [onedrive](https://software.opensuse.org/package/onedrive) |openSUSE Tumbleweed package|✔|✔|❌|❌| | -| OpenSuSE Build Service | [onedrive](https://build.opensuse.org/package/show/home:npreining:debian-ubuntu-onedrive/onedrive) | No API Available |✔|✔|✔|✔| Package Build Service for Debian and Ubuntu | -| Raspbian | [onedrive](https://archive.raspbian.org/raspbian/pool/main/o/onedrive/) |Raspbian Stable package |❌|❌|✔|✔| **Note:** Do not install from Raspbian Package Repositories

It is recommended that for Raspbian you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
-| Slackware | [onedrive](https://slackbuilds.org/result/?search=onedrive&sv=) |SlackBuilds package|✔|✔|❌|❌| |
-| Solus | [onedrive](https://dev.getsol.us/search/query/FB7PIf1jG9Z9/#R) |Solus package|✔|✔|❌|❌| |
-| Ubuntu 20.04 | [onedrive](https://packages.ubuntu.com/focal/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe

It is recommended that for Ubuntu you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
-| Ubuntu 22.04 | [onedrive](https://packages.ubuntu.com/jammy/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe

It is recommended that for Ubuntu you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
-| Ubuntu 23.04 | [onedrive](https://packages.ubuntu.com/lunar/onedrive) |Ubuntu 23.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe

It is recommended that for Ubuntu that you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) | -| Void Linux | [onedrive](https://voidlinux.org/packages/?arch=x86_64&q=onedrive) |Void Linux x86_64 package|✔|✔|❌|❌| | - -#### Important information for all Ubuntu and Ubuntu based distribution users: -This information is specifically for the following platforms and distributions: -* Ubuntu -* Lubuntu -* Linux Mint -* POP OS -* Peppermint OS - -Whilst there are [onedrive](https://packages.ubuntu.com/search?keywords=onedrive&searchon=names&suite=all§ion=all) Universe packages available for Ubuntu, do not install 'onedrive' from these Universe packages. The default Universe packages are out-of-date and are not supported and should not be used. If you wish to use a package, it is highly recommended that you utilise the [OpenSuSE Build Service](ubuntu-package-install.md) to install packages for these platforms. If the OpenSuSE Build Service does not cater for your version, your only option is to build from source. - -If you wish to change this situation so that you can just use the Universe packages via 'apt install onedrive', consider becoming the Ubuntu package maintainer and contribute back to your community. - -## Building from Source - High Level Requirements -* Build environment must have at least 1GB of memory & 1GB swap space -* Install the required distribution package dependencies -* [libcurl](http://curl.haxx.se/libcurl/) -* [SQLite 3](https://www.sqlite.org/) >= 3.7.15 -* [Digital Mars D Compiler (DMD)](http://dlang.org/download.html) or [LDC – the LLVM-based D Compiler](https://github.com/ldc-developers/ldc) - -**Note:** DMD version >= 2.088.0 or LDC version >= 1.18.0 is required to compile this application - -### Example for installing DMD Compiler -```text -curl -fsS https://dlang.org/install.sh | bash -s dmd -``` - -### Example for installing LDC Compiler -```text -curl -fsS https://dlang.org/install.sh | bash -s ldc -``` - -## Distribution Package Dependencies -### Dependencies: Ubuntu 16.x -Ubuntu Linux 16.x LTS reached the end of its five-year LTS window on April 30th 2021 and is no longer supported. - -### Dependencies: Ubuntu 18.x / Lubuntu 18.x -Ubuntu Linux 18.x LTS reached the end of its five-year LTS window on May 31th 2023 and is no longer supported. - -### Dependencies: Debian 9 -Debian 9 reached the end of its five-year support window on June 30th 2022 and is no longer supported. - -### Dependencies: Ubuntu 20.x -> Ubuntu 23.x / Debian 10 -> Debian 12 - x86_64 -These dependencies are also applicable for all Ubuntu based distributions such as: -* Lubuntu -* Linux Mint -* POP OS -* Peppermint OS -```text -sudo apt install build-essential -sudo apt install libcurl4-openssl-dev libsqlite3-dev pkg-config git curl -curl -fsS https://dlang.org/install.sh | bash -s dmd -``` -For notifications the following is also necessary: -```text -sudo apt install libnotify-dev -``` - -### Dependencies: CentOS 6.x / RHEL 6.x -CentOS 6.x and RHEL 6.x reached End of Life status on November 30th 2020 and is no longer supported. 
- -### Dependencies: Fedora < Version 18 / CentOS 7.x / RHEL 7.x -```text -sudo yum groupinstall 'Development Tools' -sudo yum install libcurl-devel sqlite-devel -curl -fsS https://dlang.org/install.sh | bash -s dmd-2.099.0 -``` -For notifications the following is also necessary: -```text -sudo yum install libnotify-devel -``` - -### Dependencies: Fedora > Version 18 / CentOS 8.x / RHEL 8.x / RHEL 9.x -```text -sudo dnf groupinstall 'Development Tools' -sudo dnf install libcurl-devel sqlite-devel -curl -fsS https://dlang.org/install.sh | bash -s dmd -``` -For notifications the following is also necessary: -```text -sudo dnf install libnotify-devel -``` - -### Dependencies: Arch Linux & Manjaro Linux -```text -sudo pacman -S make pkg-config curl sqlite ldc -``` -For notifications the following is also necessary: -```text -sudo pacman -S libnotify -``` - -### Dependencies: Raspbian (ARMHF) and Ubuntu 22.x / Debian 11 / Debian 12 / Raspbian (ARM64) -**Note:** The minimum LDC compiler version required to compile this application is now 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later. - -These instructions were validated using: -* `Linux raspberrypi 5.10.92-v8+ #1514 SMP PREEMPT Mon Jan 17 17:39:38 GMT 2022 aarch64` (2022-01-28-raspios-bullseye-armhf-lite) using Raspberry Pi 3B (revision 1.2) -* `Linux raspberrypi 5.10.92-v8+ #1514 SMP PREEMPT Mon Jan 17 17:39:38 GMT 2022 aarch64` (2022-01-28-raspios-bullseye-arm64-lite) using Raspberry Pi 3B (revision 1.2) -* `Linux ubuntu 5.15.0-1005-raspi #5-Ubuntu SMP PREEMPT Mon Apr 4 12:21:48 UTC 2022 aarch64 aarch64 aarch64 GNU/Linux` (ubuntu-22.04-preinstalled-server-arm64+raspi) using Raspberry Pi 3B (revision 1.2) - -**Note:** Build environment must have at least 1GB of memory & 1GB swap space. Check with `swapon`. - -```text -sudo apt install build-essential -sudo apt install libcurl4-openssl-dev libsqlite3-dev pkg-config git curl ldc -``` -For notifications the following is also necessary: -```text -sudo apt install libnotify-dev -``` - -### Dependencies: Gentoo -```text -sudo emerge app-portage/layman -sudo layman -a dlang -``` -Add ebuild from contrib/gentoo to a local overlay to use. - -For notifications the following is also necessary: -```text -sudo emerge x11-libs/libnotify -``` - -### Dependencies: OpenSuSE Leap 15.0 -```text -sudo zypper addrepo https://download.opensuse.org/repositories/devel:languages:D/openSUSE_Leap_15.0/devel:languages:D.repo -sudo zypper refresh -sudo zypper install gcc git libcurl-devel sqlite3-devel dmd phobos-devel phobos-devel-static -``` -For notifications the following is also necessary: -```text -sudo zypper install libnotify-devel -``` - -### Dependencies: OpenSuSE Leap 15.1 -```text -sudo zypper addrepo https://download.opensuse.org/repositories/devel:languages:D/openSUSE_Leap_15.1/devel:languages:D.repo -sudo zypper refresh -sudo zypper install gcc git libcurl-devel sqlite3-devel dmd phobos-devel phobos-devel-static -``` -For notifications the following is also necessary: -```text -sudo zypper install libnotify-devel -``` - -### Dependencies: OpenSuSE Leap 15.2 -```text -sudo zypper refresh -sudo zypper install gcc git libcurl-devel sqlite3-devel dmd phobos-devel phobos-devel-static -``` -For notifications the following is also necessary: -```text -sudo zypper install libnotify-devel -``` - -## Compilation & Installation -### High Level Steps -1. 
Install the platform dependencies for your Linux OS -2. Activate your DMD or LDC compiler -3. Clone the GitHub repository, run configure and make, then install -4. Deactivate your DMD or LDC compiler - -### Building using DMD Reference Compiler -Before cloning and compiling, if you have installed DMD via curl for your OS, you will need to activate DMD as per example below: -```text -Run `source ~/dlang/dmd-2.088.0/activate` in your shell to use dmd-2.088.0. -This will setup PATH, LIBRARY_PATH, LD_LIBRARY_PATH, DMD, DC, and PS1. -Run `deactivate` later on to restore your environment. -``` -Without performing this step, the compilation process will fail. - -**Note:** Depending on your DMD version, substitute `2.088.0` above with your DMD version that is installed. - -```text -git clone https://github.com/abraunegg/onedrive.git -cd onedrive -./configure -make clean; make; -sudo make install -``` - -### Build options -#### GUI Notification Support -GUI notification support can be enabled using the `configure` switch `--enable-notifications`. - -#### systemd service directory customisation support -Systemd service files are installed in the appropriate directories on the system, -as provided by `pkg-config systemd` settings. If the need for overriding the -deduced path are necessary, the two options `--with-systemdsystemunitdir` (for -the Systemd system unit location), and `--with-systemduserunitdir` (for the -Systemd user unit location) can be specified. Passing in `no` to one of these -options disabled service file installation. - -#### Additional Compiler Debug -By passing `--enable-debug` to the `configure` call, `onedrive` gets built with additional debug -information, useful (for example) to get `perf`-issued figures. - -#### Shell Completion Support -By passing `--enable-completions` to the `configure` call, shell completion functions are -installed for `bash`, `zsh` and `fish`. The installation directories are determined -as far as possible automatically, but can be overridden by passing -`--with-bash-completion-dir=`, `--with-zsh-completion-dir=`, and -`--with-fish-completion-dir=` to `configure`. - -### Building using a different compiler (for example [LDC](https://wiki.dlang.org/LDC)) -#### ARMHF Architecture (Raspbian) and ARM64 Architecture (Ubuntu 22.x / Debian 11 / Raspbian) -**Note:** The minimum LDC compiler version required to compile this application is now 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later. - -**Note:** Build environment must have at least 1GB of memory & 1GB swap space. Check with `swapon`. -```text -git clone https://github.com/abraunegg/onedrive.git -cd onedrive -./configure DC=/usr/bin/ldmd2 -make clean; make -sudo make install -``` - -## Upgrading the client -If you have installed the client from a distribution package, the client will be updated when the distribution package is updated by the package maintainer and will be updated to the new application version when you perform your package update. - -If you have built the client from source, to upgrade your client, it is recommended that you first uninstall your existing 'onedrive' binary (see below), then re-install the client by re-cloning, re-compiling and re-installing the client again to install the new version. 
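-
-A minimal sketch of that upgrade flow, re-using only commands already shown in this document (uninstall from your existing repository clone, then re-clone, rebuild and reinstall in a fresh location):
-```text
-# From your existing repository clone: remove the old 'onedrive' binary and service files
-sudo make uninstall
-# In a fresh location, re-clone, re-compile and re-install the client
-git clone https://github.com/abraunegg/onedrive.git
-cd onedrive
-./configure
-make clean; make
-sudo make install
-# Confirm the newly installed client version
-onedrive --version
-```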
- -**Note:** Following the uninstall process will remove all client components including *all* systemd files, including any custom files created for specific access such as SharePoint Libraries. - -You can optionally choose to not perform this uninstallation step, and simply re-install the client by re-cloning, re-compiling and re-installing the client again - however the risk here is that you end up with two onedrive client binaries on your system, and depending on your system search path preferences, this will determine which binary is used. - -**Important:** Before performing any upgrade, it is highly recommended for you to stop any running systemd service if applicable to ensure that these services are restarted using the updated client version. - -Post re-install, to confirm that you have the new version of the client installed, use `onedrive --version` to determine the client version that is now installed. - -## Uninstalling the client -### Uninstalling the client if installed from distribution package -Follow your distribution documentation to uninstall the package that you installed - -### Uninstalling the client if installed and built from source -From within your GitHub repository clone, perform the following to remove the 'onedrive' binary: -```text -sudo make uninstall -``` - -If you are not upgrading your client, to remove your application state and configuration, perform the following additional step: -``` -rm -rf ~/.config/onedrive -``` -**Note:** If you are using the `--confdir option`, substitute `~/.config/onedrive` for the correct directory storing your client configuration. - -If you want to just delete the application key, but keep the items database: -```text -rm -f ~/.config/onedrive/refresh_token -``` diff --git a/docs/Podman.md b/docs/Podman.md deleted file mode 100644 index 4f3474f34..000000000 --- a/docs/Podman.md +++ /dev/null @@ -1,361 +0,0 @@ -# Run the OneDrive Client for Linux under Podman -This client can be run as a Podman container, with 3 available container base options for you to choose from: - -| Container Base | Docker Tag | Description | i686 | x86_64 | ARMHF | AARCH64 | -|----------------|-------------|----------------------------------------------------------------|:------:|:------:|:-----:|:-------:| -| Alpine Linux | edge-alpine | Podman container based on Alpine 3.18 using 'master' |❌|✔|❌|✔| -| Alpine Linux | alpine | Podman container based on Alpine 3.18 using latest release |❌|✔|❌|✔| -| Debian | debian | Podman container based on Debian Stable using latest release |✔|✔|✔|✔| -| Debian | edge | Podman container based on Debian Stable using 'master' |✔|✔|✔|✔| -| Debian | edge-debian | Podman container based on Debian Stable using 'master' |✔|✔|✔|✔| -| Debian | latest | Podman container based on Debian Stable using latest release |✔|✔|✔|✔| -| Fedora | edge-fedora | Podman container based on Fedora 38 using 'master' |❌|✔|❌|✔| -| Fedora | fedora | Podman container based on Fedora 38 using latest release |❌|✔|❌|✔| - -These containers offer a simple monitoring-mode service for the OneDrive Client for Linux. - -The instructions below have been validated on: -* Fedora 38 - -The instructions below will utilise the 'edge' tag, however this can be substituted for any of the other docker tags such as 'latest' from the table above if desired. - -The 'edge' Docker Container will align closer to all documentation and features, where as 'latest' is the release version from a static point in time. 
The 'latest' tag however may contain bugs and/or issues that will have been fixed, and those fixes are contained in 'edge'. - -Additionally there are specific version release tags for each release. Refer to https://hub.docker.com/r/driveone/onedrive/tags for any other Docker tags you may be interested in. - -**Note:** The below instructions for podman has been tested and validated when logging into the system as an unprivileged user (non 'root' user). - -## High Level Configuration Steps -1. Install 'podman' as per your distribution platform's instructions if not already installed. -2. Disable 'SELinux' as per your distribution platform's instructions -3. Test 'podman' by running a test container -4. Prepare the required podman volumes to store the configuration and data -5. Run the 'onedrive' container and perform authorisation -6. Running the 'onedrive' container under 'podman' - -## Configuration Steps - -### 1. Install 'podman' on your platform -Install 'podman' as per your distribution platform's instructions if not already installed. - -### 2. Disable SELinux on your platform -In order to run the Docker container under 'podman', SELinux must be disabled. Without doing this, when the application is authenticated in the steps below, the following error will be presented: -```text -ERROR: The local file system returned an error with the following message: - Error Message: /onedrive/conf/refresh_token: Permission denied - -The database cannot be opened. Please check the permissions of ~/.config/onedrive/items.sqlite3 -``` -The only known work-around for the above problem at present is to disable SELinux. Please refer to your distribution platform's instructions on how to perform this step. - -* Fedora: https://docs.fedoraproject.org/en-US/quick-docs/selinux-changing-states-and-modes/#_disabling_selinux -* Red Hat Enterprise Linux: https://access.redhat.com/solutions/3176 - -Post disabling SELinux and reboot your system, confirm that `getenforce` returns `Disabled`: -```text -$ getenforce -Disabled -``` - -If you are still experiencing permission issues despite disabling SELinux, please read https://www.redhat.com/sysadmin/container-permission-denied-errors - -### 3. Test 'podman' on your platform -Test that 'podman' is operational for your 'non-root' user, as per below: -```bash -[alex@fedora38-podman ~]$ podman pull fedora -Resolved "fedora" as an alias (/etc/containers/registries.conf.d/000-shortnames.conf) -Trying to pull registry.fedoraproject.org/fedora:latest... -Getting image source signatures -Copying blob b30887322388 done | -Copying config a1cd3cbf8a done | -Writing manifest to image destination -a1cd3cbf8adaa422629f2fcdc629fd9297138910a467b11c66e5ddb2c2753dff -[alex@fedora38-podman ~]$ podman run fedora /bin/echo "Welcome to the Podman World" -Welcome to the Podman World -[alex@fedora38-podman ~]$ -``` - -### 4. Configure the required podman volumes -The 'onedrive' Docker container requires 2 podman volumes to operate: -* Config Volume -* Data Volume - -The first volume is the configuration volume that stores all the applicable application configuration + current runtime state. In a non-containerised environment, this normally resides in `~/.config/onedrive` - in a containerised environment this is stored in the volume tagged as `/onedrive/conf` - -The second volume is the data volume, where all your data from Microsoft OneDrive is stored locally. 
This volume is mapped to an actual directory point on your local filesystem and this is stored in the volume tagged as `/onedrive/data` - -#### 4.1 Prepare the 'config' volume -Create the 'config' volume with the following command: -```bash -podman volume create onedrive_conf -``` - -This will create a podman volume labeled `onedrive_conf`, where all configuration of your onedrive account will be stored. You can add a custom config file in this location at a later point in time if required. - -#### 4.2 Prepare the 'data' volume -Create the 'data' volume with the following command: -```bash -podman volume create onedrive_data -``` - -This will create a podman volume labeled `onedrive_data` and will map to a path on your local filesystem. This is where your data from Microsoft OneDrive will be stored. Keep in mind that: - -* The owner of this specified folder must not be root -* Podman will attempt to change the permissions of the volume to the user the container is configured to run as - -**NOTE:** Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owed by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Podman container will fail to start with the following error message: -```bash -ROOT level privileges prohibited! -``` - -### 5. First run of Docker container under podman and performing authorisation -The 'onedrive' client within the container first needs to be authorised with your Microsoft account. This is achieved by initially running podman in interactive mode. - -Run the podman image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`). - -**Important:** The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the podman container. The script below will create 'ONEDRIVE_DATA_DIR' so that it exists locally for the podman volume mapping to occur. - -It is also a requirement that the container be run using a non-root uid and gid, you must insert a non-root UID and GID (e.g.` export ONEDRIVE_UID=1000` and export `ONEDRIVE_GID=1000`). The script below will use `id` to evaluate your system environment to use the correct values. -```bash -export ONEDRIVE_DATA_DIR="${HOME}/OneDrive" -export ONEDRIVE_UID=`id -u` -export ONEDRIVE_GID=`id -g` -mkdir -p ${ONEDRIVE_DATA_DIR} -podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ - -v onedrive_conf:/onedrive/conf:U,Z \ - -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" \ - driveone/onedrive:edge -``` - -**Important:** In some scenarios, 'podman' sets the configuration and data directories to a different UID & GID as specified. To resolve this situation, you must run 'podman' with the `--userns=keep-id` flag to ensure 'podman' uses the UID and GID as specified. 
The updated script example when using `--userns=keep-id` is below: - -```bash -export ONEDRIVE_DATA_DIR="${HOME}/OneDrive" -export ONEDRIVE_UID=`id -u` -export ONEDRIVE_GID=`id -g` -mkdir -p ${ONEDRIVE_DATA_DIR} -podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ - --userns=keep-id \ - -v onedrive_conf:/onedrive/conf:U,Z \ - -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" \ - driveone/onedrive:edge -``` - - -**Important:** If you plan to use the 'podman' built in auto-updating of container images described in 'Systemd Service & Auto Updating' below, you must pass an additional argument to set a label during the first run. The updated script example to support auto-updating of container images is below: - -```bash -export ONEDRIVE_DATA_DIR="${HOME}/OneDrive" -export ONEDRIVE_UID=`id -u` -export ONEDRIVE_GID=`id -g` -mkdir -p ${ONEDRIVE_DATA_DIR} -podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ - --userns=keep-id \ - -v onedrive_conf:/onedrive/conf:U,Z \ - -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" \ - -e PODMAN=1 \ - --label "io.containers.autoupdate=image" \ - driveone/onedrive:edge -``` - -When the Podman container successfully starts: -* You will be asked to open a specific link using your web browser -* Login to your Microsoft Account and give the application the permission -* After giving the permission, you will be redirected to a blank page -* Copy the URI of the blank page into the application prompt to authorise the application - -Once the 'onedrive' application is authorised, the client will automatically start monitoring your `ONEDRIVE_DATA_DIR` for data changes to be uploaded to OneDrive. Files stored on OneDrive will be downloaded to this location. - -If the client is working as expected, you can detach from the container with Ctrl+p, Ctrl+q. - -### 6. Running the 'onedrive' container under 'podman' - -#### 6.1 Check if the monitor service is running -```bash -podman ps -f name=onedrive -``` - -#### 6.2 Show 'onedrive' runtime logs -```bash -podman logs onedrive -``` - -#### 6.3 Stop running 'onedrive' container -```bash -podman stop onedrive -``` - -#### 6.4 Start 'onedrive' container -```bash -podman start onedrive -``` - -#### 6.5 Remove 'onedrive' container -```bash -podman rm -f onedrive -``` - - -## Advanced Usage - -### Systemd Service & Auto Updating - -Podman supports running containers as a systemd service and also auto updating of the container images. Using the existing running container you can generate a systemd unit file to be installed by the **root** user. To have your container image auto-update with podman, it must first be created with the label `"io.containers.autoupdate=image"` mentioned in step 5 above. 
- -``` -cd /tmp -podman generate systemd --new --restart-policy on-failure --name -f onedrive -/tmp/container-onedrive.service - -# copy the generated systemd unit file to the systemd path and reload the daemon - -cp -Z ~/container-onedrive.service /usr/lib/systemd/system -systemctl daemon-reload - -#optionally enable it to startup on boot - -systemctl enable container-onedrive.service - -#check status - -systemctl status container-onedrive - -#start/stop/restart container as a systemd service - -systemctl stop container-onedrive -systemctl start container-onedrive -``` - -To update the image using podman (Ad-hoc) -``` -podman auto-update -``` - -To update the image using systemd (Automatic/Scheduled) -``` -# Enable the podman-auto-update.timer service at system start: - -systemctl enable podman-auto-update.timer - -# Start the service - -systemctl start podman-auto-update.timer - -# Containers with the autoupdate label will be updated on the next scheduled timer - -systemctl list-timers --all -``` - -### Editing the running configuration and using a 'config' file -The 'onedrive' client should run in default configuration, however you can change this default configuration by placing a custom config file in the `onedrive_conf` podman volume. First download the default config from [here](https://raw.githubusercontent.com/abraunegg/onedrive/master/config) -Then put it into your onedrive_conf volume path, which can be found with: - -```bash -podman volume inspect onedrive_conf -``` -Or you can map your own config folder to the config volume. Make sure to copy all files from the volume into your mapped folder first. - -The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration) - -### Syncing multiple accounts -There are many ways to do this, the easiest is probably to do the following: -1. Create a second podman config volume (replace `work` with your desired name): `podman volume create onedrive_conf_work` -2. And start a second podman monitor container (again replace `work` with your desired name): - -```bash -export ONEDRIVE_DATA_DIR_WORK="/home/abraunegg/OneDriveWork" -export ONEDRIVE_UID=`id -u` -export ONEDRIVE_GID=`id -g` -mkdir -p ${ONEDRIVE_DATA_DIR_WORK} -podman run -it --name onedrive_work --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ - --userns=keep-id \ - -v onedrive_conf_work:/onedrive/conf:U,Z \ - -v "${ONEDRIVE_DATA_DIR_WORK}:/onedrive/data:U,Z" \ - -e PODMAN=1 \ - --label "io.containers.autoupdate=image" \ - driveone/onedrive:edge -``` - -## Supported Podman Environment Variables -| Variable | Purpose | Sample Value | -| ---------------- | --------------------------------------------------- |:-------------:| -| ONEDRIVE_UID | UserID (UID) to run as | 1000 | -| ONEDRIVE_GID | GroupID (GID) to run as | 1000 | -| ONEDRIVE_VERBOSE | Controls "--verbose" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_DEBUG | Controls "--verbose --verbose" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_DEBUG_HTTPS | Controls "--debug-https" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_RESYNC | Controls "--resync" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_DOWNLOADONLY | Controls "--download-only" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_UPLOADONLY | Controls "--upload-only" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_NOREMOTEDELETE | Controls "--no-remote-delete" switch on onedrive sync. 
Default is 0 | 1 | -| ONEDRIVE_LOGOUT | Controls "--logout" switch. Default is 0 | 1 | -| ONEDRIVE_REAUTH | Controls "--reauth" switch. Default is 0 | 1 | -| ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" | -| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) | -| ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | -| ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. Default = "" | "mydir" | -| ONEDRIVE_DRYRUN | Controls "--dry-run" option. Default is 0 | 1 | - -### Environment Variables Usage Examples -**Verbose Output:** -```bash -podman run -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:edge -``` -**Debug Output:** -```bash -podman run -e ONEDRIVE_DEBUG=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:edge -``` -**Perform a --resync:** -```bash -podman run -e ONEDRIVE_RESYNC=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:edge -``` -**Perform a --resync and --verbose:** -```bash -podman run -e ONEDRIVE_RESYNC=1 -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:edge -``` -**Perform a --logout and re-authenticate:** -```bash -podman run -it -e ONEDRIVE_LOGOUT=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:edge -``` - -## Building a custom Podman image -You can also build your own image instead of pulling the one from [hub.docker.com](https://hub.docker.com/r/driveone/onedrive): -```bash -git clone https://github.com/abraunegg/onedrive -cd onedrive -podman build . -t local-onedrive -f contrib/docker/Dockerfile -``` - -There are alternate, smaller images available by building -Dockerfile-debian or Dockerfile-alpine. These [multi-stage builder pattern](https://docs.docker.com/develop/develop-images/multistage-build/) -Dockerfiles require Docker version at least 17.05. - -### How to build and run a custom Podman image based on Debian -``` bash -podman build . -t local-ondrive-debian -f contrib/docker/Dockerfile-debian -podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-ondrive-debian:latest -``` - -### How to build and run a custom Podman image based on Alpine Linux -``` bash -podman build . -t local-ondrive-alpine -f contrib/docker/Dockerfile-alpine -podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-ondrive-alpine:latest -``` - -### How to build and run a custom Podman image for ARMHF (Raspberry Pi) -Compatible with: -* Raspberry Pi -* Raspberry Pi 2 -* Raspberry Pi Zero -* Raspberry Pi 3 -* Raspberry Pi 4 -``` bash -podman build . 
-t local-onedrive-armhf -f contrib/docker/Dockerfile-debian -podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-onedrive-armhf:latest -``` - -### How to build and run a custom Podman image for AARCH64 Platforms -``` bash -podman build . -t local-onedrive-aarch64 -f contrib/docker/Dockerfile-debian -podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-onedrive-aarch64:latest -``` diff --git a/docs/USAGE.md b/docs/USAGE.md deleted file mode 100644 index 880de9522..000000000 --- a/docs/USAGE.md +++ /dev/null @@ -1,943 +0,0 @@ -# Using the OneDrive Client for Linux -## Application Version -Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. - -## Table of Contents - -- [Important Notes](#important-notes) - - [Upgrading from the 'skilion' Client](#upgrading-from-the-sklion-client) - - [Guidelines for Naming Local Files and Folders in the Synchronisation Directory](#guidelines-for-naming-local-files-and-folders-in-the-synchronisation-directory) - - [Compatibility with curl](#compatibility-with-curl) -- [First Steps](#first-steps) - - [Authorise the Application with Your Microsoft OneDrive Account](#authorise-the-application-with-your-microsoft-onedrive-account) - - [Display Your Applicable Runtime Configuration](#display-your-applicable-runtime-configuration) - - [Understanding OneDrive Client for Linux Operational Modes](#understanding-onedrive-client-for-linux-operational-modes) - - [Standalone Synchronisation Operational Mode (Standalone Mode)](#standalone-synchronisation-operational-mode-standalone-mode) - - [Ongoing Synchronisation Operational Mode (Monitor Mode)](#ongoing-synchronisation-operational-mode-monitor-mode) - - [Increasing application logging level](#increasing-application-logging-level) - - [Using 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive](#using-client-side-filtering-rules-to-determine-what-should-be-synced-with-microsoft-onedrive) - - [Testing your configuration](#testing-your-configuration) - - [Performing a sync with Microsoft OneDrive](#performing-a-sync-with-microsoft-onedrive) - - [Performing a single directory synchronisation with Microsoft OneDrive](#performing-a-single-directory-synchronisation-with-microsoft-onedrive) - - [Performing a 'one-way' download synchronisation with Microsoft OneDrive](#performing-a-one-way-download-synchronisation-with-microsoft-onedrive) - - [Performing a 'one-way' upload synchronisation with Microsoft OneDrive](#performing-a-one-way-upload-synchronisation-with-microsoft-onedrive) - - [Performing a selective synchronisation via 'sync_list' file](#performing-a-selective-synchronisation-via-sync_list-file) - - [Performing a --resync](#performing-a---resync) - - [Performing a --force-sync without a --resync or changing your configuration](#performing-a---force-sync-without-a---resync-or-changing-your-configuration) - - [Enabling the Client Activity Log](#enabling-the-client-activity-log) - - [Client Activity Log Example:](#client-activity-log-example) - - [Client Activity Log Differences](#client-activity-log-differences) 
- - [GUI Notifications](#gui-notifications) - - [Handling a Microsoft OneDrive Account Password Change](#handling-a-microsoft-onedrive-account-password-change) - - [Determining the synchronisation result](#determining-the-synchronisation-result) -- [Frequently Asked Configuration Questions](#frequently-asked-configuration-questions) - - [How to change the default configuration of the client?](#how-to-change-the-default-configuration-of-the-client) - - [How to change where my data from Microsoft OneDrive is stored?](#how-to-change-where-my-data-from-microsoft-onedrive-is-stored) - - [How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive?](#how-to-change-what-file-and-directory-permissions-are-assigned-to-data-that-is-downloaded-from-microsoft-onedrive) - - [How are uploads and downloads managed?](#how-are-uploads-and-downloads-managed) - - [How to only sync a specific directory?](#how-to-only-sync-a-specific-directory) - - [How to 'skip' files from syncing?](#how-to-skip-files-from-syncing) - - [How to 'skip' directories from syncing?](#how-to-skip-directories-from-syncing) - - [How to 'skip' .files and .folders from syncing?](#how-to-skip-files-and-folders-from-syncing) - - [How to 'skip' files larger than a certain size from syncing?](#how-to-skip-files-larger-than-a-certain-size-from-syncing) - - [How to 'rate limit' the application to control bandwidth consumed for upload & download operations?](#how-to-rate-limit-the-application-to-control-bandwidth-consumed-for-upload--download-operations) - - [How can I prevent my local disk from filling up?](#how-can-i-prevent-my-local-disk-from-filling-up) - - [How does the client handle symbolic links?](#how-does-the-client-handle-symbolic-links) - - [How to synchronise shared folders (OneDrive Personal)?](#how-to-synchronise-shared-folders-onedrive-personal) - - [How to synchronise shared folders (OneDrive Business or Office 365)?](#how-to-synchronise-shared-folders-onedrive-business-or-office-365) - - [How to synchronise SharePoint / Office 365 Shared Libraries?](#how-to-synchronise-sharepoint--office-365-shared-libraries) - - [How to Create a Shareable Link?](#how-to-create-a-shareable-link) - - [How to Synchronise Both Personal and Business Accounts at once?](#how-to-synchronise-both-personal-and-business-accounts-at-once) - - [How to Synchronise Multiple SharePoint Libraries simultaneously?](#how-to-synchronise-multiple-sharepoint-libraries-simultaneously) - - [How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period?](#how-to-receive-real-time-changes-from-microsoft-onedrive-service-instead-of-waiting-for-the-next-sync-period) - - [How to initiate the client as a background service?](#how-to-initiate-the-client-as-a-background-service) - - [OneDrive service running as root user via init.d](#onedrive-service-running-as-root-user-via-initd) - - [OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-root-user-via-systemd-arch-ubuntu-debian-opensuse-fedora) - - [OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)](#onedrive-service-running-as-root-user-via-systemd-red-hat-enterprise-linux-centos-linux) - - [OneDrive service running as a non-root user via systemd (All Linux Distributions)](#onedrive-service-running-as-a-non-root-user-via-systemd-all-linux-distributions) - - [OneDrive service running as a non-root user via systemd 
(with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-a-non-root-user-via-systemd-with-notifications-enabled-arch-ubuntu-debian-opensuse-fedora) - - [OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)](#onedrive-service-running-as-a-non-root-user-via-runit-antix-devuan-artix-void) - - [How to start a user systemd service at boot without user login?](#how-to-start-a-user-systemd-service-at-boot-without-user-login) - -## Important Notes -### Upgrading from the 'skilion' Client -The 'skilion' version has a significant number of issues in how it manages the local sync state. When upgrading from the 'skilion' client to this client, it's recommended to stop any service or OneDrive process that may be running. Once all OneDrive services are stopped, make sure to remove any old client binaries from your system. - -Furthermore, if you're using a 'config' file within your configuration directory (`~/.config/onedrive/`), please ensure that you update the `skip_file = ` option as shown below: - -**Invalid 'skilion' configuration:** -```text -skip_file = ".*|~*" -``` -**Minimum valid configuration:** -```text -skip_file = "~*" -``` -**Default valid configuration:** -```text -skip_file = "~*|.~*|*.tmp|*.swp|*.partial" -``` - -Avoid using a 'skip_file' entry of `.*` as it may prevent the correct detection of local changes to process. The configuration values for 'skip_file' will be checked for validity, and if there is an issue, the following error message will be displayed: -```text -ERROR: Invalid skip_file entry '.*' detected -``` - -### Guidelines for Naming Local Files and Folders in the Synchronisation Directory -When naming your files and folders in the synchronisation directory, it is important to follow the [Windows naming conventions](https://docs.microsoft.com/windows/win32/fileio/naming-a-file) for your files and folders. - -Moreover, Microsoft OneDrive does not adhere to POSIX standards. As a result, if you have two files with identical names differing only in capitalisation, the OneDrive Client for Linux will try to manage this. However, in cases of naming conflicts, the conflicting file or folder will not synchronise. This is a deliberate design choice and will not be modified. To avoid such issues, you should rename any conflicting local files or folders. - -### Compatibility with curl -If your system uses curl < 7.47.0, curl will default to HTTP/1.1 for HTTPS operations, and the client will follow suit, using HTTP/1.1. - -For systems running curl >= 7.47.0 and < 7.62.0, curl will prefer HTTP/2 for HTTPS, but it will still use HTTP/1.1 as the default for these operations. The client will employ HTTP/1.1 for HTTPS operations as well. - -However, if your system employs curl >= 7.62.0, curl will, by default, prioritise HTTP/2 over HTTP/1.1. In this case, the client will utilise HTTP/2 for most HTTPS operations and stick with HTTP/1.1 for others. Please note that this distinction is governed by the OneDrive platform, not our client. - -If you explicitly want to use HTTP/1.1, you can do so by using the `--force-http-11` flag or setting the configuration option `force_http_11 = "true"`. This will compel the application to exclusively use HTTP/1.1. Otherwise, all client operations will align with the curl default settings for your distribution. 
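-
-For example (a minimal illustration using the option and configuration value named above), HTTP/1.1 can be forced either on the command line:
-```text
-onedrive --sync --force-http-11
-```
-or via the 'config' file:
-```text
-force_http_11 = "true"
-```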
- -## First Steps -### Authorise the Application with Your Microsoft OneDrive Account -Once you've installed the application, you'll need to authorise it using your Microsoft OneDrive Account. This can be done by simply running the application without any additional command switches. - -Please be aware that some companies may require you to explicitly add this app to the [Microsoft MyApps portal](https://myapps.microsoft.com/). To add an approved app to your apps, click on the ellipsis in the top-right corner and select "Request new apps." On the next page, you can add this app. If it's not listed, you should make a request through your IT department. - -When you run the application for the first time, you'll be prompted to open a specific URL using your web browser, where you'll need to log in to your Microsoft Account and grant the application permission to access your files. After granting permission to the application, you'll be redirected to a blank page. Simply copy the URI from the blank page and paste it into the application. - -**Example:** -```text -[user@hostname ~]$ onedrive -Authorise this app by visiting: - -https://login.microsoftonline.com/common/oauth2/v2.0/authorise?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient - -Enter the response URI from your browser: https://login.microsoftonline.com/common/oauth2/nativeclient?code= - -The application has been successfully authorised, but no additional command switches were provided. - -Please use 'onedrive --help' for further assistance on how to run this application. -``` - -**Please Note:** Without additional input or configuration, the OneDrive Client for Linux will automatically adhere to default application settings during synchronisation processes with Microsoft OneDrive. - - -### Display Your Applicable Runtime Configuration -To verify the configuration that the application will use, use the following command: -```text -onedrive --display-config -``` -This command will display all the relevant runtime interpretations of the options and configurations you are using. An example output is as follows: -```text -Reading configuration file: /home/user/.config/onedrive/config -Configuration file successfully loaded -onedrive version = vX.Y.Z-A-bcdefghi -Config path = /home/user/.config/onedrive -Config file found in config path = true -Config option 'drive_id' = -Config option 'sync_dir' = ~/OneDrive -... -Config option 'webhook_enabled' = false -``` - -**Important Reminder:** When using multiple OneDrive accounts, it's essential to always use the `--confdir` command followed by the appropriate configuration directory. This ensures that the specific configuration you intend to view is correctly displayed. - -### Understanding OneDrive Client for Linux Operational Modes -There are two modes of operation when using the client: -1. Standalone sync mode that performs a single sync action against Microsoft OneDrive. -2. Ongoing sync mode that continuously syncs your data with Microsoft OneDrive. - -**Important Information:** The default setting for the OneDrive Client on Linux will sync all data from your Microsoft OneDrive account to your local device. To avoid this and select specific items for synchronisation, you should explore setting up 'Client Side Filtering' rules. 
This will help you manage and specify what exactly gets synced with your Microsoft OneDrive account. - -#### Standalone Synchronisation Operational Mode (Standalone Mode) -This method of use can be employed by issuing the following option to the client: -```text -onedrive --sync -``` -For simplicity, this can be shortened to the following: -```text -onedrive -s -``` - -#### Ongoing Synchronisation Operational Mode (Monitor Mode) -This method of use can be utilised by issuing the following option to the client: -```text -onedrive --monitor -``` -For simplicity, this can be shortened to the following: -```text -onedrive -m -``` -**Note:** This method of use is typically employed when enabling a systemd service to run the application in the background. - -Two common errors can occur when using monitor mode: -* Initialisation failure -* Unable to add a new inotify watch - -Both of these errors are local environment issues, where the following system variables need to be increased as the current system values are potentially too low: -* `fs.file-max` -* `fs.inotify.max_user_watches` - -To determine what the existing values are on your system, use the following commands: -```text -sysctl fs.file-max -sysctl fs.inotify.max_user_watches -``` -Alternatively, when running the client with increased verbosity (see below), the client will display what the current configured system maximum values are: -```text -... -All application operations will be performed in: /home/user/OneDrive -OneDrive synchronisation interval (seconds): 300 -Maximum allowed open files: 393370 <-- This is the current operating system fs.file-max value -Maximum allowed inotify watches: 29374 <-- This is the current operating system fs.inotify.max_user_watches value -Initialising filesystem inotify monitoring ... -... -``` -To determine what value to change to, you need to count all the files and folders in your configured 'sync_dir': -```text -cd /path/to/your/sync/dir -ls -laR | wc -l -``` - -To make a change to these variables using your file and folder count, use the following process: -```text -sudo sysctl fs.file-max= -sudo sysctl fs.inotify.max_user_watches= -``` -Once these values are changed, you will need to restart your client so that the new values are detected and used. - -To make these changes permanent on your system, refer to your OS reference documentation. - -### Increasing application logging level -When running a sync (`--sync`) or using monitor mode (`--monitor`), it may be desirable to see additional information regarding the progress and operation of the client. For example, for a `--sync` command, this would be: -```text -onedrive --sync --verbose -``` -Furthermore, for simplicity, this can be simplified to the following: -``` -onedrive -s -v -``` -Adding `--verbose` twice will enable debug logging output. This is generally required when raising a bug report or needing to understand a problem. - -### Using 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive -Client Side Filtering in the context of the OneDrive Client for Linux refers to user-configured rules that determine what files and directories the client should upload or download from Microsoft OneDrive. These rules are crucial for optimising synchronisation, especially when dealing with large numbers of files or specific file types. 
The OneDrive Client for Linux offers several configuration options to facilitate this: - -* **skip_dir:** This option allows the user to specify directories that should not be synchronised with OneDrive. It's particularly useful for omitting large or irrelevant directories from the sync process. - -* **skip_dotfiles:** Dotfiles, usually configuration files or scripts, can be excluded from the sync. This is useful for users who prefer to keep these files local. - -* **skip_file:** Specific files can be excluded from synchronisation using this option. It provides flexibility in selecting which files are essential for cloud storage. - -* **skip_symlinks:** Symlinks often point to files outside the OneDrive directory or to locations that are not relevant for cloud storage. This option prevents them from being included in the sync. - -Additionally, the OneDrive Client for Linux allows the implementation of Client Side Filtering rules through a 'sync_list' file. This file explicitly states which directories or files should be included in the synchronisation. By default, any item not listed in the 'sync_list' file is excluded. This method offers a more granular approach to synchronisation, ensuring that only the necessary data is transferred to and from Microsoft OneDrive. - -These configurable options and the 'sync_list' file provide users with the flexibility to tailor the synchronisation process to their specific needs, conserving bandwidth and storage space while ensuring that important files are always backed up and accessible. - -**Note:** After changing any Client Side Filtering rule, you must perform a full re-synchronisation. - -### Testing your configuration -You can test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded, or removed; however, the application will display what 'would' have occurred. For example: -```text -onedrive --sync --verbose --dry-run -Reading configuration file: /home/user/.config/onedrive/config -Configuration file successfully loaded -Using 'user' Config Dir: /home/user/.config/onedrive -DRY-RUN Configured. Output below shows what 'would' have occurred. -DRY-RUN: Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations -DRY RUN: Not creating backup config file as --dry-run has been used -DRY RUN: Not updating hash files as --dry-run has been used -Checking Application Version ... -Attempting to initialise the OneDrive API ... -Configuring Global Azure AD Endpoints -The OneDrive API was initialised successfully -Opening the item database ... -Sync Engine Initialised with new Onedrive API instance -Application version: vX.Y.Z-A-bcdefghi -Account Type: -Default Drive ID: -Default Root ID: -Remaining Free Space: 1058488129 KB -All application operations will be performed in: /home/user/OneDrive -Fetching items from the OneDrive API for Drive ID: .. -... -Performing a database consistency and integrity check on locally stored data ... -Processing DB entries for this Drive ID: -Processing ~/OneDrive -The directory has not changed -... -Scanning local filesystem '~/OneDrive' for new data to upload ... -... -Performing a final true-up scan of online data from Microsoft OneDrive -Fetching items from the OneDrive API for Drive ID: .. - -Sync with Microsoft OneDrive is complete -``` - -### Performing a sync with Microsoft OneDrive -By default, all files are downloaded in `~/OneDrive`. This download location is controlled by the 'sync_dir' config option. 
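-For instance (a minimal sketch; the path shown is only an example), the download location could be changed by setting 'sync_dir' in your 'config' file:
-```text
-sync_dir = "~/MyOneDrive"
-```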
- -After authorising the application, a sync of your data can be performed by running: -```text -onedrive --sync -``` -This will synchronise files from your Microsoft OneDrive account to your `~/OneDrive` local directory or to your specified 'sync_dir' location. - -If you prefer to use your local files as stored in `~/OneDrive` as your 'source of truth,' use the following sync command: -```text -onedrive --sync --local-first -``` - -### Performing a single directory synchronisation with Microsoft OneDrive -In some cases, it may be desirable to synchronise a single directory under ~/OneDrive without having to change your client configuration. To do this, use the following command: -```text -onedrive --sync --single-directory '' -``` - -**Example:** If the full path is `~/OneDrive/mydir`, the command would be `onedrive --sync --single-directory 'mydir'` - -### Performing a 'one-way' download synchronisation with Microsoft OneDrive -In some cases, it may be desirable to 'download only' from Microsoft OneDrive. To do this, use the following command: -```text -onedrive --sync --download-only -``` -This will download all the content from Microsoft OneDrive to your `~/OneDrive` location. Any files that are deleted online remain locally and will not be removed. - -However, in some circumstances, it may be desirable to clean up local files that have been removed online. To do this, use the following command: - -```text -onedrive --sync --download-only --cleanup-local-files -``` - -### Performing a 'one-way' upload synchronisation with Microsoft OneDrive -In certain scenarios, you might need to perform an 'upload only' operation to Microsoft OneDrive. This means that you'll be uploading data to OneDrive, but not synchronising any changes or additions made elsewhere. Use this command to initiate an upload-only synchronisation: - -```text -onedrive --sync --upload-only -``` - -**Important Points:** -- The 'upload only' mode operates independently of OneDrive's online content. It doesn't check or sync with what's already stored on OneDrive. It only uploads data from the local client. -- If a local file or folder that was previously synchronised with Microsoft OneDrive is now missing locally, it will be deleted from OneDrive during this operation. - -To ensure that all data on Microsoft OneDrive remains intact (e.g., preventing deletion of items on OneDrive if they're deleted locally), use this command instead: - -```text -onedrive --sync --upload-only --no-remote-delete -``` - -**Understanding both Commands:** -- `--upload-only`: This command will only upload local changes to OneDrive. These changes can include additions, modifications, moves, and deletions of files and folders. -- `--no-remote-delete`: Adding this command prevents the deletion of any items on OneDrive, even if they're deleted locally. This creates a one-way archive on OneDrive where files are only added and never removed. - -### Performing a selective synchronisation via 'sync_list' file -Selective synchronisation allows you to sync only specific files and directories. -To enable selective synchronisation, create a file named `sync_list` in your application configuration directory (default is `~/.config/onedrive`). - -Important points to understand before using 'sync_list'. -* 'sync_list' excludes _everything_ by default on OneDrive. -* 'sync_list' follows an _"exclude overrides include"_ rule, and requires **explicit inclusion**. -* Order exclusions before inclusions, so that anything _specifically included_ is included. 
-* How and where you place your `/` matters for excludes and includes in subdirectories. - -Each line of the file represents a relative path from your `sync_dir`. All files and directories not matching any line of the file will be skipped during all operations. - -Additionally, the use of `/` is critically important to determine how a rule is interpreted. It is very similar to `**` wildcards, for those that are familiar with globbing patterns. -Here is an example of `sync_list`: -```text -# sync_list supports comments -# -# The ordering of entries is highly recommended - exclusions before inclusions -# -# Exclude temp folder(s) or file(s) under Documents folder(s), anywhere in OneDrive -!Documents/temp* -# -# Exclude secret data folder in root directory only -!/Secret_data/* -# -# Include everything else in root directory -/* -# -# Include my Backup folder(s) or file(s) anywhere on OneDrive -Backup -# -# Include my Backup folder in root -/Backup/ -# -# Include Documents folder(s) anywhere in OneDrive -Documents/ -# -# Include all PDF files in Documents folder(s), anywhere in OneDrive -Documents/*.pdf -# -# Include this single document in Documents folder(s), anywhere in OneDrive -Documents/latest_report.docx -# -# Include all Work/Project directories or files, inside 'Work' folder(s), anywhere in OneDrive -Work/Project* -# -# Include all "notes.txt" files, anywhere in OneDrive -notes.txt -# -# Include /Blender in the ~OneDrive root but not if elsewhere in OneDrive -/Blender -# -# Include these directories(or files) in 'Pictures' folder(s), that have a space in their name -Pictures/Camera Roll -Pictures/Saved Pictures -# -# Include these names if they match any file or folder -Cinema Soc -Codes -Textbooks -Year 2 -``` -The following are supported for pattern matching and exclusion rules: -* Use the `*` to wildcard select any characters to match for the item to be included -* Use either `!` or `-` characters at the start of the line to exclude an otherwise included item - -**Note:** When enabling the use of 'sync_list,' utilise the `--display-config` option to validate that your configuration will be used by the application, and test your configuration by adding `--dry-run` to ensure the client will operate as per your requirement. - -**Note:** After changing the sync_list, you must perform a full re-synchronisation by adding `--resync` to your existing command line - for example: `onedrive --sync --resync` - -**Note:** In some circumstances, it may be required to sync all the individual files within the 'sync_dir', but due to frequent name change / addition / deletion of these files, it is not desirable to constantly change the 'sync_list' file to include / exclude these files and force a resync. To assist with this, enable the following in your configuration file: -```text -sync_root_files = "true" -``` -This will tell the application to sync any file that it finds in your 'sync_dir' root by default, negating the need to constantly update your 'sync_list' file. - -### Performing a --resync -If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration: -* drive_id -* sync_dir -* skip_file -* skip_dir -* skip_dotfiles -* skip_symlinks -* sync_business_shared_items -* Creating, Modifying or Deleting the 'sync_list' file - -Additionally, you might opt for a `--resync` if you think it's necessary to ensure your data remains in sync. 
If you're using this switch simply because you're unsure of the sync status, you can check the actual sync status using `--display-sync-status`. - -When you use `--resync`, you'll encounter the following warning and advice: -```text -Using --resync will delete your local 'onedrive' client state, so there won't be a record of your current 'sync status.' -This may potentially overwrite local versions of files with older versions downloaded from OneDrive, leading to local data loss. -If in doubt, back up your local data before using --resync. - -Are you sure you want to proceed with --resync? [Y/N] -``` - -To proceed with `--resync`, you must type 'y' or 'Y' to allow the application to continue. - -**Note:** It's highly recommended to use `--resync` only if the application prompts you to do so. Don't blindly set the application to start with `--resync` as the default option. - -**Note:** In certain automated environments (assuming you know what you're doing due to automation), to avoid the 'proceed with acknowledgement' requirement, add `--resync-auth` to automatically acknowledge the prompt. - -### Performing a --force-sync without a --resync or changing your configuration -In some cases and situations, you may have configured the application to skip certain files and folders using 'skip_file' and 'skip_dir' configuration. You then may have a requirement to actually sync one of these items, but do not wish to modify your configuration, nor perform an entire `--resync` twice. - -The `--force-sync` option allows you to sync a specific directory, ignoring your 'skip_file' and 'skip_dir' configuration and negating the requirement to perform a `--resync`. - -To use this option, you must run the application manually in the following manner: -```text -onedrive --sync --single-directory '' --force-sync -``` - -When using `--force-sync`, you'll encounter the following warning and advice: -```text -WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used - -Using --force-sync will reconfigure the application to use defaults. This may have unknown future impacts. -By proceeding with this option, you accept any impacts, including potential data loss resulting from using --force-sync. - -Are you sure you want to proceed with --force-sync [Y/N] -``` - -To proceed with `--force-sync`, you must type 'y' or 'Y' to allow the application to continue. - -### Enabling the Client Activity Log -When running onedrive, all actions can be logged to a separate log file. This can be enabled by using the `--enable-logging` flag. By default, log files will be written to `/var/log/onedrive/` and will be in the format of `%username%.onedrive.log`, where `%username%` represents the user who ran the client to allow easy sorting of user to client activity log. 
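If you prefer to enable logging via your configuration file rather than passing the flag on every run, the equivalent settings would look something like the following sketch ('log_dir' only needs to be set if you want a location other than the default `/var/log/onedrive/`):
```text
enable_logging = "true"
log_dir = "/var/log/onedrive/"
```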
- -**Note:** You will need to ensure the existence of this directory and that your user has the applicable permissions to write to this directory; otherwise, the following error message will be printed: -```text -ERROR: Unable to access /var/log/onedrive -ERROR: Please manually create '/var/log/onedrive' and set appropriate permissions to allow write access -ERROR: The requested client activity log will instead be located in your user's home directory -``` - -On many systems, this can be achieved by performing the following: -```text -sudo mkdir /var/log/onedrive -sudo chown root:users /var/log/onedrive -sudo chmod 0775 /var/log/onedrive -``` - -Additionally, you need to ensure that your user account is part of the 'users' group: -``` -cat /etc/group | grep users -``` - -If your user is not part of this group, then you need to add your user to this group: -``` -sudo usermod -a -G users -``` - -If you need to make a group modification, you will need to 'logout' of all sessions / SSH sessions to log in again to have the new group access applied. - -If the client is unable to write the client activity log, the following error message will be printed: -```text -ERROR: Unable to write the activity log to /var/log/onedrive/%username%.onedrive.log -ERROR: Please set appropriate permissions to allow write access to the logging directory for your user account -ERROR: The requested client activity log will instead be located in your user's home directory -``` - -If you receive this error message, you will need to diagnose why your system cannot write to the specified file location. - -#### Client Activity Log Example: -An example of a client activity log for the command `onedrive --sync --enable-logging` is below: -```text -2023-Sep-27 08:16:00.1128806 Configuring Global Azure AD Endpoints -2023-Sep-27 08:16:00.1160620 Sync Engine Initialised with new Onedrive API instance -2023-Sep-27 08:16:00.5227122 All application operations will be performed in: /home/user/OneDrive -2023-Sep-27 08:16:00.5227977 Fetching items from the OneDrive API for Drive ID: -2023-Sep-27 08:16:00.7780979 Processing changes and items received from Microsoft OneDrive ... -2023-Sep-27 08:16:00.7781548 Performing a database consistency and integrity check on locally stored data ... -2023-Sep-27 08:16:00.7785889 Scanning the local file system '~/OneDrive' for new data to upload ... -2023-Sep-27 08:16:00.7813710 Performing a final true-up scan of online data from Microsoft OneDrive -2023-Sep-27 08:16:00.7814668 Fetching items from the OneDrive API for Drive ID: -2023-Sep-27 08:16:01.0141776 Processing changes and items received from Microsoft OneDrive ... -2023-Sep-27 08:16:01.0142454 Sync with Microsoft OneDrive is complete -``` -An example of a client activity log for the command `onedrive --sync --verbose --enable-logging` is below: -```text -2023-Sep-27 08:20:05.4600464 Checking Application Version ... -2023-Sep-27 08:20:05.5235017 Attempting to initialise the OneDrive API ... -2023-Sep-27 08:20:05.5237207 Configuring Global Azure AD Endpoints -2023-Sep-27 08:20:05.5238087 The OneDrive API was initialised successfully -2023-Sep-27 08:20:05.5238536 Opening the item database ... 
-2023-Sep-27 08:20:05.5270612 Sync Engine Initialised with new Onedrive API instance -2023-Sep-27 08:20:05.9226535 Application version: vX.Y.Z-A-bcdefghi -2023-Sep-27 08:20:05.9227079 Account Type: -2023-Sep-27 08:20:05.9227360 Default Drive ID: -2023-Sep-27 08:20:05.9227550 Default Root ID: -2023-Sep-27 08:20:05.9227862 Remaining Free Space: -2023-Sep-27 08:20:05.9228296 All application operations will be performed in: /home/user/OneDrive -2023-Sep-27 08:20:05.9228989 Fetching items from the OneDrive API for Drive ID: -2023-Sep-27 08:20:06.2076569 Performing a database consistency and integrity check on locally stored data ... -2023-Sep-27 08:20:06.2077121 Processing DB entries for this Drive ID: -2023-Sep-27 08:20:06.2078408 Processing ~/OneDrive -2023-Sep-27 08:20:06.2078739 The directory has not changed -2023-Sep-27 08:20:06.2079783 Processing Attachments -2023-Sep-27 08:20:06.2080071 The directory has not changed -2023-Sep-27 08:20:06.2081585 Processing Attachments/file.docx -2023-Sep-27 08:20:06.2082079 The file has not changed -2023-Sep-27 08:20:06.2082760 Processing Documents -2023-Sep-27 08:20:06.2083225 The directory has not changed -2023-Sep-27 08:20:06.2084284 Processing Documents/file.log -2023-Sep-27 08:20:06.2084886 The file has not changed -2023-Sep-27 08:20:06.2085150 Scanning the local file system '~/OneDrive' for new data to upload ... -2023-Sep-27 08:20:06.2087133 Skipping item - excluded by sync_list config: ./random_25k_files -2023-Sep-27 08:20:06.2116235 Performing a final true-up scan of online data from Microsoft OneDrive -2023-Sep-27 08:20:06.2117190 Fetching items from the OneDrive API for Drive ID: -2023-Sep-27 08:20:06.5049743 Sync with Microsoft OneDrive is complete -``` - -#### Client Activity Log Differences -Despite application logging being enabled as early as possible, the following log entries will be missing from the client activity log when compared to console output: - -**No user configuration file:** -```text -No user or system config file found, using application defaults -Using 'user' configuration path for application state data: /home/user/.config/onedrive -Using the following path to store the runtime application log: /var/log/onedrive -``` -**User configuration file:** -```text -Reading configuration file: /home/user/.config/onedrive/config -Configuration file successfully loaded -Using 'user' configuration path for application state data: /home/user/.config/onedrive -Using the following path to store the runtime application log: /var/log/onedrive -``` - -### GUI Notifications -If notification support has been compiled in (refer to GUI Notification Support in install.md .. 
ADD LINK LATER), the following events will trigger a GUI notification within the display manager session: -* Aborting a sync if .nosync file is found -* Skipping a particular item due to an invalid name -* Skipping a particular item due to an invalid symbolic link -* Skipping a particular item due to an invalid UTF sequence -* Skipping a particular item due to an invalid character encoding sequence -* Cannot create remote directory -* Cannot upload file changes (free space issue, breaches maximum allowed size, breaches maximum OneDrive Account path length) -* Cannot delete remote file / folder -* Cannot move remote file / folder -* When a re-authentication is required -* When a new client version is available -* Files that fail to upload -* Files that fail to download - -### Handling a Microsoft OneDrive Account Password Change -If you change your Microsoft OneDrive Account Password, the client will no longer be authorised to sync, and will generate the following error upon next application run: -```text -AADSTS50173: The provided grant has expired due to it being revoked, a fresh auth token is needed. The user might have changed or reset their password. The grant was issued on '' and the TokensValidFrom date (before which tokens are not valid) for this user is ''. - -ERROR: You will need to issue a --reauth and re-authorise this client to obtain a fresh auth token. -``` - -To re-authorise the client, follow the steps below: -1. If running the client as a system service (init.d or systemd), stop the applicable system service -2. Run the command `onedrive --reauth`. This will clean up the previous authorisation, and will prompt you to re-authorise the client as per initial configuration. Please note, if you are using `--confdir` as part of your application runtime configuration, you must include this when telling the client to re-authenticate. -3. Restart the client if running as a system service or perform the standalone sync operation again - -The application will now sync with OneDrive with the new credentials. - -### Determining the synchronisation result -When the client has finished syncing without errors, the following will be displayed: -``` -Sync with Microsoft OneDrive is complete -``` - -If any items failed to sync, the following will be displayed: -``` -Sync with Microsoft OneDrive has completed, however there are items that failed to sync. -``` -A file list of failed upload or download items will also be listed to allow you to determine your next steps. - -In order to fix the upload or download failures, you may need to: -* Review the application output to determine what happened -* Re-try your command utilising a resync to ensure your system is correctly synced with your Microsoft OneDrive Account - -## Frequently Asked Configuration Questions - -### How to change the default configuration of the client? -Configuration is determined by three layers, and applied in the following order: -* Application default values -* Values that are set in the configuration file -* Values that are passed in via the command line at application runtime. These values will override any configuration file set value. - -The default application values provide a reasonable operational default, and additional configuration is entirely optional. - -If you want to change the application defaults, you can download a copy of the config file into your application configuration directory. 
Valid default directories for the config file are: -* `~/.config/onedrive` -* `/etc/onedrive` - -**Example:** To download a copy of the config file, use the following: -```text -mkdir -p ~/.config/onedrive -wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/onedrive/config -``` - -For full configuration options and CLI switches, please refer to application-config-options.md - -### How to change where my data from Microsoft OneDrive is stored? -By default, the location where your Microsoft OneDrive data is stored is within your Home Directory under a directory called 'OneDrive'. This replicates as closely as possible where the Microsoft Windows OneDrive client stores data. - -To change this location, the application configuration option 'sync_dir' is used to specify a new local directory where your Microsoft OneDrive data should be stored. - -**Important Note:** If your `sync_dir` is pointing to a network mount point (a network share via NFS, Windows Network Share, Samba Network Share), these types of network mount points do not support 'inotify', thus tracking real-time changes via inotify of local files is not possible when using 'Monitor Mode'. Local filesystem changes will be replicated between the local filesystem and Microsoft OneDrive based on the `monitor_interval` value. This is not something (inotify support for NFS, Samba) that this client can fix. - -### How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive? -The following are the application default permissions for any new directory or file that is created locally when downloaded from Microsoft OneDrive: -* Directories: 700 - This provides the following permissions: `drwx------` -* Files: 600 - This provides the following permissions: `-rw-------` - -These default permissions align with the security principle of 'least privilege' so that only you should have access to your data that you download from Microsoft OneDrive. - -To alter these default permissions, you can adjust the values of two configuration options as follows. You can also use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. -```text -sync_dir_permissions = "700" -sync_file_permissions = "600" -``` - -**Important:** Please note that special permission bits such as setuid, setgid, and the sticky bit are not supported. Valid permission values range from `000` to `777` only. - -### How are uploads and downloads managed? -The system manages downloads and uploads using a multi-threaded approach. Specifically, the application utilises 16 threads for these processes. This thread count is preset and cannot be modified by users. This design ensures efficient handling of data transfers but does not allow for customisation of thread allocation. - -### How to only sync a specific directory? -There are two methods to achieve this: -* Employ the '--single-directory' option to only sync this specific path -* Employ 'sync_list' as part of your 'config' file to configure what files and directories to sync, and what should be excluded - -### How to 'skip' files from syncing? -There are two methods to achieve this: -* Employ 'skip_file' as part of your 'config' file to configure what files to skip -* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded - -### How to 'skip' directories from syncing?
-There are three methods available to 'skip' a directory from the sync process: -* Employ 'skip_dir' as part of your 'config' file to configure what directories to skip -* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded -* Employ 'check_nosync' as part of your 'config' file and a '.nosync' empty file within the directory to exclude to skip that directory - -### How to 'skip' .files and .folders from syncing? -There are three methods to achieve this: -* Employ 'skip_file' or 'skip_dir' to configure what files or folders to skip -* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded -* Employ 'skip_dotfiles' as part of your 'config' file to skip any dot file (for example: `.Trash-1000` or `.xdg-volume-info`) from syncing to OneDrive - -### How to 'skip' files larger than a certain size from syncing? -Use `skip_size = "value"` as part of your 'config' file where files larger than this size (in MB) will be skipped. - -### How to 'rate limit' the application to control bandwidth consumed for upload & download operations? -To optimise Internet bandwidth usage during upload and download processes, include the 'rate_limit' setting in your configuration file. This setting controls the bandwidth allocated to each thread. - -By default, 'rate_limit' is set to '0', indicating that the application will utilise the maximum available bandwidth across all threads. - -To check the current 'rate_limit' value, use the `--display-config` command. - -**Note:** Since downloads and uploads are processed through multiple threads, the 'rate_limit' value applies to each thread separately. For instance, setting 'rate_limit' to 1048576 (1MB) means that during data transfers, the total bandwidth consumption might reach around 16MB, not just the 1MB configured due to the number of threads being used. - -### How can I prevent my local disk from filling up? -By default, the application will reserve 50MB of disk space to prevent your filesystem from running out of disk space. - -This default value can be modified by adding the 'space_reservation' configuration option and the applicable value as part of your 'config' file. - -You can review the value being used when using `--display-config`. - -### How does the client handle symbolic links? -Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories. - -As such, there are only two methods to support symbolic links with this client: -1. Follow the Linux symbolic link and upload whatever the local symbolic link is pointing to to Microsoft OneDrive. This is the default behaviour. -2. Skip symbolic links by configuring the application to do so. When skipping, no data, no link, no reference is uploaded to OneDrive. - -Use 'skip_symlinks' as part of your 'config' file to configure the skipping of all symbolic links while syncing. - -### How to synchronise shared folders (OneDrive Personal)? -Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive". - -### How to synchronise shared folders (OneDrive Business or Office 365)? 
-Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive". - -Refer to [./business-shared-folders.md](business-shared-folders.md) for further details. - -### How to synchronise SharePoint / Office 365 Shared Libraries? -There are two methods to achieve this: -* SharePoint library can be directly added to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the SharePoint Library you want to synchronise, and then click on "Add to my OneDrive". -* Configure a separate application instance to only synchronise that specific SharePoint Library. Refer to [./sharepoint-libraries.md](sharepoint-libraries.md) for configuration assistance. - -### How to Create a Shareable Link? -In certain situations, you might want to generate a shareable file link and provide this link to other users for accessing a specific file. - -To accomplish this, employ the following command: -```text -onedrive --create-share-link -``` -**Note:** By default, this access permissions for the file link will be read-only. - -To make it a read-write link, execute the following command: -```text -onedrive --create-share-link --with-editing-perms -``` -**Note:** The order of the file path and option flag is crucial. - -### How to Synchronise Both Personal and Business Accounts at once? -You need to set up separate instances of the application configuration for each account. - -Refer to [./advanced-usage.md](advanced-usage.md) for guidance on configuration. - -### How to Synchronise Multiple SharePoint Libraries simultaneously? -For each SharePoint Library, configure a separate instance of the application configuration. - -Refer to [./advanced-usage.md](advanced-usage.md) for configuration instructions. - -### How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period? -When operating in 'Monitor Mode,' it may be advantageous to receive real-time updates to online data. A 'webhook' is the method to achieve this, so that when in 'Monitor Mode,' the client subscribes to remote updates. - -Remote changes can then be promptly synchronised to your local file system, without waiting for the next synchronisation cycle. - -This is accomplished by: -* Using 'webhook_enabled' as part of your 'config' file to enable this feature -* Using 'webhook_public_url' as part of your 'config' file to configure the URL the webhook will use for subscription updates - -### How to initiate the client as a background service? -There are a few ways to employ onedrive as a service: -* via init.d -* via systemd -* via runit - -#### OneDrive service running as root user via init.d -```text -chkconfig onedrive on -service onedrive start -``` -To view the logs, execute: -```text -tail -f /var/log/onedrive/.onedrive.log -``` -To alter the 'user' under which the client operates (typically root by default), manually modify the init.d service file and adjust `daemon --user root onedrive_service.sh` to match the correct user. 
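As a sketch, assuming a hypothetical service account named 'onedriveuser', the adjusted line in the init.d service file would read:
```text
# 'onedriveuser' is an illustrative account name - replace it with the user the client should run as
daemon --user onedriveuser onedrive_service.sh
```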
- -#### OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora) -Initially, switch to the root user with `su - root`, then activate the systemd service: -```text -systemctl --user enable onedrive -systemctl --user start onedrive -``` -**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms - see below. - -**Note:** This will execute the 'onedrive' process with a UID/GID of '0', which means any files or folders created will be owned by 'root'. - -To monitor the service's status, use the following: -```text -systemctl --user status onedrive.service -``` - -To observe the systemd application logs, use: -```text -journalctl --user-unit=onedrive -f -``` - -**Note:** For systemd to function correctly, it requires the presence of XDG environment variables. If you encounter the following error while enabling the systemd service: -```text -Failed to connect to bus: No such file or directory -``` -The most likely cause is missing XDG environment variables. To resolve this, add the following lines to `.bashrc` or another file executed upon user login: -```text -export XDG_RUNTIME_DIR="/run/user/$UID" -export DBUS_SESSION_BUS_ADDRESS="unix:path=${XDG_RUNTIME_DIR}/bus" -``` - -To apply this change, you must log out of all user accounts where it has been made. - -**Note:** On certain systems (e.g., Raspbian / Ubuntu / Debian on Raspberry Pi), the XDG fix above may not persist after system reboots. An alternative to starting the client via systemd as root is as follows: -1. Create a symbolic link from `/home/root/.config/onedrive` to `/root/.config/onedrive/`. -2. Establish a systemd service using the '@' service file: `systemctl enable onedrive@root.service`. -3. Start the root@service: `systemctl start onedrive@root.service`. - -This ensures that the service correctly restarts upon system reboot. - -To examine the systemd application logs, run: -```text -journalctl --unit=onedrive@ -f -``` - -#### OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux) -```text -systemctl enable onedrive -systemctl start onedrive -``` -**Note:** This will execute the 'onedrive' process with a UID/GID of '0', meaning any files or folders created will be owned by 'root'. - -To view the systemd application logs, execute: -```text -journalctl --unit=onedrive -f -``` - -#### OneDrive service running as a non-root user via systemd (All Linux Distributions) -In some instances, it is preferable to run the OneDrive client as a service without the 'root' user. Follow the instructions below to configure the service for your regular user login. - -1. As the user who will run the service, launch the application in standalone mode, authorize it for use, and verify that synchronization is functioning as expected: -```text -onedrive --sync --verbose -``` -2. After validating the application for your user, switch to the 'root' user, where is your username from step 1 above. -```text -systemctl enable onedrive@.service -systemctl start onedrive@.service -``` -3. To check the service's status for the user, use the following: -```text -systemctl status onedrive@.service -``` - -To observe the systemd application logs, use: -```text -journalctl --unit=onedrive@ -f -``` - -#### OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora) -In some scenarios, you may want to receive GUI notifications when using the client as a non-root user. 
In this case, follow these steps: - -1. Log in via the graphical UI as the user you want to enable the service for. -2. Disable any `onedrive@` service files for your username, e.g.: -```text -sudo systemctl stop onedrive@alex.service -sudo systemctl disable onedrive@alex.service -``` -3. Enable the service as follows: -```text -systemctl --user enable onedrive -systemctl --user start onedrive -``` - -To check the service's status for the user, use the following: -```text -systemctl --user status onedrive.service -``` - -To view the systemd application logs, execute: -```text -journalctl --user-unit=onedrive -f -``` - -**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms. - -#### OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void) - -1. Create the following folder if it doesn't already exist: `/etc/sv/runsvdir-` - - - where `` is the `USER` targeted for the service - - e.g., `# mkdir /etc/sv/runsvdir-nolan` - -2. Create a file called `run` under the previously created folder with executable permissions - - - `# touch /etc/sv/runsvdir-/run` - - `# chmod 0755 /etc/sv/runsvdir-/run` - -3. Edit the `run` file with the following contents (permissions needed): - - ```sh - #!/bin/sh - export USER="" - export HOME="/home/" - - groups="$(id -Gn "${USER}" | tr ' ' ':')" - svdir="${HOME}/service" - - exec chpst -u "${USER}:${groups}" runsvdir "${svdir}" - ``` - - - Ensure you replace `` with the `USER` set in step #1. - -4. Enable the previously created folder as a service - - - `# ln -fs /etc/sv/runsvdir- /var/service/` - -5. Create a subfolder in the `USER`'s `HOME` directory to store the services (or symlinks) - - - `$ mkdir ~/service` - -6. Create a subfolder specifically for OneDrive - - - `$ mkdir ~/service/onedrive/` - -7. Create a file called `run` under the previously created folder with executable permissions - - - `$ touch ~/service/onedrive/run` - - `$ chmod 0755 ~/service/onedrive/run` - -8. Append the following contents to the `run` file - - ```sh - #!/usr/bin/env sh - exec /usr/bin/onedrive --monitor - ``` - - - In some scenarios, the path to the `onedrive` binary may vary. You can obtain it by running `$ command -v onedrive`. - -9. Reboot to apply the changes - -10. Check the status of user-defined services - - - `$ sv status ~/service/*` - -For additional details, you can refer to Void's documentation on [Per-User Services](https://docs.voidlinux.org/config/services/user-services.html). - -### How to start a user systemd service at boot without user login? -In some situations, it may be necessary for the systemd service to start without requiring your 'user' to log in. 
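You can first check whether 'lingering' is already enabled for your account; this is a quick check assuming `loginctl` is available (the output should show `Linger=yes` or `Linger=no`):
```text
loginctl show-user "$USER" --property=Linger
```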
- -To address this issue, you need to reconfigure your 'user' account so that the systemd services you've created launch without the need for you to log in to your system: -```text -loginctl enable-linger -``` \ No newline at end of file diff --git a/docs/advanced-usage.md b/docs/advanced-usage.md deleted file mode 100644 index 2701909d8..000000000 --- a/docs/advanced-usage.md +++ /dev/null @@ -1,302 +0,0 @@ -# Advanced Configuration of the OneDrive Free Client -This document covers the following scenarios: -* [Configuring the client to use multiple OneDrive accounts / configurations](#configuring-the-client-to-use-multiple-onedrive-accounts--configurations) -* [Configuring the client to use multiple OneDrive accounts / configurations using Docker](#configuring-the-client-to-use-multiple-onedrive-accounts--configurations-using-docker) -* [Configuring the client for use in dual-boot (Windows / Linux) situations](#configuring-the-client-for-use-in-dual-boot-windows--linux-situations) -* [Configuring the client for use when 'sync_dir' is a mounted directory](#configuring-the-client-for-use-when-sync_dir-is-a-mounted-directory) -* [Upload data from the local ~/OneDrive folder to a specific location on OneDrive](#upload-data-from-the-local-onedrive-folder-to-a-specific-location-on-onedrive) - -## Configuring the client to use multiple OneDrive accounts / configurations -Essentially, each OneDrive account or SharePoint Shared Library which you require to be synced needs to have its own and unique configuration, local sync directory and service files. To do this, the following steps are needed: -1. Create a unique configuration folder for each onedrive client configuration that you need -2. Copy to this folder a copy of the default configuration file -3. Update the default configuration file as required, changing the required minimum config options and any additional options as needed to support your multi-account configuration -4. Authenticate the client using the new configuration directory -5. Test the configuration using '--display-config' and '--dry-run' -6. Sync the OneDrive account data as required using `--synchronize` or `--monitor` -7. Configure a unique systemd service file for this account configuration - -### 1. Create a unique configuration folder for each onedrive client configuration that you need -Make the configuration folder as required for this new configuration, for example: -```text -mkdir ~/.config/my-new-config -``` - -### 2. Copy to this folder a copy of the default configuration file -Copy to this folder a copy of the default configuration file by downloading this file from GitHub and saving this file in the directory created above: -```text -wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/my-new-config/config -``` - -### 3. Update the default configuration file -The following config options *must* be updated to ensure that individual account data is not cross populated with other OneDrive accounts or other configurations: -* sync_dir - -Other options that may require to be updated, depending on the OneDrive account that is being configured: -* drive_id -* application_id -* sync_business_shared_folders -* skip_dir -* skip_file -* Creation of a 'sync_list' file if required -* Creation of a 'business_shared_folders' file if required - -### 4. 
Authenticate the client -Authenticate the client using the specific configuration file: -```text -onedrive --confdir="~/.config/my-new-config" -``` -You will be asked to open a specific URL by using your web browser where you will have to login into your Microsoft Account and give the application the permission to access your files. After giving permission to the application, you will be redirected to a blank page. Copy the URI of the blank page into the application. -```text -[user@hostname ~]$ onedrive --confdir="~/.config/my-new-config" -Configuration file successfully loaded -Configuring Global Azure AD Endpoints -Authorize this app visiting: - -https://..... - -Enter the response uri: - -``` - -### 5. Display and Test the configuration -Test the configuration using '--display-config' and '--dry-run'. By doing so, this allows you to test any configuration that you have currently made, enabling you to fix this configuration before using the configuration. - -#### Display the configuration -```text -onedrive --confdir="~/.config/my-new-config" --display-config -``` - -#### Test the configuration by performing a dry-run -```text -onedrive --confdir="~/.config/my-new-config" --synchronize --verbose --dry-run -``` - -If both of these operate as per your expectation, the configuration of this client setup is complete and validated. If not, amend your configuration as required. - -### 6. Sync the OneDrive account data as required -Sync the data for the new account configuration as required: -```text -onedrive --confdir="~/.config/my-new-config" --synchronize --verbose -``` -or -```text -onedrive --confdir="~/.config/my-new-config" --monitor --verbose -``` - -* `--synchronize` does a one-time sync -* `--monitor` keeps the application running and monitoring for changes both local and remote - -### 7. Automatic syncing of new OneDrive configuration -In order to automatically start syncing your OneDrive accounts, you will need to create a service file for each account. From the applicable 'systemd folder' where the applicable systemd service file exists: -* RHEL / CentOS: `/usr/lib/systemd/system` -* Others: `/usr/lib/systemd/user` and `/lib/systemd/system` - -### Step1: Create a new systemd service file -#### Red Hat Enterprise Linux, CentOS Linux -Copy the required service file to a new name: -```text -sudo cp /usr/lib/systemd/system/onedrive.service /usr/lib/systemd/system/onedrive-my-new-config -``` -or -```text -sudo cp /usr/lib/systemd/system/onedrive@.service /usr/lib/systemd/system/onedrive-my-new-config@.service -``` - -#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora -Copy the required service file to a new name: -```text -sudo cp /usr/lib/systemd/user/onedrive.service /usr/lib/systemd/user/onedrive-my-new-config.service -``` -or -```text -sudo cp /lib/systemd/system/onedrive@.service /lib/systemd/system/onedrive-my-new-config@.service -``` - -### Step 2: Edit new systemd service file -Edit the new systemd file, updating the line beginning with `ExecStart` so that the confdir mirrors the one you used above: -```text -ExecStart=/usr/local/bin/onedrive --monitor --confdir="/full/path/to/config/dir" -``` - -Example: -```text -ExecStart=/usr/local/bin/onedrive --monitor --confdir="/home/myusername/.config/my-new-config" -``` - -**Note:** When running the client manually, `--confdir="~/.config/......` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be expanded. 
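Optionally, before enabling the new unit, you can sanity-check the edited file with systemd's verifier (assuming `systemd-analyze` is installed); the unit path below matches the non-RHEL example above and is illustrative only:
```text
systemd-analyze verify /usr/lib/systemd/user/onedrive-my-new-config.service
```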
- -### Step 3: Enable the new systemd service -Once the file is correctly editied, you can enable the new systemd service using the following commands. - -#### Red Hat Enterprise Linux, CentOS Linux -```text -systemctl enable onedrive-my-new-config -systemctl start onedrive-my-new-config -``` - -#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora -```text -systemctl --user enable onedrive-my-new-config -systemctl --user start onedrive-my-new-config -``` -or -```text -systemctl --user enable onedrive-my-new-config@myusername.service -systemctl --user start onedrive-my-new-config@myusername.service -``` - -### Step 4: Viewing systemd status and logs for the custom service -#### Viewing systemd service status - Red Hat Enterprise Linux, CentOS Linux -```text -systemctl status onedrive-my-new-config -``` - -#### Viewing systemd service status - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora -```text -systemctl --user status onedrive-my-new-config -``` - -#### Viewing journalctl systemd logs - Red Hat Enterprise Linux, CentOS Linux -```text -journalctl --unit=onedrive-my-new-config -f -``` - -#### Viewing journalctl systemd logs - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora -```text -journalctl --user --unit=onedrive-my-new-config -f -``` - -### Step 5: (Optional) Run custom systemd service at boot without user login -In some cases it may be desirable for the systemd service to start without having to login as your 'user' - -All the systemd steps above that utilise the `--user` option, will run the systemd service as your particular user. As such, the systemd service will not start unless you actually login to your system. - -To avoid this issue, you need to reconfigure your 'user' account so that the systemd services you have created will startup without you having to login to your system: -```text -loginctl enable-linger -``` - -Example: -```text -alex@ubuntu-headless:~$ loginctl enable-linger alex -``` - -Repeat these steps for each OneDrive new account that you wish to use. - -## Configuring the client to use multiple OneDrive accounts / configurations using Docker -In some situations it may be desirable to run multiple Docker containers at the same time, each with their own configuration. - -To run the Docker container successfully, it needs two unique Docker volumes to operate: -* Your configuration Docker volumes -* Your data Docker volume - -When running multiple Docker containers, this is no different - each Docker container must have it's own configuration and data volume. - -### High level steps: -1. Create the required unique Docker volumes for the configuration volume -2. Create the required unique local path used for the Docker data volume -3. Start the multiple Docker containers with the required configuration for each container - -#### Create the required unique Docker volumes for the configuration volume -Create the required unique Docker volumes for the configuration volume(s): -```text -docker volume create onedrive_conf_sharepoint_site1 -docker volume create onedrive_conf_sharepoint_site2 -docker volume create onedrive_conf_sharepoint_site3 -... -docker volume create onedrive_conf_sharepoint_site50 -``` - -#### Create the required unique local path used for the Docker data volume -Create the required unique local path used for the Docker data volume -```text -mkdir -p /use/full/local/path/no/tilda/SharePointSite1 -mkdir -p /use/full/local/path/no/tilda/SharePointSite2 -mkdir -p /use/full/local/path/no/tilda/SharePointSite3 -... 
-mkdir -p /use/full/local/path/no/tilda/SharePointSite50 -``` - -#### Start the multiple Docker containers with the required configuration (example) -```text -docker run -it --name onedrive_sharepoint_site1 -v onedrive_conf_sharepoint_site1:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite1:/onedrive/data" driveone/onedrive:latest -docker run -it --name onedrive_sharepoint_site2 -v onedrive_conf_sharepoint_site2:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite2:/onedrive/data" driveone/onedrive:latest -docker run -it --name onedrive_sharepoint_site3 -v onedrive_conf_sharepoint_site3:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite3:/onedrive/data" driveone/onedrive:latest -... -docker run -it --name onedrive_sharepoint_site50 -v onedrive_conf_sharepoint_site50:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite50:/onedrive/data" driveone/onedrive:latest -``` -Note that each container must be given a unique `--name`, as Docker will not allow two containers with the same name to run at the same time. - -#### TIP -If all the Docker containers are using the 'same' OneDrive credentials, you can avoid 're-authenticating' and 'authorising' each individual Docker container by re-using the 'refresh_token' from one Docker container in another - simply copy this file to the configuration Docker volume of each Docker container. - -If the account credentials are different, you will need to re-authenticate each Docker container individually. - -## Configuring the client for use in dual-boot (Windows / Linux) situations -When dual booting Windows and Linux, depending on the Windows OneDrive account configuration, the 'Files On-Demand' option may be enabled when running OneDrive within your Windows environment. - -When this option is enabled in Windows, if you are sharing this location between your Windows and Linux systems, all files will be 0-byte links and cannot be used under Linux. - -To fix the problem of Windows turning all files (that should be kept offline) into links, you have to uncheck a specific option in the OneDrive settings window. The option in question is `Save space and download files as you use them`. - -To find this setting, open the OneDrive pop-up window from the taskbar, click "Help & Settings" > "Settings". This opens a new window. Go to the tab "Settings" and look for the section "Files On-Demand". - -After unchecking the option and clicking "OK", the Windows OneDrive client should restart itself and start actually downloading your files so they will truly be available on your disk when offline. These files will then be fully accessible under Linux and the Linux OneDrive client. - -| OneDrive Personal | OneDrive Business<br>SharePoint | -|---|---| -| ![Uncheck-Personal](./images/personal-files-on-demand.png) | ![Uncheck-Business](./images/business-files-on-demand.png) | - -## Configuring the client for use when 'sync_dir' is a mounted directory -In some environments, your setup might be that your configured 'sync_dir' is pointing to another mounted file system - an NFS|CIFS location, an external drive (USB stick, eSATA, etc.). As such, you configure your 'sync_dir' as follows: -```text -sync_dir = "/path/to/mountpoint/OneDrive" -``` - -The issue here is: how does the client react if the mount point becomes unavailable due to network loss or device removal? - -The client has zero knowledge of any event that causes a mountpoint to become unavailable; thus, the client (if you are running as a service) will assume that you deleted the files and will go ahead and delete all your files on OneDrive. This is most certainly an undesirable action. - -There are a few options here which you can configure in your 'config' file to help you prevent this sort of event from occurring: -1. classify_as_big_delete -2. check_nomount -3. check_nosync - -**Note:** Before making any change to your configuration, stop any sync process & stop any onedrive systemd service from running. - -### classify_as_big_delete -By default, this uses a value of 1000 files|folders. If an undesirable unmount occurs and more than 1000 files would be affected, this default level will prevent the client from executing the online delete. Modify this value up or down as desired. - -### check_nomount & check_nosync -These two options are really the right safeguards to use. - -In your 'mount point', *before* you mount your external folder|device, create an empty `.nosync` file, so that this is the *only* file present in the mount location before you mount your data to your mount point. When you mount your data, this '.nosync' file will not be visible, but if the device you are mounting goes away, this '.nosync' file is the only file visible. - -Next, in your 'config' file, configure the following options: `check_nomount = "true"` and `check_nosync = "true"` - -What this will do is tell the client that if, at *any* point, it sees this file, it should stop syncing - thus protecting your online data from being deleted when the mounted device suddenly becomes unavailable. - -After making this sort of change, test with `--dry-run` so you can see the impacts of your mount point being unavailable, and how the client is now reacting. Once you are happy with how the system will react, restart your sync processes. - - -## Upload data from the local ~/OneDrive folder to a specific location on OneDrive -In some environments, you may not want your local ~/OneDrive folder to be uploaded directly to the root of your OneDrive account online. - -Unfortunately, the OneDrive API lacks any facility to perform a redirection of data during upload. - -The workaround for this is to structure your local filesystem and reconfigure your client to achieve the desired goal. - -### High level steps: -1. Create a new folder, for example `/opt/OneDrive` -2. Configure your application config 'sync_dir' to look at this folder -3. Inside `/opt/OneDrive` create the folder you wish to sync the data online to, for example: `/opt/OneDrive/RemoteOnlineDestination` -4. Configure the application to only sync `/opt/OneDrive/RemoteOnlineDestination` via 'sync_list' -5.
Symbolically link `~/OneDrive` -> `/opt/OneDrive/RemoteOnlineDestination` - -### Outcome: -* Your `~/OneDrive` will look / feel as per normal -* The data will be stored online under `/RemoteOnlineDestination` - -### Testing: -* Validate your configuration with `onedrive --display-config` -* Test your configuration with `onedrive --dry-run` diff --git a/docs/application-config-options.md b/docs/application-config-options.md deleted file mode 100644 index 31b50614b..000000000 --- a/docs/application-config-options.md +++ /dev/null @@ -1,1075 +0,0 @@ -# Application Configuration Options for the OneDrive Client for Linux -## Application Version -Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. - -## Table of Contents - -- [Configuration File Options](#configuration-file-options) - - [application_id](#application_id) - - [azure_ad_endpoint](#azure_ad_endpoint) - - [azure_tenant_id](#azure_tenant_id) - - [bypass_data_preservation](#bypass_data_preservation) - - [check_nomount](#check_nomount) - - [check_nosync](#check_nosync) - - [classify_as_big_delete](#classify_as_big_delete) - - [cleanup_local_files](#cleanup_local_files) - - [connect_timeout](#connect_timeout) - - [data_timeout](#data_timeout) - - [debug_https](#debug_https) - - [disable_download_validation](#disable_download_validation) - - [disable_notifications](#disable_notifications) - - [disable_upload_validation](#disable_upload_validation) - - [display_running_config](#display_running_config) - - [dns_timeout](#dns_timeout) - - [download_only](#download_only) - - [drive_id](#drive_id) - - [dry_run](#dry_run) - - [enable_logging](#enable_logging) - - [force_http_11](#force_http_11) - - [ip_protocol_version](#ip_protocol_version) - - [local_first](#local_first) - - [log_dir](#log_dir) - - [monitor_fullscan_frequency](#monitor_fullscan_frequency) - - [monitor_interval](#monitor_interval) - - [monitor_log_frequency](#monitor_log_frequency) - - [no_remote_delete](#no_remote_delete) - - [operation_timeout](#operation_timeout) - - [rate_limit](#rate_limit) - - [read_only_auth_scope](#read_only_auth_scope) - - [remove_source_files](#remove_source_files) - - [resync](#resync) - - [resync_auth](#resync_auth) - - [skip_dir](#skip_dir) - - [skip_dir_strict_match](#skip_dir_strict_match) - - [skip_dotfiles](#skip_dotfiles) - - [skip_file](#skip_file) - - [skip_size](#skip_size) - - [skip_symlinks](#skip_symlinks) - - [space_reservation](#space_reservation) - - [sync_business_shared_items](#sync_business_shared_items) - - [sync_dir](#sync_dir) - - [sync_dir_permissions](#sync_dir_permissions) - - [sync_file_permissions](#sync_file_permissions) - - [sync_root_files](#sync_root_files) - - [upload_only](#upload_only) - - [user_agent](#user_agent) - - [webhook_enabled](#webhook_enabled) - - [webhook_expiration_interval](#webhook_expiration_interval) - - [webhook_listening_host](#webhook_listening_host) - - [webhook_listening_port](#webhook_listening_port) - - [webhook_public_url](#webhook_public_url) - - [webhook_renewal_interval](#webhook_renewal_interval) -- [Command Line Interface (CLI) Only Options](#command-line-interface-cli-only-options) - - [CLI Option: --auth-files](#cli-option---auth-files) - - [CLI Option: --auth-response](#cli-option---auth-response) - - [CLI Option: 
--confdir](#cli-option---confdir) - - [CLI Option: --create-directory](#cli-option---create-directory) - - [CLI Option: --create-share-link](#cli-option---create-share-link) - - [CLI Option: --destination-directory](#cli-option---destination-directory) - - [CLI Option: --display-config](#cli-option---display-config) - - [CLI Option: --display-sync-status](#cli-option---display-sync-status) - - [CLI Option: --display-quota](#cli-option---display-quota) - - [CLI Option: --force](#cli-option---force) - - [CLI Option: --force-sync](#cli-option---force-sync) - - [CLI Option: --get-file-link](#cli-option---get-file-link) - - [CLI Option: --get-sharepoint-drive-id](#cli-option---get-sharepoint-drive-id) - - [CLI Option: --logout](#cli-option---logout) - - [CLI Option: --modified-by](#cli-option---modified-by) - - [CLI Option: --monitor | -m](#cli-option---monitor--m) - - [CLI Option: --print-access-token](#cli-option---print-access-token) - - [CLI Option: --reauth](#cli-option---reauth) - - [CLI Option: --remove-directory](#cli-option---remove-directory) - - [CLI Option: --single-directory](#cli-option---single-directory) - - [CLI Option: --source-directory](#cli-option---source-directory) - - [CLI Option: --sync | -s](#cli-option---sync--s) - - [CLI Option: --verbose | -v+](#cli-option---verbose--v) - - [CLI Option: --with-editing-perms](#cli-option---with-editing-perms) -- [Depreciated Configuration File and CLI Options](#depreciated-configuration-file-and-cli-options) - - [min_notify_changes](#min_notify_changes) - - [CLI Option: --synchronize](#cli-option---synchronize) - - -## Configuration File Options - -### application_id -_**Description:**_ This is the config option for application id that used used to identify itself to Microsoft OneDrive. In some circumstances, it may be desirable to use your own application id. To do this, you must register a new application with Microsoft Azure via https://portal.azure.com/, then use your new application id with this config option. - -_**Value Type:**_ String - -_**Default Value:**_ d50ca740-c83f-4d1b-b616-12c519384f0c - -_**Config Example:**_ `application_id = "d50ca740-c83f-4d1b-b616-12c519384f0c"` - -### azure_ad_endpoint -_**Description:**_ This is the config option to change the Microsoft Azure Authentication Endpoint that the client uses to conform with data and security requirements that requires data to reside within the geographic borders of that country. - -_**Value Type:**_ String - -_**Default Value:**_ *Empty* - not required for normal operation - -_**Valid Values:**_ USL4, USL5, DE, CN - -_**Config Example:**_ `azure_ad_endpoint = "DE"` - -### azure_tenant_id -_**Description:**_ This config option allows the locking of the client to a specific single tenant and will configure your client to use the specified tenant id in its Azure AD and Graph endpoint URIs, instead of "common". The tenant id may be the GUID Directory ID or the fully qualified tenant name. - -_**Value Type:**_ String - -_**Default Value:**_ *Empty* - not required for normal operation - -_**Config Example:**_ `azure_tenant_id = "example.onmicrosoft.us"` or `azure_tenant_id = "0c4be462-a1ab-499b-99e0-da08ce52a2cc"` - -_**Additional Usage Requirement:**_ Must be configured if 'azure_ad_endpoint' is configured. - -### bypass_data_preservation -_**Description:**_ This config option allows the disabling of preserving local data by renaming the local file in the event of data conflict. 
If this is enabled, you will experience data loss on your local data as the local file will be over-written with data from OneDrive online. Use with care and caution. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `bypass_data_preservation = "false"` or `bypass_data_preservation = "true"` - -### check_nomount -_**Description:**_ This config option is useful to prevent application startup & ongoing use in 'Monitor Mode' if the configured 'sync_dir' is a separate disk that is being mounted by your system. This option will check for the presence of a `.nosync` file in your mount point, and if present, abort any sync process to preserve data. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `check_nomount = "false"` or `check_nomount = "true"` - -_**CLI Option:**_ `--check-for-nomount` - -_**Additional Usage Requirement:**_ Create a `.nosync` file in your mount point *before* you mount your disk so that this is visible, in your mount point if your disk is unmounted. - -### check_nosync -_**Description:**_ This config option is useful to prevent the sync of a *local* directory to Microsoft OneDrive. It will *not* check for this file online to prevent the download of directories to your local system. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `check_nosync = "false"` or `check_nosync = "true"` - -_**CLI Option Use:**_ `--check-for-nosync` - -_**Additional Usage Requirement:**_ Create a `.nosync` file in any *local* directory that you wish to not sync to Microsoft OneDrive when you enable this option. - -### classify_as_big_delete -_**Description:**_ This config option defines the number of children in a path that is locally removed which will be classified as a 'big data delete' to safeguard large data removals - which are typically accidental local delete events. - -_**Value Type:**_ Integer - -_**Default Value:**_ 1000 - -_**Config Example:**_ `classify_as_big_delete = "2000"` - -_**CLI Option Use:**_ `--classify-as-big-delete 2000` - -_**Additional Usage Requirement:**_ If this option is triggered, you will need to add `--force` to force a sync to occur. - -### cleanup_local_files -_**Description:**_ This config option provides the capability to cleanup local files and folders if they are removed online. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `cleanup_local_files = "false"` or `cleanup_local_files = "true"` - -_**CLI Option Use:**_ `--cleanup-local-files` - -_**Additional Usage Requirement:**_ This configuration option can only be used with 'download_only'. It cannot be used with any other application option. - -### connect_timeout -_**Description:**_ This configuration setting manages the TCP connection timeout duration in seconds for HTTPS connections to Microsoft OneDrive when using the curl library. - -_**Value Type:**_ Integer - -_**Default Value:**_ 30 - -_**Config Example:**_ `connect_timeout = "20"` - -### data_timeout -_**Description:**_ This setting controls the timeout duration, in seconds, for when data is not received on an active connection to Microsoft OneDrive over HTTPS when using the curl library, before that connection is timeout out. - -_**Value Type:**_ Integer - -_**Default Value:**_ 240 - -_**Config Example:**_ `data_timeout = "300"` - -### debug_https -_**Description:**_ This setting controls whether the curl library is configured to output additional data to assist with diagnosing HTTPS issues and problems. 
- -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `debug_https = "false"` or `debug_https = "true"` - -_**CLI Option Use:**_ `--debug-https` - -_**Additional Usage Notes:**_ Whilst this option can be used at any time, it is advisable that you only use this option when advised as this will output your `Authorization: bearer` - which is your authentication token to Microsoft OneDrive. - -### disable_download_validation -_**Description:**_ This option determines whether the client will conduct integrity validation on files downloaded from Microsoft OneDrive. Sometimes, when downloading files, particularly from SharePoint, there is a discrepancy between the file size reported by the OneDrive API and the byte count received from the SharePoint HTTP Server for the same file. Enable this option to disable the integrity checks performed by this client. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `disable_download_validation = "false"` or `disable_download_validation = "true"` - -_**CLI Option Use:**_ `--disable-download-validation` - -_**Additional Usage Notes:**_ If you're downloading data from SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack. - -### disable_notifications -_**Description:**_ This setting controls whether GUI notifications are sent from the client to your display manager session. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `disable_notifications = "false"` or `disable_notifications = "true"` - -_**CLI Option Use:**_ `--disable-notifications` - -### disable_upload_validation -_**Description:**_ This option determines whether the client will conduct integrity validation on files uploaded to Microsoft OneDrive. Sometimes, when uploading files, particularly to SharePoint, SharePoint will modify your file post upload by adding new data to your file which breaks the integrity checking of the upload performed by this client. Enable this option to disable the integrity checks performed by this client. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `disable_upload_validation = "false"` or `disable_upload_validation = "true"` - -_**CLI Option Use:**_ `--disable-upload-validation` - -_**Additional Usage Notes:**_ If you're uploading data to SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack. - -### display_running_config -_**Description:**_ This option will include the running config of the application at application startup. This may be desirable to enable when running in containerised environments so that any application logging that is occuring, will have the application configuration being consumed at startup, written out to any applicable log file. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `display_running_config = "false"` or `display_running_config = "true"` - -_**CLI Option Use:**_ `--display-running-config` - -### dns_timeout -_**Description:**_ This setting controls the libcurl DNS cache value. 
By default, libcurl caches this info for 60 seconds. This libcurl DNS cache timeout is speculative: libcurl assumes a name will resolve to the same address for a small amount of time into the future, as it does not use DNS TTL properties. We recommend users not to tamper with this option unless strictly necessary. - -_**Value Type:**_ Integer - -_**Default Value:**_ 60 - -_**Config Example:**_ `dns_timeout = "90"` - -### download_only -_**Description:**_ This setting forces the client to only download data from Microsoft OneDrive and replicate that data locally. No changes made locally will be uploaded to Microsoft OneDrive when using this option. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `download_only = "false"` or `download_only = "true"` - -_**CLI Option Use:**_ `--download-only` - -### drive_id -_**Description:**_ This setting controls the specific drive identifier the client will use when syncing with Microsoft OneDrive. - -_**Value Type:**_ String - -_**Default Value:**_ *None* - -_**Config Example:**_ `drive_id = "b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB"` - -_**Additional Usage Notes:**_ This option is typically only used when configuring the client to sync a specific SharePoint Library. If this configuration option is specified in your config file, a value must be specified, otherwise the application will exit citing that a fatal error has occurred. - -### dry_run -_**Description:**_ This setting controls the application capability to test your application configuration without performing any actual activity (download, upload, move, delete, folder creation). - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `dry_run = "false"` or `dry_run = "true"` - -_**CLI Option Use:**_ `--dry-run` - -### enable_logging -_**Description:**_ This setting controls whether the application logs all actions to a separate file. By default, all log files will be written to `/var/log/onedrive`, however this can be changed by using the 'log_dir' config option - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `enable_logging = "false"` or `enable_logging = "true"` - -_**CLI Option Use:**_ `--enable-logging` - -_**Additional Usage Notes:**_ Additional configuration is potentially required to configure the default log directory. Refer to usage.md for details (ADD LINK) - -### force_http_11 -_**Description:**_ This setting controls the application HTTP protocol version. By default, the application will use libcurl defaults for which HTTP protocol version will be used to interact with Microsoft OneDrive. Use this setting to downgrade libcurl to only use HTTP/1.1. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `force_http_11 = "false"` or `force_http_11 = "true"` - -_**CLI Option Use:**_ `--force-http-11` - -### ip_protocol_version -_**Description:**_ This setting controls the application IP protocol that should be used when communicating with Microsoft OneDrive. The default is to use IPv4 and IPv6 networks for communicating to Microsoft OneDrive.
- -_**Value Type:**_ Integer - -_**Default Value:**_ 0 - -_**Valid Values:**_ 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only - -_**Config Example:**_ `ip_protocol_version = "0"` or `ip_protocol_version = "1"` or `ip_protocol_version = "2"` - -_**Additional Usage Notes:**_ In some environments where IPv4 and IPv6 are configured at the same time, this causes resolution and routing issues to Microsoft OneDrive. If this is the case, it is advisable to change 'ip_protocol_version' to match your environment. - -### local_first -_**Description:**_ This setting controls what the application considers the 'source of truth' for your data. By default, what is stored online will be considered as the 'source of truth' when syncing to your local machine. When using this option, your local data will be considered the 'source of truth'. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `local_first = "false"` or `local_first = "true"` - -_**CLI Option Use:**_ `--local-first` - -### log_dir -_**Description:**_ This setting controls the custom application log path when 'enable_logging' has been enabled. By default, all log files will be written to `/var/log/onedrive`. - -_**Value Type:**_ String - -_**Default Value:**_ *None* - -_**Config Example:**_ `log_dir = "~/logs/"` - -_**CLI Option Use:**_ `--log-dir "~/logs/"` - -### monitor_fullscan_frequency -_**Description:**_ This configuration option controls the number of 'monitor_interval' iterations between when a full scan of your data is performed to ensure data integrity and consistency. - -_**Value Type:**_ Integer - -_**Default Value:**_ 12 - -_**Config Example:**_ `monitor_fullscan_frequency = "24"` - -_**CLI Option Use:**_ `--monitor-fullscan-frequency '24'` - -_**Additional Usage Notes:**_ By default without configuration, 'monitor_fullscan_frequency' is set to 12. In this default state, this means that a full scan is performed every 'monitor_interval' x 'monitor_fullscan_frequency' = 3600 seconds. This setting is only applicable when running in `--monitor` mode. Setting this configuration option to '0' will *disable* the full scan of your data online. - -### monitor_interval -_**Description:**_ This configuration setting determines how often the synchronisation loops run in --monitor mode, measured in seconds. When this time period elapses, the client will check for online changes in Microsoft OneDrive, conduct integrity checks on local data and scan the local 'sync_dir' to identify any new content that hasn't been uploaded yet. - -_**Value Type:**_ Integer - -_**Default Value:**_ 300 - -_**Config Example:**_ `monitor_interval = "600"` - -_**CLI Option Use:**_ `--monitor-interval '600'` - -_**Additional Usage Notes:**_ A minimum value of 300 is enforced for this configuration setting. - -### monitor_log_frequency -_**Description:**_ This configuration option controls the suppression of frequently printed log items to the system console when using `--monitor` mode. The aim of this configuration item is to reduce the log output when near zero sync activity is occuring. 
- -_**Value Type:**_ Integer - -_**Default Value:**_ 12 - -_**Config Example:**_ `monitor_log_frequency = "24"` - -_**CLI Option Use:**_ `--monitor-log-frequency '24'` - -_**Additional Usage Notes:**_ - -By default, at application start-up when using `--monitor` mode, the following will be logged to indicate that the application has correctly started and has performed all the initial processing steps: -```text -Reading configuration file: /home/user/.config/onedrive/config -Configuration file successfully loaded -Configuring Global Azure AD Endpoints -Sync Engine Initialised with new Onedrive API instance -All application operations will be performed in: /home/user/OneDrive -OneDrive synchronisation interval (seconds): 300 -Initialising filesystem inotify monitoring ... -Performing initial syncronisation to ensure consistent local state ... -Starting a sync with Microsoft OneDrive -Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB .. -Processing changes and items received from Microsoft OneDrive ... -Performing a database consistency and integrity check on locally stored data ... -Scanning the local file system '~/OneDrive' for new data to upload ... -Performing a final true-up scan of online data from Microsoft OneDrive -Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB .. -Processing changes and items received from Microsoft OneDrive ... -Sync with Microsoft OneDrive is complete -``` -Then, based on 'monitor_log_frequency', the following output will be logged until the suppression loop value is reached: -```text -Starting a sync with Microsoft OneDrive -Syncing changes from Microsoft OneDrive ... -Sync with Microsoft OneDrive is complete -``` -**Note:** The additional log output `Performing a database consistency and integrity check on locally stored data ...` will only be displayed when this activity is occurring, which is triggered by 'monitor_fullscan_frequency'. - -**Note:** If verbose application output is being used (`--verbose`), then this configuration setting has zero effect, as application verbose output takes priority over application output suppression. - -### no_remote_delete -_**Description:**_ This configuration option controls whether local file and folder deletes are actioned on Microsoft OneDrive. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `no_remote_delete = "false"` or `no_remote_delete = "true"` - -_**CLI Option Use:**_ `--no-remote-delete` - -_**Additional Usage Notes:**_ This configuration option can *only* be used in conjunction with `--upload-only` - -### operation_timeout -_**Description:**_ This configuration option controls the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. We recommend users not to tamper with this option unless strictly necessary. - -_**Value Type:**_ Integer - -_**Default Value:**_ 3600 - -_**Config Example:**_ `operation_timeout = "3600"` - -### rate_limit -_**Description:**_ This configuration option controls the bandwidth used by the application, per thread, when interacting with Microsoft OneDrive.
- -_**Value Type:**_ Integer - -_**Default Value:**_ 0 (unlimited, use available bandwidth per thread) - -_**Valid Values:**_ Valid tested values for this configuration option are as follows: - -* 131072 = 128 KB/s - absolute minimum for basic application operations to prevent timeouts -* 262144 = 256 KB/s -* 524288 = 512 KB/s -* 1048576 = 1 MB/s -* 10485760 = 10 MB/s -* 104857600 = 100 MB/s - -_**Config Example:**_ `rate_limit = "131072"` - -### read_only_auth_scope -_**Description:**_ This configuration option controls whether the OneDrive Client for Linux operates in a totally in read-only operation. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `read_only_auth_scope = "false"` or `read_only_auth_scope = "true"` - -_**Additional Usage Notes:**_ When using 'read_only_auth_scope' you also will need to remove your existing application access consent otherwise old authentication consent will be valid and will be used. This will mean the application will technically have the consent to upload data until you revoke this consent. - -### remove_source_files -_**Description:**_ This configuration option controls whether the OneDrive Client for Linux removes the local file post successful transfer to Microsoft OneDrive. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `remove_source_files = "false"` or `remove_source_files = "true"` - -_**CLI Option Use:**_ `--remove-source-files` - -_**Additional Usage Notes:**_ This configuration option can *only* be used in conjunction with `--upload-only` - -### resync -_**Description:**_ This configuration option controls whether the known local sync state with Microsoft OneDrive is removed at application startup. When this option is used, a full scan of your data online is performed to ensure that the local sync state is correctly built back up. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `resync = "false"` or `resync = "true"` - -_**CLI Option Use:**_ `--resync` - -_**Additional Usage Notes:**_ It's highly recommended to use this option only if the application prompts you to do so. Don't blindly use this option as a default option. If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration: -* drive_id -* sync_dir -* skip_file -* skip_dir -* skip_dotfiles -* skip_symlinks -* sync_business_shared_items -* Creating, Modifying or Deleting the 'sync_list' file - -### resync_auth -_**Description:**_ This configuration option controls the approval of performing a 'resync' which can be beneficial in automated environments. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `resync_auth = "false"` or `resync_auth = "true"` - -_**CLI Option Use:**_ `--resync-auth` - -_**Additional Usage Notes:**_ In certain automated environments (assuming you know what you're doing due to automation), to avoid the 'proceed with acknowledgement' resync requirement, this option allows you to automatically acknowledge the resync prompt. - -### skip_dir -_**Description:**_ This configuration option controls whether the application skips certain directories from being synced. Directories can be specified in 2 ways: - -* As a single entry. This will search the respective path for this entry and skip all instances where this directory is present, where ever it may exist. -* As a full path entry. 
This will skip the explicit path as set. - -**Important:** Entries for 'skip_dir' are *relative* to your 'sync_dir' path. - -_**Value Type:**_ String - -_**Default Value:**_ *Empty* - not required for normal operation - -_**Config Example:**_ - -Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. - -```text -skip_dir = "Desktop|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/WindowsPowerShell|.Rproj-user" -``` - -The 'skip_dir' option can also be specified multiple times within your config file, for example: -```text -skip_dir = "SkipThisDirectoryAnywhere" -skip_dir = ".SkipThisOtherDirectoryAnywhere" -skip_dir = "/Explicit/Path/To/A/Directory" -skip_dir = "/Another/Explicit/Path/To/Different/Directory" -``` - -This will be interpreted the same as: -```text -skip_dir = "SkipThisDirectoryAnywhere|.SkipThisOtherDirectoryAnywhere|/Explicit/Path/To/A/Directory|/Another/Explicit/Path/To/Different/Directory" -``` - -_**CLI Option Use:**_ `--skip-dir 'SkipThisDirectoryAnywhere|.SkipThisOtherDirectoryAnywhere|/Explicit/Path/To/A/Directory|/Another/Explicit/Path/To/Different/Directory'` - -_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. If using the config file and CLI option is used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync. - -### skip_dir_strict_match -_**Description:**_ This configuration option controls whether the application performs strict directory matching when checking 'skip_dir' items. When enabled, the 'skip_dir' item must be a full path match to the path to be skipped. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `skip_dir_strict_match = "false"` or `skip_dir_strict_match = "true"` - -_**CLI Option Use:**_ `--skip-dir-strict-match` - -### skip_dotfiles -_**Description:**_ This configuration option controls whether the application will skip all .files and .folders when performing sync operations. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `skip_dotfiles = "false"` or `skip_dotfiles = "true"` - -_**CLI Option Use:**_ `--skip-dot-files` - -_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync. - -### skip_file -_**Description:**_ This configuration option controls whether the application skips certain files from being synced. - -_**Value Type:**_ String - -_**Default Value:**_ `~*|.~*|*.tmp|*.swp|*.partial` - -_**Config Example:**_ - -Patterns are case insensitive. `*` and `?` [wildcards characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns. 
- -By default, the following files will be skipped: -* Files that start with ~ -* Files that start with .~ (like .~lock.* files generated by LibreOffice) -* Files that end in .tmp, .swp and .partial - -Files can be skipped in the following fashion: -* Specify a wildcard, eg: '*.txt' (skip all txt files) -* Explicitly specify the filename and it's full path relative to your sync_dir, eg: '/path/to/file/filename.ext' -* Explicitly specify the filename only and skip every instance of this filename, eg: 'filename.ext' - -```text -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. -# -# sync_dir = "~/OneDrive" -skip_file = "~*|/Documents/OneNote*|/Documents/config.xlaunch|myfile.ext|/Documents/keepass.kdbx" -# monitor_interval = "300" -# skip_dir = "" -# log_dir = "/var/log/onedrive/" -``` -The 'skip_file' option can be specified multiple times within your config file, for example: -```text -skip_file = "~*|.~*|*.tmp|*.swp" -skip_file = "*.blah" -skip_file = "never_sync.file" -skip_file = "/Documents/keepass.kdbx" -``` -This will be interpreted the same as: -```text -skip_file = "~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx" -``` - -_**CLI Option Use:**_ `--skip-file '~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx'` - -_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. If using the config file and CLI option is used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync. - -### skip_size -_**Description:**_ This configuration option controls whether the application skips syncing certain files larger than the specified size. The value specified is in MB. - -_**Value Type:**_ Integer - -_**Default Value:**_ 0 (all files, regardless of size, are synced) - -_**Config Example:**_ `skip_size = "50"` - -_**CLI Option Use:**_ `--skip-size '50'` - -### skip_symlinks -_**Description:**_ This configuration option controls whether the application will skip all symbolic links when performing sync operations. Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `skip_symlinks = "false"` or `skip_symlinks = "true"` - -_**CLI Option Use:**_ `--skip-symlinks` - -_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync. 
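-
-As an illustrative sketch only (the 'skip_dir' patterns below are hypothetical examples, not recommendations), the Client Side Filtering options described above can be combined in a single 'config' file:
-```text
-# Hypothetical client side filtering example - adjust these patterns to match your own data
-skip_file = "~*|.~*|*.tmp|*.swp|*.partial"
-skip_dir = "node_modules|.cache"
-skip_dotfiles = "true"
-skip_symlinks = "true"
-```
-Because these are Client Side Filtering Rules, a resync is required after changing them. A `--dry-run` pass, for example `onedrive --sync --verbose --dry-run`, can be used to preview the effect of the new rules before performing the actual sync with `--resync`.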
- -### space_reservation -_**Description:**_ This configuration option controls how much local disk space should be reserved, to prevent the application from filling up your entire disk due to misconfiguration - -_**Value Type:**_ Integer - -_**Default Value:**_ 50 MB (expressesed as Bytes when using `--display-config`) - -_**Config Example:**_ `space_reservation = "100"` - -_**CLI Option Use:**_ `--space-reservation '100'` - -### sync_business_shared_items -_**Description:**_ This configuration option controls whether OneDrive Business | Office 365 Shared Folders, when added as a 'shortcut' to your 'My Files' will be synced to your local system. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `sync_business_shared_items = "false"` or `sync_business_shared_items = "true"` - -_**CLI Option Use:**_ *none* - this is a config file option only - -_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync. - -### sync_dir -_**Description:**_ This configuration option determines the location on your local filesystem where your data from Microsoft OneDrive will be saved. - -_**Value Type:**_ String - -_**Default Value:**_ `~/OneDrive` - -_**Config Example:**_ `sync_dir = "~/MyDirToSync"` - -_**CLI Option Use:**_ `--syncdir '~/MyDirToSync'` - -_**Additional Usage Notes:**_ After changing this option, you will be required to perform a resync. - -### sync_dir_permissions -_**Description:**_ This configuration option defines the directory permissions applied when a new directory is created locally during the process of syncing your data from Microsoft OneDrive. - -_**Value Type:**_ Integer - -_**Default Value:**_ `700` - This provides the following permissions: `drwx------` - -_**Config Example:**_ `sync_dir_permissions = "700"` - -_**Additional Usage Notes:**_ Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing directory permissions if you modify this value. - -### sync_file_permissions -_**Description:**_ This configuration option defines the file permissions applied when a new file is created locally during the process of syncing your data from Microsoft OneDrive. - -_**Value Type:**_ Integer - -_**Default Value:**_ `600` - This provides the following permissions: `-rw-------` - -_**Config Example:**_ `sync_file_permissions = "600"` - -_**Additional Usage Notes:**_ Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing directory permissions if you modify this value. - -### sync_root_files -_**Description:**_ This configuration option manages the synchronisation of files located in the 'sync_dir' root when using a 'sync_list.' It enables you to sync all these files by default, eliminating the need to repeatedly modify your 'sync_list' and initiate resynchronisation. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `sync_root_files = "false"` or `sync_root_files = "true"` - -_**CLI Option Use:**_ `--sync-root-files` - -_**Additional Usage Notes:**_ Although it's not mandatory, it's recommended that after enabling this option, you perform a `--resync`. This ensures that any previously excluded content is now included in your sync process. 
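-
-As a minimal illustrative sketch (the directory name below is a hypothetical example), a custom sync location and less restrictive directory and file permissions could be configured together as follows; as noted above, changing 'sync_dir' requires a `--resync` afterwards:
-```text
-# Hypothetical example - custom sync location with group read access
-sync_dir = "~/Data/OneDrive"
-sync_dir_permissions = "750"
-sync_file_permissions = "640"
-```
-With '750', newly created directories receive `drwxr-x---` permissions, and with '640', newly created files receive `-rw-r-----`, allowing members of the owning group read access while keeping write access restricted to the owner.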
- -### upload_only -_**Description:**_ This setting forces the client to only upload data to Microsoft OneDrive and replicate the local state online. By default, this will also remove content online that has been removed locally. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ `upload_only = "false"` or `upload_only = "true"` - -_**CLI Option Use:**_ `--upload-only` - -_**Additional Usage Notes:**_ To ensure that data deleted locally remains accessible online, you can use the 'no_remote_delete' option. If you want to delete the data from your local storage after a successful upload to Microsoft OneDrive, you can use the 'remove_source_files' option. - -### user_agent -_**Description:**_ This configuration option controls the 'User-Agent' request header that is presented to Microsoft Graph API when accessing the Microsoft OneDrive service. This string lets servers and network peers identify the application, operating system, vendor, and/or version of the application making the request. We recommend users not to tamper with this option unless strictly necessary. - -_**Value Type:**_ String - -_**Default Value:**_ `ISV|abraunegg|OneDrive Client for Linux/vX.Y.Z-A-bcdefghi` - -_**Config Example:**_ `user_agent = "ISV|CompanyName|AppName/Version"` - -_**Additional Usage Notes:**_ The current value conforms to the Microsoft Graph API documentation for presenting an appropriate 'User-Agent' header and aligns with the registered 'application_id' that this application uses. - -### webhook_enabled -_**Description:**_ This configuration option controls the application feature 'webhooks' to allow you to subscribe to remote updates as published by Microsoft OneDrive. This option only operates when the client is using 'Monitor Mode'. - -_**Value Type:**_ Boolean - -_**Default Value:**_ False - -_**Config Example:**_ The following is the minimum working example that needs to be added to your 'config' file to enable 'webhooks' successfully: -```text -webhook_enabled = "true" -webhook_public_url = "http://:8888/" -``` - -_**Additional Usage Notes:**_ - -Setting `webhook_enabled = "true"` enables the webhook feature in 'monitor' mode. The onedrive process will listen for incoming updates at a configurable endpoint, which defaults to `0.0.0.0:8888`. The `webhook_public_url` must be set to a public-facing URL for Microsoft to send updates to your webhook. - -If your host is directly exposed to the Internet, the `webhook_public_url` can be set to `http://:8888/` to match the default endpoint. In this case, it is also advisable to configure a reverse proxy like `nginx` to proxy the traffic to the client. For example, below is an nginx config snippet to proxy traffic into the webhook: -```text -server { - listen 80; - location /webhooks/onedrive { - proxy_http_version 1.1; - proxy_pass http://127.0.0.1:8888; - } -} -``` - -With nginx running, you can configure 'webhook_public_url' to `https:///webhooks/onedrive` - -**Note:** A valid HTTPS certificate is required for your public-facing URL if using nginx. - -If you receive this application error: `Subscription validation request failed. Response must exactly match validationToken query parameter.` the most likely cause for this error will be your nginx configuration.
- -To resolve this configuration issue, potentially investigate the following configuration for nginx: -```text -server { - listen 80; - location /webhooks/onedrive { - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Original-Request-URI $request_uri; - proxy_read_timeout 300s; - proxy_connect_timeout 75s; - proxy_buffering off; - proxy_http_version 1.1; - proxy_pass http://127.0.0.1:8888; - } -} -``` -For any further nginx configuration assistance, please refer to: https://docs.nginx.com/ - -### webhook_expiration_interval -_**Description:**_ This configuration option controls the frequency at which an existing Microsoft OneDrive webhook subscription expires. The value is expressed in the number of seconds before expiry. - -_**Value Type:**_ Integer - -_**Default Value:**_ 600 - -_**Config Example:**_ `webhook_expiration_interval = "1200"` - -### webhook_listening_host -_**Description:**_ This configuration option controls the host address that this client binds to, when the webhook feature is enabled. - -_**Value Type:**_ String - -_**Default Value:**_ 0.0.0.0 - -_**Config Example:**_ `webhook_listening_host = ""` - this will use the default value. `webhook_listening_host = "192.168.3.4"` - this will bind the client to use the IP address 192.168.3.4. - -_**Additional Usage Notes:**_ Use in conjunction with 'webhook_listening_port' to change the webhook listening endpoint. - -### webhook_listening_port -_**Description:**_ This configuration option controls the TCP port that this client listens on, when the webhook feature is enabled. - -_**Value Type:**_ Integer - -_**Default Value:**_ 8888 - -_**Config Example:**_ `webhook_listening_port = "9999"` - -_**Additional Usage Notes:**_ Use in conjunction with 'webhook_listening_host' to change the webhook listening endpoint. - -### webhook_public_url -_**Description:**_ This configuration option controls the URL that Microsoft will send subscription notifications to. This must be a valid Internet accessible URL. - -_**Value Type:**_ String - -_**Default Value:**_ *empty* - -_**Config Example:**_ - -* If your host is directly connected to the Internet: `webhook_public_url = "http://:8888/"` -* If you are using nginx to reverse proxy traffic from the Internet: `webhook_public_url = "https:///webhooks/onedrive"` - -### webhook_renewal_interval -_**Description:**_ This configuration option controls the frequency at which an existing Microsoft OneDrive webhook subscription is renewed. The value is expressed in the number of seconds before renewal. - -_**Value Type:**_ Integer - -_**Default Value:**_ 300 - -_**Config Example:**_ `webhook_renewal_interval = "600"` - -### webhook_retry_interval -_**Description:**_ This configuration option controls the frequency at which an existing Microsoft OneDrive webhook subscription is retried when creating or renewing a subscription failed. The value is expressed in the number of seconds before retry. - -_**Value Type:**_ Integer - -_**Default Value:**_ 60 - -_**Config Example:**_ `webhook_retry_interval = "120"` - -## Command Line Interface (CLI) Only Options - -### CLI Option: --auth-files -_**Description:**_ This CLI option allows the user to perform application authentication not via an interactive dialog but via specific files that the application uses to read the authentication data from. 
- -_**Usage Example:**_ `onedrive --auth-files authUrl:responseUrl` - -_**Additional Usage Notes:**_ The authorisation URL is written to the specified 'authUrl' file, then onedrive waits for the file 'responseUrl' to be present, and reads the authentication response from that file. Example: - -```text -onedrive --auth-files '~/onedrive-auth-url:~/onedrive-response-url' -Reading configuration file: /home/alex/.config/onedrive/config -Configuration file successfully loaded -Configuring Global Azure AD Endpoints -Client requires authentication before proceeding. Waiting for --auth-files elements to be available. -``` -At this point, the client has written the file `~/onedrive-auth-url` which contains the authentication URL that needs to be visited to perform the authentication process. The client will now wait and watch for the presence of the file `~/onedrive-response-url`. - -Visit the authentication URL, and then create a new file called `~/onedrive-response-url` with the response URI. Once this has been done, the application will acknowledge the presence of this file, read the contents, and authenticate the application. -```text -Sync Engine Initialised with new Onedrive API instance - - --sync or --monitor switches missing from your command line input. Please add one (not both) of these switches to your command line or use 'onedrive --help' for further assistance. - -No OneDrive sync will be performed without one of these two arguments being present. -``` - -### CLI Option: --auth-response -_**Description:**_ This CLI option allows the user to perform application authentication not via an interactive dialog but via providing the authentication response URI directly. - -_**Usage Example:**_ `onedrive --auth-response https://login.microsoftonline.com/common/oauth2/nativeclient?code=` - -_**Additional Usage Notes:**_ Typically, unless the application client identifier or authentication scopes are being modified, or a specific Azure Tenant is being specified, the authentication URL will most likely be as follows: -```text -https://login.microsoftonline.com/common/oauth2/v2.0/authorise?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient -``` -With this URL being known, it is possible ahead of time to request an authentication token by visiting this URL, and performing the authentication access request. - -### CLI Option: --confdir -_**Description:**_ This CLI option allows the user to specify where all the application configuration and relevant components are stored. - -_**Usage Example:**_ `onedrive --confdir '~/.config/onedrive-business/'` - -_**Additional Usage Notes:**_ If using this option, it must be specified each and every time the application is used. If this is omitted, the application default configuration directory will be used. - -### CLI Option: --create-directory -_**Description:**_ This CLI option allows the user to create the specified directory path on Microsoft OneDrive without performing a sync. - -_**Usage Example:**_ `onedrive --create-directory 'path/of/new/folder/structure/to/create/'` - -_**Additional Usage Notes:**_ The specified path to create is relative to your configured 'sync_dir'. - -### CLI Option: --create-share-link -_**Description:**_ This CLI option enables the creation of a shareable file link that can be provided to users to access the file that is stored on Microsoft OneDrive.
By default, the permissions for the file will be 'read-only'. - -_**Usage Example:**_ `onedrive --create-share-link 'relative/path/to/your/file.txt'` - -_**Additional Usage Notes:**_ If writable access to the file is required, you must add `--with-editing-perms` to your command. See below for details. - -### CLI Option: --destination-directory -_**Description:**_ This CLI option specifies the 'destination' portion of moving a file or folder online, without performing a sync operation. - -_**Usage Example:**_ `onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination'` - -_**Additional Usage Notes:**_ All specified paths are relative to your configured 'sync_dir'. - -### CLI Option: --display-config -_**Description:**_ This CLI option will display the effective application configuration - -_**Usage Example:**_ `onedrive --display-config` - -### CLI Option: --display-sync-status -_**Description:**_ This CLI option will display the sync status of the configured 'sync_dir' - -_**Usage Example:**_ `onedrive --display-sync-status` - -_**Additional Usage Notes:**_ This option can also use the `--single-directory` option to determine the sync status of a specific directory within the configured 'sync_dir' - -### CLI Option: --display-quota -_**Description:**_ This CLI option will display the quota status of the account drive id or the configured 'drive_id' value - -_**Usage Example:**_ `onedrive --display-quota` - -### CLI Option: --force -_**Description:**_ This CLI option forces the deletion of data when a 'big delete' is detected. - -_**Usage Example:**_ `onedrive --sync --verbose --force` - -_**Additional Usage Notes:**_ This option should only be used in cases where you've initiated a 'big delete' and genuinely intend to remove all the data that is set to be deleted online. - -### CLI Option: --force-sync -_**Description:**_ This CLI option enables the syncing of a specific directory, using the Client Side Filtering application defaults, overriding any user application configuration. - -_**Usage Example:**_ `onedrive --sync --verbose --force-sync --single-directory 'Data'` - -_**Additional Usage Notes:**_ When this option is used, you will be presented with the following warning and risk acceptance: -```text -WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --synch --single-directory --force-sync being used - -The use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts. -By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync. - -Are you sure you wish to proceed with --force-sync [Y/N] -``` -To proceed with this sync task, you must accept the risk of the actions you are taking. If you have any concerns, first use `--dry-run` and evaluate the outcome before proceeding with the actual action. - -### CLI Option: --get-file-link -_**Description:**_ This CLI option queries the OneDrive API and returns the WebURL for the given local file.
- -_**Usage Example:**_ `onedrive --get-file-link 'relative/path/to/your/file.txt'` - -_**Additional Usage Notes:**_ The path that you should use must be relative to your 'sync_dir' - -### CLI Option: --get-sharepoint-drive-id -_**Description:**_ This CLI option queries the OneDrive API and returns the Office 365 Drive ID for a given Office 365 SharePoint Shared Library that can then be used with 'drive_id' to sync a specific SharePoint Library. - -_**Usage Example:**_ `onedrive --get-sharepoint-drive-id '*'` or `onedrive --get-sharepoint-drive-id 'PointPublishing Hub Site'` - -### CLI Option: --logout -_**Description:**_ This CLI option removes this client's authentication status with Microsoft OneDrive. Any further application use will require the application to be re-authenticated with Microsoft OneDrive. - -_**Usage Example:**_ `onedrive --logout` - -### CLI Option: --modified-by -_**Description:**_ This CLI option queries the OneDrive API and returns the last modified details for the given local file. - -_**Usage Example:**_ `onedrive --modified-by 'relative/path/to/your/file.txt'` - -_**Additional Usage Notes:**_ The path that you should use must be relative to your 'sync_dir' - -### CLI Option: --monitor | -m -_**Description:**_ This CLI option controls the 'Monitor Mode' operational aspect of the client. When this option is used, the client will perform on-going syncs of data between Microsoft OneDrive and your local system. Local changes will be uploaded in near-realtime, whilst online changes will be downloaded on the next sync process. The frequency of these checks is governed by the 'monitor_interval' value. - -_**Usage Example:**_ `onedrive --monitor` or `onedrive -m` - -### CLI Option: --print-access-token -_**Description:**_ Print the current access token being used to access Microsoft OneDrive. - -_**Usage Example:**_ `onedrive --verbose --verbose --debug-https --print-access-token` - -_**Additional Usage Notes:**_ Do not use this option unless you know why you need it. Be highly cautious of exposing this object. Change your password if you feel that you have inadvertently exposed this token. - -### CLI Option: --reauth -_**Description:**_ This CLI option controls the ability to re-authenticate your client with Microsoft OneDrive. - -_**Usage Example:**_ `onedrive --reauth` - -### CLI Option: --remove-directory -_**Description:**_ This CLI option allows the user to remove the specified directory path on Microsoft OneDrive without performing a sync. - -_**Usage Example:**_ `onedrive --remove-directory 'path/of/new/folder/structure/to/remove/'` - -_**Additional Usage Notes:**_ The specified path to remove is relative to your configured 'sync_dir'. - -### CLI Option: --single-directory -_**Description:**_ This CLI option controls the application's ability to sync a specific single directory. - -_**Usage Example:**_ `onedrive --sync --single-directory 'Data'` - -_**Additional Usage Notes:**_ The path specified is relative to your configured 'sync_dir' path. If the physical local path 'Folder' to sync is `~/OneDrive/Data/Folder` then the command would be `--single-directory 'Data/Folder'`. - -### CLI Option: --source-directory -_**Description:**_ This CLI option specifies the 'source' portion of moving a file or folder online, without performing a sync operation.
- -_**Usage Example:**_ `onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination'` - -_**Additional Usage Notes:**_ All specified paths are relative to your configured 'sync_dir'. - -### CLI Option: --sync | -s -_**Description:**_ This CLI option controls the 'Standalone Mode' operational aspect of the client. When this option is used, the client will perform a one-time sync of data between Microsoft OneDrive and your local system. - -_**Usage Example:**_ `onedrive --sync` or `onedrive -s` - -### CLI Option: --verbose | -v+ -_**Description:**_ This CLI option controls the verbosity of the application output. Use the option once to have normal verbose output; use it twice to have debug-level application output. - -_**Usage Example:**_ `onedrive --sync --verbose` or `onedrive --monitor --verbose` - -### CLI Option: --with-editing-perms -_**Description:**_ This CLI option enables the creation of a writable shareable file link that can be provided to users to access the file that is stored on Microsoft OneDrive. This option can only be used in conjunction with `--create-share-link` - -_**Usage Example:**_ `onedrive --create-share-link 'relative/path/to/your/file.txt' --with-editing-perms` - -_**Additional Usage Notes:**_ Placement of `--with-editing-perms` is critical. It *must* be placed after the file path as per the example above. - -## Deprecated Configuration File and CLI Options -The following configuration options are no longer supported: - -### min_notify_changes -_**Description:**_ Minimum number of pending incoming changes necessary to trigger a GUI desktop notification. - -_**Deprecated Config Example:**_ `min_notify_changes = "50"` - -_**Deprecated CLI Option:**_ `--min-notify-changes '50'` - -_**Reason for deprecation:**_ Application has been totally re-written. When this item was introduced, it was done so to reduce spamming of all events to the GUI desktop. - -### CLI Option: --synchronize -_**Description:**_ Perform a synchronisation with Microsoft OneDrive - -_**Deprecated CLI Option:**_ `--synchronize` - -_**Reason for deprecation:**_ `--synchronize` has been deprecated in favour of `--sync` or `-s` diff --git a/docs/application-security.md b/docs/application-security.md deleted file mode 100644 index 7c22c4f13..000000000 --- a/docs/application-security.md +++ /dev/null @@ -1,97 +0,0 @@ -# OneDrive Client for Linux Application Security -This document details the following information: - -* Why is this application an 'unverified publisher'? -* Application Security and Permission Scopes -* How to change Permission Scopes -* How to review your existing application access consent - -## Why is this application an 'unverified publisher'? -Publisher Verification, as per the Microsoft [process](https://learn.microsoft.com/en-us/azure/active-directory/develop/publisher-verification-overview), has in fact been configured and has been verified.
- -### Verified Publisher Configuration Evidence -As per the image below, the Azure portal shows that the 'Publisher Domain' has actually been verified: -![confirmed_verified_publisher](./images/confirmed_verified_publisher.jpg) - -* The 'Publisher Domain' is: https://abraunegg.github.io/ -* The required 'Microsoft Identity Association' is: https://abraunegg.github.io/.well-known/microsoft-identity-association.json - -## Application Security and Permission Scopes -There are 2 main components regarding security for this application: -* Azure Application Permissions -* User Authentication Permissions - -Keeping this in mind, security options should follow the security principal of 'least privilege': -> The principle that a security architecture should be designed so that each entity -> is granted the minimum system resources and authorizations that the entity needs -> to perform its function. - -Reference: [https://csrc.nist.gov/glossary/term/least_privilege](https://csrc.nist.gov/glossary/term/least_privilege) - -As such, the following API permissions are used by default: - -### Default Azure Application Permissions - -| API / Permissions name | Type | Description | Admin consent required | -|---|---|---|---| -| Files.Read | Delegated | Have read-only access to user files | No | -| Files.Read.All | Delegated | Have read-only access to all files user can access | No | -| Sites.Read.All | Delegated | Have read-only access to all items in all site collections | No | -| offline_access | Delegated | Maintain access to data you have given it access to | No | - -![default_authentication_scopes](./images/default_authentication_scopes.jpg) - -### Default User Authentication Permissions - -When a user authenticates with Microsoft OneDrive, additional account permissions are provided by service to give the user specific access to their data. These are delegated permissions provided by the platform: - -| API / Permissions name | Type | Description | Admin consent required | -|---|---|---|---| -| Files.ReadWrite | Delegated | Have full access to user files | No | -| Files.ReadWrite.All | Delegated | Have full access to all files user can access | No | -| Sites.ReadWrite.All | Delegated | Have full access to all items in all site collections | No | -| offline_access | Delegated | Maintain access to data you have given it access to | No | - -When these delegated API permissions are combined, these provide the effective authentication scope for the OneDrive Client for Linux to access your data. The resulting effective 'default' permissions will be: - -| API / Permissions name | Type | Description | Admin consent required | -|---|---|---|---| -| Files.ReadWrite | Delegated | Have full access to user files | No | -| Files.ReadWrite.All | Delegated | Have full access to all files user can access | No | -| Sites.ReadWrite.All | Delegated | Have full access to all items in all site collections | No | -| offline_access | Delegated | Maintain access to data you have given it access to | No | - -These 'default' permissions will allow the OneDrive Client for Linux to read, write and delete data associated with your OneDrive Account. - -## Configuring read-only access to your OneDrive data -In some situations, it may be desirable to configure the OneDrive Client for Linux totally in read-only operation. 
- -To change the application to 'read-only' access, add the following to your configuration file: -```text -read_only_auth_scope = "true" -``` -This will change the user authentication scope request to use read-only access. - -**Note:** When changing this value, you *must* re-authenticate the client using the `--reauth` option to utilise the change in authentication scopes. - -When using read-only authentication scopes, the uploading of any data or local change to OneDrive will fail with the following error: -``` -2022-Aug-06 13:16:45.3349625 ERROR: Microsoft OneDrive API returned an error with the following message: -2022-Aug-06 13:16:45.3351661 Error Message: HTTP request returned status code 403 (Forbidden) -2022-Aug-06 13:16:45.3352467 Error Reason: Access denied -2022-Aug-06 13:16:45.3352838 Error Timestamp: 2022-06-12T13:16:45 -2022-Aug-06 13:16:45.3353171 API Request ID: -``` - -As such, it is also advisable for you to add the following to your configuration file so that 'uploads' are prevented: -```text -download_only = "true" -``` - -**Important:** Additionally when using 'read_only_auth_scope' you also will need to remove your existing application access consent otherwise old authentication consent will be valid and will be used. This will mean the application will technically have the consent to upload data. See below on how to remove your prior application consent. - -## Reviewing your existing application access consent - -To review your existing application access consent, you need to access the following URL: https://account.live.com/consent/Manage - -From here, you are able to review what applications have been given what access to your data, and remove application access as required. diff --git a/docs/build-rpm-howto.md b/docs/build-rpm-howto.md deleted file mode 100644 index 5439c3668..000000000 --- a/docs/build-rpm-howto.md +++ /dev/null @@ -1,379 +0,0 @@ -# RPM Package Build Process -The instuctions below have been tested on the following systems: -* CentOS 7 x86_64 -* CentOS 8 x86_64 - -These instructions should also be applicable for RedHat & Fedora platforms, or any other RedHat RPM based distribution. 
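-
-As an untested sketch only (assuming the same package names apply), on Fedora or on newer Red Hat based distributions that use `dnf` instead of `yum`, the build dependencies listed in the next section can generally be installed in an equivalent way, for example:
-```text
-sudo dnf groupinstall -y 'Development Tools'
-sudo dnf install -y libcurl-devel sqlite-devel libnotify-devel wget
-mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
-```
-A D compiler is still required; the next section installs `dmd` from the dlang.org RPM.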
- -## Prepare Package Development Environment (CentOS 7, 8) -Install the following dependencies on your build system: -```text -sudo yum groupinstall -y 'Development Tools' -sudo yum install -y libcurl-devel -sudo yum install -y sqlite-devel -sudo yum install -y libnotify-devel -sudo yum install -y wget -sudo yum install -y http://downloads.dlang.org/releases/2.x/2.088.0/dmd-2.088.0-0.fedora.x86_64.rpm -mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS} -``` - -## Build RPM from spec file -Build the RPM from the provided spec file: -```text -wget https://github.com/abraunegg/onedrive/archive/refs/tags/v2.4.22.tar.gz -O ~/rpmbuild/SOURCES/v2.4.22.tar.gz -wget https://raw.githubusercontent.com/abraunegg/onedrive/master/contrib/spec/onedrive.spec.in -O ~/rpmbuild/SPECS/onedrive.spec -rpmbuild -ba ~/rpmbuild/SPECS/onedrive.spec -``` - -## RPM Build Example Results -Below are example output results of building, installing and running the RPM package on the respective platforms: - -### CentOS 7 -```text -[alex@localhost ~]$ rpmbuild -ba ~/rpmbuild/SPECS/onedrive.spec -Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.wi6Tdz -+ umask 022 -+ cd /home/alex/rpmbuild/BUILD -+ cd /home/alex/rpmbuild/BUILD -+ rm -rf onedrive-2.4.15 -+ /usr/bin/tar -xf - -+ /usr/bin/gzip -dc /home/alex/rpmbuild/SOURCES/v2.4.15.tar.gz -+ STATUS=0 -+ '[' 0 -ne 0 ']' -+ cd onedrive-2.4.15 -+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . -+ exit 0 -Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.dyeEuM -+ umask 022 -+ cd /home/alex/rpmbuild/BUILD -+ cd onedrive-2.4.15 -+ CFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic' -+ export CFLAGS -+ CXXFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic' -+ export CXXFLAGS -+ FFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic -I/usr/lib64/gfortran/modules' -+ export FFLAGS -+ FCFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic -I/usr/lib64/gfortran/modules' -+ export FCFLAGS -+ LDFLAGS='-Wl,-z,relro ' -+ export LDFLAGS -+ '[' 1 == 1 ']' -+ '[' x86_64 == ppc64le ']' -++ find . -name config.guess -o -name config.sub -+ ./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info -configure: WARNING: unrecognized options: --disable-dependency-tracking -checking for a BSD-compatible install... /usr/bin/install -c -checking for x86_64-redhat-linux-gnu-pkg-config... no -checking for pkg-config... /usr/bin/pkg-config -checking pkg-config is at least version 0.9.0... yes -checking for dmd... dmd -checking version of D compiler... 2.087.0 -checking for curl... yes -checking for sqlite... 
yes -configure: creating ./config.status -config.status: creating Makefile -config.status: creating contrib/pacman/PKGBUILD -config.status: creating contrib/spec/onedrive.spec -config.status: creating onedrive.1 -config.status: creating contrib/systemd/onedrive.service -config.status: creating contrib/systemd/onedrive@.service -configure: WARNING: unrecognized options: --disable-dependency-tracking -+ make -if [ -f .git/HEAD ] ; then \ - git describe --tags > version ; \ -else \ - echo v2.4.15 > version ; \ -fi -dmd -w -g -O -J. -L-lcurl -L-lsqlite3 -L-ldl src/config.d src/itemdb.d src/log.d src/main.d src/monitor.d src/onedrive.d src/qxor.d src/selective.d src/sqlite.d src/sync.d src/upload.d src/util.d src/progress.d src/arsd/cgi.d -ofonedrive -+ exit 0 -Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.L3JbHy -+ umask 022 -+ cd /home/alex/rpmbuild/BUILD -+ '[' /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 '!=' / ']' -+ rm -rf /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 -++ dirname /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 -+ mkdir -p /home/alex/rpmbuild/BUILDROOT -+ mkdir /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 -+ cd onedrive-2.4.15 -+ /usr/bin/make install DESTDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 PREFIX=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 -/usr/bin/install -c -D onedrive /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/bin/onedrive -/usr/bin/install -c -D onedrive.1 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/man/man1/onedrive.1 -/usr/bin/install -c -D -m 644 contrib/logrotate/onedrive.logrotate /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/etc/logrotate.d/onedrive -mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive -/usr/bin/install -c -D -m 644 README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive -/usr/bin/install -c -d -m 0755 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/user /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/system -/usr/bin/install -c -m 0644 contrib/systemd/onedrive@.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/system -/usr/bin/install -c -m 0644 contrib/systemd/onedrive.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/system -+ /usr/lib/rpm/check-buildroot -+ /usr/lib/rpm/redhat/brp-compress -+ /usr/lib/rpm/redhat/brp-strip /usr/bin/strip -+ /usr/lib/rpm/redhat/brp-strip-comment-note /usr/bin/strip /usr/bin/objdump -+ /usr/lib/rpm/redhat/brp-strip-static-archive /usr/bin/strip -+ /usr/lib/rpm/brp-python-bytecompile /usr/bin/python 1 -+ /usr/lib/rpm/redhat/brp-python-hardlink -+ /usr/lib/rpm/redhat/brp-java-repack-jars -Processing files: onedrive-2.4.15-1.el7.x86_64 -Executing(%doc): /bin/sh -e /var/tmp/rpm-tmp.cpSXho -+ umask 022 -+ cd /home/alex/rpmbuild/BUILD -+ cd onedrive-2.4.15 -+ DOCDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15 -+ export DOCDIR -+ /usr/bin/mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15 -+ cp -pr README.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15 -+ cp -pr LICENSE 
/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15 -+ cp -pr CHANGELOG.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15 -+ exit 0 -Provides: config(onedrive) = 2.4.15-1.el7 onedrive = 2.4.15-1.el7 onedrive(x86-64) = 2.4.15-1.el7 -Requires(rpmlib): rpmlib(CompressedFileNames) <= 3.0.4-1 rpmlib(FileDigests) <= 4.6.0-1 rpmlib(PayloadFilesHavePrefix) <= 4.0-1 -Requires(post): systemd -Requires(preun): systemd -Requires(postun): systemd -Requires: ld-linux-x86-64.so.2()(64bit) ld-linux-x86-64.so.2(GLIBC_2.3)(64bit) libc.so.6()(64bit) libc.so.6(GLIBC_2.14)(64bit) libc.so.6(GLIBC_2.15)(64bit) libc.so.6(GLIBC_2.2.5)(64bit) libc.so.6(GLIBC_2.3.2)(64bit) libc.so.6(GLIBC_2.3.4)(64bit) libc.so.6(GLIBC_2.4)(64bit) libc.so.6(GLIBC_2.6)(64bit) libc.so.6(GLIBC_2.8)(64bit) libc.so.6(GLIBC_2.9)(64bit) libcurl.so.4()(64bit) libdl.so.2()(64bit) libdl.so.2(GLIBC_2.2.5)(64bit) libgcc_s.so.1()(64bit) libgcc_s.so.1(GCC_3.0)(64bit) libgcc_s.so.1(GCC_4.2.0)(64bit) libm.so.6()(64bit) libm.so.6(GLIBC_2.2.5)(64bit) libpthread.so.0()(64bit) libpthread.so.0(GLIBC_2.2.5)(64bit) libpthread.so.0(GLIBC_2.3.2)(64bit) libpthread.so.0(GLIBC_2.3.4)(64bit) librt.so.1()(64bit) librt.so.1(GLIBC_2.2.5)(64bit) libsqlite3.so.0()(64bit) rtld(GNU_HASH) -Checking for unpackaged file(s): /usr/lib/rpm/check-files /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 -Wrote: /home/alex/rpmbuild/SRPMS/onedrive-2.4.15-1.el7.src.rpm -Wrote: /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm -Executing(%clean): /bin/sh -e /var/tmp/rpm-tmp.nWoW33 -+ umask 022 -+ cd /home/alex/rpmbuild/BUILD -+ cd onedrive-2.4.15 -+ exit 0 -[alex@localhost ~]$ sudo yum -y install /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm -Loaded plugins: fastestmirror -Examining /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm: onedrive-2.4.15-1.el7.x86_64 -Marking /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm to be installed -Resolving Dependencies ---> Running transaction check ----> Package onedrive.x86_64 0:2.4.15-1.el7 will be installed ---> Finished Dependency Resolution - -Dependencies Resolved - -============================================================================================================================================================================================== - Package Arch Version Repository Size -============================================================================================================================================================================================== -Installing: - onedrive x86_64 2.4.15-1.el7 /onedrive-2.4.15-1.el7.x86_64 7.2 M - -Transaction Summary -============================================================================================================================================================================================== -Install 1 Package - -Total size: 7.2 M -Installed size: 7.2 M -Downloading packages: -Running transaction check -Running transaction test -Transaction test succeeded -Running transaction - Installing : onedrive-2.4.15-1.el7.x86_64 1/1 - Verifying : onedrive-2.4.15-1.el7.x86_64 1/1 - -Installed: - onedrive.x86_64 0:2.4.15-1.el7 - -Complete! 
-[alex@localhost ~]$ which onedrive -/usr/bin/onedrive -[alex@localhost ~]$ onedrive --version -onedrive v2.4.15 -[alex@localhost ~]$ onedrive --display-config -onedrive version = v2.4.15 -Config path = /home/alex/.config/onedrive -Config file found in config path = false -Config option 'check_nosync' = false -Config option 'sync_dir' = /home/alex/OneDrive -Config option 'skip_dir' = -Config option 'skip_file' = ~*|.~*|*.tmp -Config option 'skip_dotfiles' = false -Config option 'skip_symlinks' = false -Config option 'monitor_interval' = 300 -Config option 'min_notify_changes' = 5 -Config option 'log_dir' = /var/log/onedrive/ -Config option 'classify_as_big_delete' = 1000 -Config option 'upload_only' = false -Config option 'no_remote_delete' = false -Config option 'remove_source_files' = false -Config option 'sync_root_files' = false -Selective sync 'sync_list' configured = false -Business Shared Folders configured = false -[alex@localhost ~]$ -``` - -### CentOS 8 -```text -[alex@localhost ~]$ rpmbuild -ba ~/rpmbuild/SPECS/onedrive.spec -Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.UINFyE -+ umask 022 -+ cd /home/alex/rpmbuild/BUILD -+ cd /home/alex/rpmbuild/BUILD -+ rm -rf onedrive-2.4.15 -+ /usr/bin/gzip -dc /home/alex/rpmbuild/SOURCES/v2.4.15.tar.gz -+ /usr/bin/tar -xof - -+ STATUS=0 -+ '[' 0 -ne 0 ']' -+ cd onedrive-2.4.15 -+ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . -+ exit 0 -Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.cX1WQa -+ umask 022 -+ cd /home/alex/rpmbuild/BUILD -+ cd onedrive-2.4.15 -+ CFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection' -+ export CFLAGS -+ CXXFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection' -+ export CXXFLAGS -+ FFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -I/usr/lib64/gfortran/modules' -+ export FFLAGS -+ FCFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -I/usr/lib64/gfortran/modules' -+ export FCFLAGS -+ LDFLAGS='-Wl,-z,relro -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld' -+ export LDFLAGS -+ '[' 1 = 1 ']' -+++ dirname ./configure -++ find . -name config.guess -o -name config.sub -+ '[' 1 = 1 ']' -+ '[' x '!=' 'x-Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld' ']' -++ find . 
-name ltmain.sh -+ ./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info -configure: WARNING: unrecognized options: --disable-dependency-tracking -checking for a BSD-compatible install... /usr/bin/install -c -checking for x86_64-redhat-linux-gnu-pkg-config... /usr/bin/x86_64-redhat-linux-gnu-pkg-config -checking pkg-config is at least version 0.9.0... yes -checking for dmd... dmd -checking version of D compiler... 2.087.0 -checking for curl... yes -checking for sqlite... yes -configure: creating ./config.status -config.status: creating Makefile -config.status: creating contrib/pacman/PKGBUILD -config.status: creating contrib/spec/onedrive.spec -config.status: creating onedrive.1 -config.status: creating contrib/systemd/onedrive.service -config.status: creating contrib/systemd/onedrive@.service -configure: WARNING: unrecognized options: --disable-dependency-tracking -+ make -if [ -f .git/HEAD ] ; then \ - git describe --tags > version ; \ -else \ - echo v2.4.15 > version ; \ -fi -dmd -w -g -O -J. -L-lcurl -L-lsqlite3 -L-ldl src/config.d src/itemdb.d src/log.d src/main.d src/monitor.d src/onedrive.d src/qxor.d src/selective.d src/sqlite.d src/sync.d src/upload.d src/util.d src/progress.d src/arsd/cgi.d -ofonedrive -+ exit 0 -Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.dNFPdx -+ umask 022 -+ cd /home/alex/rpmbuild/BUILD -+ '[' /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 '!=' / ']' -+ rm -rf /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 -++ dirname /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 -+ mkdir -p /home/alex/rpmbuild/BUILDROOT -+ mkdir /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 -+ cd onedrive-2.4.15 -+ /usr/bin/make install DESTDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 'INSTALL=/usr/bin/install -p' PREFIX=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 -/usr/bin/install -p -D onedrive /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/bin/onedrive -/usr/bin/install -p -D onedrive.1 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/man/man1/onedrive.1 -/usr/bin/install -p -D -m 644 contrib/logrotate/onedrive.logrotate /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/etc/logrotate.d/onedrive -mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive -/usr/bin/install -p -D -m 644 README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive -/usr/bin/install -p -d -m 0755 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/user /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/system -/usr/bin/install -p -m 0644 contrib/systemd/onedrive@.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/system -/usr/bin/install -p -m 0644 contrib/systemd/onedrive.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/system -+ /usr/lib/rpm/check-buildroot -+ /usr/lib/rpm/redhat/brp-ldconfig 
-/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/etc/ld.so.conf: No such file or directory -+ /usr/lib/rpm/brp-compress -+ /usr/lib/rpm/brp-strip /usr/bin/strip -+ /usr/lib/rpm/brp-strip-comment-note /usr/bin/strip /usr/bin/objdump -+ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip -+ /usr/lib/rpm/brp-python-bytecompile 1 -+ /usr/lib/rpm/brp-python-hardlink -+ PYTHON3=/usr/libexec/platform-python -+ /usr/lib/rpm/redhat/brp-mangle-shebangs -Processing files: onedrive-2.4.15-1.el8.x86_64 -Executing(%doc): /bin/sh -e /var/tmp/rpm-tmp.TnFKbZ -+ umask 022 -+ cd /home/alex/rpmbuild/BUILD -+ cd onedrive-2.4.15 -+ DOCDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive -+ export LC_ALL=C -+ LC_ALL=C -+ export DOCDIR -+ /usr/bin/mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive -+ cp -pr README.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive -+ cp -pr LICENSE /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive -+ cp -pr CHANGELOG.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive -+ exit 0 -warning: File listed twice: /usr/share/doc/onedrive -warning: File listed twice: /usr/share/doc/onedrive/CHANGELOG.md -warning: File listed twice: /usr/share/doc/onedrive/LICENSE -warning: File listed twice: /usr/share/doc/onedrive/README.md -Provides: config(onedrive) = 2.4.15-1.el8 onedrive = 2.4.15-1.el8 onedrive(x86-64) = 2.4.15-1.el8 -Requires(rpmlib): rpmlib(CompressedFileNames) <= 3.0.4-1 rpmlib(FileDigests) <= 4.6.0-1 rpmlib(PayloadFilesHavePrefix) <= 4.0-1 -Requires(post): systemd -Requires(preun): systemd -Requires(postun): systemd -Requires: ld-linux-x86-64.so.2()(64bit) ld-linux-x86-64.so.2(GLIBC_2.3)(64bit) libc.so.6()(64bit) libc.so.6(GLIBC_2.14)(64bit) libc.so.6(GLIBC_2.15)(64bit) libc.so.6(GLIBC_2.2.5)(64bit) libc.so.6(GLIBC_2.3.2)(64bit) libc.so.6(GLIBC_2.3.4)(64bit) libc.so.6(GLIBC_2.4)(64bit) libc.so.6(GLIBC_2.6)(64bit) libc.so.6(GLIBC_2.8)(64bit) libc.so.6(GLIBC_2.9)(64bit) libcurl.so.4()(64bit) libdl.so.2()(64bit) libdl.so.2(GLIBC_2.2.5)(64bit) libgcc_s.so.1()(64bit) libgcc_s.so.1(GCC_3.0)(64bit) libgcc_s.so.1(GCC_4.2.0)(64bit) libm.so.6()(64bit) libm.so.6(GLIBC_2.2.5)(64bit) libpthread.so.0()(64bit) libpthread.so.0(GLIBC_2.2.5)(64bit) libpthread.so.0(GLIBC_2.3.2)(64bit) libpthread.so.0(GLIBC_2.3.4)(64bit) librt.so.1()(64bit) librt.so.1(GLIBC_2.2.5)(64bit) libsqlite3.so.0()(64bit) rtld(GNU_HASH) -Checking for unpackaged file(s): /usr/lib/rpm/check-files /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 -Wrote: /home/alex/rpmbuild/SRPMS/onedrive-2.4.15-1.el8.src.rpm -Wrote: /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el8.x86_64.rpm -Executing(%clean): /bin/sh -e /var/tmp/rpm-tmp.FAMTFz -+ umask 022 -+ cd /home/alex/rpmbuild/BUILD -+ cd onedrive-2.4.15 -+ exit 0 -[alex@localhost ~]$ sudo yum -y install /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el8.x86_64.rpm -Last metadata expiration check: 0:04:07 ago on Fri 14 Jan 2022 14:22:13 EST. -Dependencies resolved. 
-============================================================================================================================================================================================== - Package Architecture Version Repository Size -============================================================================================================================================================================================== -Installing: - onedrive x86_64 2.4.15-1.el8 @commandline 1.5 M - -Transaction Summary -============================================================================================================================================================================================== -Install 1 Package - -Total size: 1.5 M -Installed size: 7.1 M -Downloading Packages: -Running transaction check -Transaction check succeeded. -Running transaction test -Transaction test succeeded. -Running transaction - Preparing : 1/1 - Installing : onedrive-2.4.15-1.el8.x86_64 1/1 - Running scriptlet: onedrive-2.4.15-1.el8.x86_64 1/1 - Verifying : onedrive-2.4.15-1.el8.x86_64 1/1 - -Installed: - onedrive-2.4.15-1.el8.x86_64 - -Complete! -[alex@localhost ~]$ which onedrive -/usr/bin/onedrive -[alex@localhost ~]$ onedrive --version -onedrive v2.4.15 -[alex@localhost ~]$ onedrive --display-config -onedrive version = v2.4.15 -Config path = /home/alex/.config/onedrive -Config file found in config path = false -Config option 'check_nosync' = false -Config option 'sync_dir' = /home/alex/OneDrive -Config option 'skip_dir' = -Config option 'skip_file' = ~*|.~*|*.tmp -Config option 'skip_dotfiles' = false -Config option 'skip_symlinks' = false -Config option 'monitor_interval' = 300 -Config option 'min_notify_changes' = 5 -Config option 'log_dir' = /var/log/onedrive/ -Config option 'classify_as_big_delete' = 1000 -Config option 'upload_only' = false -Config option 'no_remote_delete' = false -Config option 'remove_source_files' = false -Config option 'sync_root_files' = false -Selective sync 'sync_list' configured = false -Business Shared Folders configured = false -[alex@localhost ~]$ -``` diff --git a/docs/business-shared-folders.md b/docs/business-shared-folders.md deleted file mode 100644 index 4282f4ac6..000000000 --- a/docs/business-shared-folders.md +++ /dev/null @@ -1,40 +0,0 @@ -# How to configure OneDrive Business Shared Folder Sync -## Application Version -Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. - -## Important Note -This feature has been 100% re-written from v2.5.0 onwards. A pre-requesite before using this capability in v2.5.0 and above is for you to revert any Shared Business Folder configuration you may be currently using, including, but not limited to: -* Removing `sync_business_shared_folders = "true|false"` from your 'config' file -* Removing the 'business_shared_folders' file -* Removing any local data | shared folder data from your configured 'sync_dir' to ensure that there are no conflicts or issues. - -## Process Overview -Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client: -1. From the OneDrive web interface, review the 'Shared' objects that have been shared with you. -2. 
Select the applicable folder, and click the 'Add shortcut to My files', which will then add this to your 'My files' folder -3. Update your OneDrive Client for Linux 'config' file to enable the feature by adding `sync_business_shared_items = "true"`. Adding this option will trigger a `--resync` requirement. -4. Test the configuration using '--dry-run' -5. Remove the use of '--dry-run' and sync the OneDrive Business Shared folders as required - - -**NOTE:** This documentation will be updated as this feature progresses. - - -### Enable syncing of OneDrive Business Shared Folders via config file -```text -sync_business_shared_items = "true" -``` - -### Disable syncing of OneDrive Business Shared Folders via config file -```text -sync_business_shared_items = "false" -``` - -## Known Issues -Shared folders, shared with you from people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders. - -Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below: - -![shared_with_me](./images/shared_with_me.JPG) - -This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966) diff --git a/docs/known-issues.md b/docs/known-issues.md deleted file mode 100644 index d6ac302a2..000000000 --- a/docs/known-issues.md +++ /dev/null @@ -1,60 +0,0 @@ -# List of Identified Known Issues -The following points detail known issues associated with this client: - -## Renaming or Moving Files in Standalone Mode causes online deletion and re-upload to occur -**Issue Tracker:** [#876](https://github.com/abraunegg/onedrive/issues/876), [#2579](https://github.com/abraunegg/onedrive/issues/2579) - -**Summary:** - -Renaming or moving files and/or folders while using the standalone sync option `--sync` this results in unnecessary data deletion online and subsequent re-upload. - -**Detailed Description:** - -In standalone mode (`--sync`), the renaming or moving folders locally that have already been synchronized leads to the data being deleted online and then re-uploaded in the next synchronization process. - -**Technical Explanation:** - -This behavior is expected from the client under these specific conditions. Renaming or moving files is interpreted as deleting them from their original location and creating them in a new location. In standalone sync mode, the client lacks the capability to track file system changes (including renames and moves) that occur when it is not running. This limitation is the root cause of the observed 'deletion and re-upload' cycle. - -**Recommended Workaround:** - -For effective tracking of file and folder renames or moves to new local directories, it is recommended to run the client in service mode (`--monitor`) rather than in standalone mode. This approach allows the client to immediately process these changes, enabling the data to be updated (renamed or moved) in the new location on OneDrive without undergoing deletion and re-upload. 
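As a minimal sketch of the difference described above (the `--sync` and `--monitor` option names are the ones referenced in this issue; everything else is illustrative):
```text
# Standalone mode: a rename or move performed while the client is not running
# cannot be tracked, so the next run deletes the old item online and re-uploads it
onedrive --sync

# Service mode: the client watches the filesystem while running and can process
# the rename or move as such, avoiding the delete + re-upload cycle
onedrive --monitor
```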
- -## Application 'stops' running without any visible reason -**Issue Tracker:** [#494](https://github.com/abraunegg/onedrive/issues/494), [#753](https://github.com/abraunegg/onedrive/issues/753), [#792](https://github.com/abraunegg/onedrive/issues/792), [#884](https://github.com/abraunegg/onedrive/issues/884), [#1162](https://github.com/abraunegg/onedrive/issues/1162), [#1408](https://github.com/abraunegg/onedrive/issues/1408), [#1520](https://github.com/abraunegg/onedrive/issues/1520), [#1526](https://github.com/abraunegg/onedrive/issues/1526) - -**Summary:** - -Users experience sudden shutdowns in a client application during file transfers with Microsoft's Europe Data Centers, likely due to unstable internet or HTTPS inspection issues. This problem, often signaled by an error code of 141, is related to the application's reliance on Curl and OpenSSL. Resolution steps include system updates, seeking support from OS vendors, ISPs, OpenSSL/Curl teams, and providing detailed debug logs to Microsoft for analysis. - -**Detailed Description:** - -The application unexpectedly stops functioning during upload or download operations when using the client. This issue occurs without any apparent reason. Running `echo $?` after the unexpected exit may return an error code of 141. - -This problem predominantly arises when the client interacts with Microsoft's Europe Data Centers. - -**Technical Explanation:** - -The client heavily relies on Curl and OpenSSL for operations with the Microsoft OneDrive service. A common observation during this error is an entry in the HTTPS Debug Log stating: -``` -OpenSSL SSL_read: SSL_ERROR_SYSCALL, errno 104 -``` -To confirm this as the root cause, a detailed HTTPS debug log can be generated with these commands: -``` ---verbose --verbose --debug-https -``` - -This error typically suggests one of the following issues: -* An unstable internet connection between the user and the OneDrive service. -* An issue with HTTPS transparent inspection services that monitor the traffic en route to the OneDrive service. - -**Recommended Resolution:** - -Recommended steps to address this issue include: -* Updating your operating system to the latest version. -* Seeking assistance from your OS vendor. -* Contacting your Internet Service Provider (ISP) or your IT Help Desk. -* Reporting the issue to the OpenSSL and/or Curl teams for improved handling of such connection failures. -* Creating a HTTPS Debug Log during the issue and submitting a support request to Microsoft with the log for their analysis. - -For more in-depth SSL troubleshooting, please read: https://maulwuff.de/research/ssl-debugging.html \ No newline at end of file diff --git a/docs/national-cloud-deployments.md b/docs/national-cloud-deployments.md deleted file mode 100644 index 6b348388d..000000000 --- a/docs/national-cloud-deployments.md +++ /dev/null @@ -1,145 +0,0 @@ -# How to configure access to specific Microsoft Azure deployments -## Application Version -Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. - -## Process Overview -In some cases it is a requirement to utilise specific Microsoft Azure cloud deployments to conform with data and security reuqirements that requires data to reside within the geographic borders of that country. 
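For the 'Application stops running' issue described above, a minimal sketch of capturing the HTTPS debug log it recommends, assuming a standalone `--sync` run and a hypothetical log file name; the `--verbose --verbose --debug-https` flags and the `echo $?` check come from that section, while the output redirection is a generic shell assumption:
```text
# Capture a detailed HTTPS debug log while reproducing the unexpected exit
onedrive --sync --verbose --verbose --debug-https > ~/onedrive-debug.log 2>&1

# Per the known issue, an exit code of 141 may be returned after the crash
echo $?
```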
-Current national clouds that are supported are: -* Microsoft Cloud for US Government -* Microsoft Cloud Germany -* Azure and Office365 operated by 21Vianet in China - -In order to successfully use these specific Microsoft Azure deployments, the following steps are required: -1. Register an application with the Microsoft identity platform using the Azure portal -2. Configure the new application with the appropriate authentication scopes -3. Validate that the authentication / redirect URI is correct for your application registration -4. Configure the onedrive client to use the new application id as provided during application registration -5. Configure the onedrive client to use the right Microsoft Azure deployment region that your application was registered with -6. Authenticate the client - -## Step 1: Register a new application with Microsoft Azure -1. Log into your applicable Microsoft Azure Portal with your applicable Office365 identity: - -| National Cloud Environment | Microsoft Azure Portal | -|---|---| -| Microsoft Cloud for US Government | https://portal.azure.com/ | -| Microsoft Cloud Germany | https://portal.azure.com/ | -| Azure and Office365 operated by 21Vianet | https://portal.azure.cn/ | - -2. Select 'Azure Active Directory' as the service you wish to configure -3. Under 'Manage', select 'App registrations' to register a new application -4. Click 'New registration' -5. Type in the appropriate details required as per below: - -![application_registration](./images/application_registration.jpg) - -6. To save the application registration, click 'Register' and something similar to the following will be displayed: - -![application_registration_done](./images/application_registration_done.jpg) - -**Note:** The Application (client) ID UUID as displayed after client registration, is what is required as the 'application_id' for Step 4 below. 
- -## Step 2: Configure application authentication scopes -Configure the API permissions as per the following: - -| API / Permissions name | Type | Description | Admin consent required | -|---|---|---|---| -| Files.ReadWrite | Delegated | Have full access to user files | No | -| Files.ReadWrite.All | Delegated | Have full access to all files user can access | No | -| Sites.ReadWrite.All | Delegated | Have full access to all items in all site collections | No | -| offline_access | Delegated | Maintain access to data you have given it access to | No | - -![authentication_scopes](./images/authentication_scopes.jpg) - -## Step 3: Validate that the authentication / redirect URI is correct -Add the appropriate redirect URI for your Azure deployment: - -![authentication_response_uri](./images/authentication_response_uri.jpg) - -A valid entry for the response URI should be one of: -* https://login.microsoftonline.us/common/oauth2/nativeclient (Microsoft Cloud for US Government) -* https://login.microsoftonline.de/common/oauth2/nativeclient (Microsoft Cloud Germany) -* https://login.chinacloudapi.cn/common/oauth2/nativeclient (Azure and Office365 operated by 21Vianet in China) - -For a single-tenant application, it may be necessary to use your specific tenant id instead of "common": -* https://login.microsoftonline.us/example.onmicrosoft.us/oauth2/nativeclient (Microsoft Cloud for US Government) -* https://login.microsoftonline.de/example.onmicrosoft.de/oauth2/nativeclient (Microsoft Cloud Germany) -* https://login.chinacloudapi.cn/example.onmicrosoft.cn/oauth2/nativeclient (Azure and Office365 operated by 21Vianet in China) - -## Step 4: Configure the onedrive client to use new application registration -Update to your 'onedrive' configuration file (`~/.config/onedrive/config`) the following: -```text -application_id = "insert valid entry here" -``` - -This will reconfigure the client to use the new application registration you have created. - -**Example:** -```text -application_id = "22c49a0d-d21c-4792-aed1-8f163c982546" -``` - -## Step 5: Configure the onedrive client to use the specific Microsoft Azure deployment -Update to your 'onedrive' configuration file (`~/.config/onedrive/config`) the following: -```text -azure_ad_endpoint = "insert valid entry here" -``` - -Valid entries are: -* USL4 (Microsoft Cloud for US Government) -* USL5 (Microsoft Cloud for US Government - DOD) -* DE (Microsoft Cloud Germany) -* CN (Azure and Office365 operated by 21Vianet in China) - -This will configure your client to use the correct Azure AD and Graph endpoints as per [https://docs.microsoft.com/en-us/graph/deployments](https://docs.microsoft.com/en-us/graph/deployments) - -**Example:** -```text -azure_ad_endpoint = "USL4" -``` - -If the Microsoft Azure deployment does not support multi-tenant applications, update to your 'onedrive' configuration file (`~/.config/onedrive/config`) the following: -```text -azure_tenant_id = "insert valid entry here" -``` - -This will configure your client to use the specified tenant id in its Azure AD and Graph endpoint URIs, instead of "common". -The tenant id may be the GUID Directory ID (formatted "00000000-0000-0000-0000-000000000000"), or the fully qualified tenant name (e.g. "example.onmicrosoft.us"). -The GUID Directory ID may be located in the Azure administation page as per [https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id](https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id). 
Note that you may need to go to your national-deployment-specific administration page, rather than following the links within that document. -The tenant name may be obtained by following the PowerShell instructions on [https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id](https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id); it is shown as the "TenantDomain" upon completion of the "Connect-AzureAD" command. - -**Example:** -```text -azure_tenant_id = "example.onmicrosoft.us" -# or -azure_tenant_id = "0c4be462-a1ab-499b-99e0-da08ce52a2cc" -``` - -## Step 6: Authenticate the client -Run the application without any additional command switches. - -You will be asked to open a specific URL by using your web browser where you will have to login into your Microsoft Account and give the application the permission to access your files. After giving permission to the application, you will be redirected to a blank page. Copy the URI of the blank page into the application. -```text -[user@hostname ~]$ onedrive - -Authorize this app visiting: - -https://..... - -Enter the response uri: - -``` - -**Example:** -``` -[user@hostname ~]$ onedrive -Authorize this app visiting: - -https://login.microsoftonline.com/common/oauth2/v2.0/authorize?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient - -Enter the response uri: https://login.microsoftonline.com/common/oauth2/nativeclient?code= - -Application has been successfully authorised, however no additional command switches were provided. - -Please use --help for further assistance in regards to running this application. -``` diff --git a/docs/privacy-policy.md b/docs/privacy-policy.md deleted file mode 100644 index 64fe1dd3c..000000000 --- a/docs/privacy-policy.md +++ /dev/null @@ -1,65 +0,0 @@ -# Privacy Policy -Effective Date: May 16 2018 - -## Introduction - -This Privacy Policy outlines how OneDrive Client for Linux ("we," "our," or "us") collects, uses, and protects information when you use our software ("OneDrive Client for Linux"). We respect your privacy and are committed to ensuring the confidentiality and security of any information you provide while using the Software. - -## Information We Do Not Collect - -We want to be transparent about the fact that we do not collect any personal data, usage data, or tracking data through the Software. This means: - -1. **No Personal Data**: We do not collect any information that can be used to personally identify you, such as your name, email address, phone number, or physical address. - -2. **No Usage Data**: We do not collect data about how you use the Software, such as the features you use, the duration of your sessions, or any interactions within the Software. - -3. **No Tracking Data**: We do not use cookies or similar tracking technologies to monitor your online behavior or track your activities across websites or apps. - -## How We Use Your Information - -Since we do not collect any personal, usage, or tracking data, there is no information for us to use for any purpose. - -## Third-Party Services - -The Software may include links to third-party websites or services, but we do not have control over the privacy practices or content of these third-party services. We encourage you to review the privacy policies of any third-party services you access through the Software. 
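Combining the configuration fragments from Steps 4 and 5 of the national cloud deployment guide above, a minimal sketch of the resulting `~/.config/onedrive/config` entries for Microsoft Cloud for US Government, reusing the illustrative values from that guide (not real credentials):
```text
# ~/.config/onedrive/config - national cloud deployment (illustrative values)
application_id = "22c49a0d-d21c-4792-aed1-8f163c982546"
azure_ad_endpoint = "USL4"
# Only required when the deployment does not support multi-tenant applications
azure_tenant_id = "example.onmicrosoft.us"
```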
- -## Children's Privacy - -Since we do not collect any personal, usage, or tracking data, there is no restriction on the use of this application by anyone under the age of 18. - -## Information You Choose to Share - -While we do not collect personal data, usage data, or tracking data through the Software, there may be instances where you voluntarily choose to share information with us, particularly when submitting bug reports. These bug reports may contain sensitive information such as account details, file names, and directory names. It's important to note that these details are included in the logs and debug logs solely for the purpose of diagnosing and resolving technical issues with the Software. - -We want to emphasize that, even in these cases, we do not have access to your actual data. The logs and debug logs provided in bug reports are used exclusively for technical troubleshooting and debugging purposes. We take measures to treat this information with the utmost care, and it is only accessible to our technical support and development teams. We do not use this information for any other purpose, and we have strict security measures in place to protect it. - -## Protecting Your Sensitive Data - -We are committed to safeguarding your sensitive data and maintaining its confidentiality. To ensure its protection: - -1. **Limited Access**: Only authorized personnel within our technical support and development teams have access to the logs and debug logs containing sensitive data, and they are trained in handling this information securely. - -2. **Data Encryption**: We use industry-standard encryption protocols to protect the transmission and storage of sensitive data. - -3. **Data Retention**: We retain bug report data for a limited time necessary for resolving the reported issue. Once the issue is resolved, we promptly delete or anonymize the data. - -4. **Security Measures**: We employ robust security measures to prevent unauthorized access, disclosure, or alteration of sensitive data. - -By submitting a bug report, you acknowledge and consent to the inclusion of sensitive information in logs and debug logs for the sole purpose of addressing technical issues with the Software. - -## Your Responsibilities - -While we take measures to protect your sensitive data, it is essential for you to exercise caution when submitting bug reports. Please refrain from including any sensitive or personally identifiable information that is not directly related to the technical issue you are reporting. You have the option to redact or obfuscate sensitive details in bug reports to further protect your data. - -## Changes to this Privacy Policy - -We may update this Privacy Policy from time to time to reflect changes in our practices or for other operational, legal, or regulatory reasons. We will notify you of any material changes by posting the updated Privacy Policy on our website or through the Software. We encourage you to review this Privacy Policy periodically. - -## Contact Us - -If you have any questions or concerns about this Privacy Policy or our privacy practices, please contact us at support@mynas.com.au or via GitHub (https://github.com/abraunegg/onedrive) - -## Conclusion - -By using the Software, you agree to the terms outlined in this Privacy Policy. If you do not agree with any part of this policy, please discontinue the use of the Software. 
- diff --git a/docs/sharepoint-libraries.md b/docs/sharepoint-libraries.md deleted file mode 100644 index d1714d4ed..000000000 --- a/docs/sharepoint-libraries.md +++ /dev/null @@ -1,228 +0,0 @@ -# How to configure OneDrive SharePoint Shared Library sync -**WARNING:** Several users have reported files being overwritten causing data loss as a result of using this client with SharePoint Libraries when running as a systemd service. - -When this has been investigated, the following has been noted as potential root causes: -* File indexing application such as Baloo File Indexer or Tracker3 constantly indexing your OneDrive data -* The use of WPS Office and how it 'saves' files by deleting the existing item and replaces it with the saved data - -Additionally there could be a yet unknown bug with the client, however all debugging and data provided previously shows that an 'external' process to the 'onedrive' application modifies the files triggering the undesirable upload to occur. - -**Possible Preventative Actions:** -* Disable all File Indexing for your SharePoint Library data. It is out of scope to detail on how you should do this. -* Disable using a systemd service for syncing your SharePoint Library data. -* Do not use WPS Office to edit your documents. Use OpenOffice or LibreOffice as these do not exhibit the same 'delete to save' action that WPS Office has. - -Additionally, please use caution when using this client with SharePoint. - -## Application Version -Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. - -## Process Overview -Syncing a OneDrive SharePoint library requires additional configuration for your 'onedrive' client: -1. Login to OneDrive and under 'Shared Libraries' obtain the shared library name -2. Query that shared library name using the client to obtain the required configuration details -3. Create a unique local folder which will be the SharePoint Library 'root' -4. Configure the client's config file with the required 'drive_id' -5. Test the configuration using '--dry-run' -6. Sync the SharePoint Library as required - -**Note:** The `--get-O365-drive-id` process below requires a fully configured 'onedrive' configuration so that the applicable Drive ID for the given Office 365 SharePoint Shared Library can be determined. It is highly recommended that you do not use the application 'default' configuration directory for any SharePoint Site, and configure separate items for each site you wish to use. - -## 1. Listing available OneDrive SharePoint Libraries -Login to the OneDrive web interface and determine which shared library you wish to configure the client for: -![shared_libraries](./images/SharedLibraries.jpg) - -## 2. Query OneDrive API to obtain required configuration details -Run the following command using the 'onedrive' client to query the OneDrive API to obtain the required 'drive_id' of the SharePoint Library that you wish to sync: -```text -onedrive --get-O365-drive-id '' -``` -This will return something similar to the following: -```text -Configuration file successfully loaded -Configuring Global Azure AD Endpoints -Initializing the Synchronization Engine ... 
-Office 365 Library Name Query: ------------------------------------------------ -Site Name: -Library Name: -drive_id: b!6H_y8B...xU5 -Library URL: ------------------------------------------------ -``` -If there are no matches to the site you are attempting to search, the following will be displayed: -```text -Configuration file successfully loaded -Configuring Global Azure AD Endpoints -Initializing the Synchronization Engine ... -Office 365 Library Name Query: blah - -ERROR: The requested SharePoint site could not be found. Please check it's name and your permissions to access the site. - -The following SharePoint site names were returned: - * - * - ... - * -``` -This list of site names can be used as a basis to search for the correct site for which you are searching - -## 3. Create a new configuration directory and sync location for this SharePoint Library -Create a new configuration directory for this SharePoint Library in the following manner: -```text -mkdir ~/.config/SharePoint_My_Library_Name -``` - -Create a new local folder to store the SharePoint Library data in: -```text -mkdir ~/SharePoint_My_Library_Name -``` - -**Note:** Do not use spaces in the directory name, use '_' as a replacement - -## 4. Configure SharePoint Library config file with the required 'drive_id' & 'sync_dir' options -Download a copy of the default configuration file by downloading this file from GitHub and saving this file in the directory created above: -```text -wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/SharePoint_My_Library_Name/config -``` - -Update your 'onedrive' configuration file (`~/.config/SharePoint_My_Library_Name/config`) with the local folder where you will store your data: -```text -sync_dir = "~/SharePoint_My_Library_Name" -``` - -Update your 'onedrive' configuration file(`~/.config/SharePoint_My_Library_Name/config`) with the 'drive_id' value obtained in the steps above: -```text -drive_id = "insert the drive_id value from above here" -``` -The OneDrive client will now be configured to sync this SharePoint shared library to your local system and the location you have configured. - -**Note:** After changing `drive_id`, you must perform a full re-synchronization by adding `--resync` to your existing command line. - -## 5. Validate and Test the configuration -Validate your new configuration using the `--display-config` option to validate you have configured the application correctly: -```text -onedrive --confdir="~/.config/SharePoint_My_Library_Name" --display-config -``` - -Test your new configuration using the `--dry-run` option to validate the application configuration: -```text -onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --verbose --dry-run -``` - -**Note:** As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration. - -## 6. Sync the SharePoint Library as required -Sync the SharePoint Library to your system with either `--synchronize` or `--monitor` operations: -```text -onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --verbose -``` - -```text -onedrive --confdir="~/.config/SharePoint_My_Library_Name" --monitor --verbose -``` - -**Note:** As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration. - -## 7. 
Enable custom systemd service for SharePoint Library -Systemd can be used to automatically run this configuration in the background, however, a unique systemd service will need to be setup for this SharePoint Library instance - -In order to automatically start syncing each SharePoint Library, you will need to create a service file for each SharePoint Library. From the applicable 'systemd folder' where the applicable systemd service file exists: -* RHEL / CentOS: `/usr/lib/systemd/system` -* Others: `/usr/lib/systemd/user` and `/lib/systemd/system` - -### Step1: Create a new systemd service file -#### Red Hat Enterprise Linux, CentOS Linux -Copy the required service file to a new name: -```text -sudo cp /usr/lib/systemd/system/onedrive.service /usr/lib/systemd/system/onedrive-SharePoint_My_Library_Name.service -``` -or -```text -sudo cp /usr/lib/systemd/system/onedrive@.service /usr/lib/systemd/system/onedrive-SharePoint_My_Library_Name@.service -``` - -#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora -Copy the required service file to a new name: -```text -sudo cp /usr/lib/systemd/user/onedrive.service /usr/lib/systemd/user/onedrive-SharePoint_My_Library_Name.service -``` -or -```text -sudo cp /lib/systemd/system/onedrive@.service /lib/systemd/system/onedrive-SharePoint_My_Library_Name@.service -``` - -### Step 2: Edit new systemd service file -Edit the new systemd file, updating the line beginning with `ExecStart` so that the confdir mirrors the one you used above: -```text -ExecStart=/usr/local/bin/onedrive --monitor --confdir="/full/path/to/config/dir" -``` - -Example: -```text -ExecStart=/usr/local/bin/onedrive --monitor --confdir="/home/myusername/.config/SharePoint_My_Library_Name" -``` - -**Note:** When running the client manually, `--confdir="~/.config/......` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be expanded. - -### Step 3: Enable the new systemd service -Once the file is correctly editied, you can enable the new systemd service using the following commands. 
- -#### Red Hat Enterprise Linux, CentOS Linux -```text -systemctl enable onedrive-SharePoint_My_Library_Name -systemctl start onedrive-SharePoint_My_Library_Name -``` - -#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora -```text -systemctl --user enable onedrive-SharePoint_My_Library_Name -systemctl --user start onedrive-SharePoint_My_Library_Name -``` -or -```text -systemctl --user enable onedrive-SharePoint_My_Library_Name@myusername.service -systemctl --user start onedrive-SharePoint_My_Library_Name@myusername.service -``` - -### Step 4: Viewing systemd status and logs for the custom service -#### Viewing systemd service status - Red Hat Enterprise Linux, CentOS Linux -```text -systemctl status onedrive-SharePoint_My_Library_Name -``` - -#### Viewing systemd service status - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora -```text -systemctl --user status onedrive-SharePoint_My_Library_Name -``` - -#### Viewing journalctl systemd logs - Red Hat Enterprise Linux, CentOS Linux -```text -journalctl --unit=onedrive-SharePoint_My_Library_Name -f -``` - -#### Viewing journalctl systemd logs - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora -```text -journalctl --user --unit=onedrive-SharePoint_My_Library_Name -f -``` - -### Step 5: (Optional) Run custom systemd service at boot without user login -In some cases it may be desirable for the systemd service to start without having to login as your 'user' - -All the systemd steps above that utilise the `--user` option, will run the systemd service as your particular user. As such, the systemd service will not start unless you actually login to your system. - -To avoid this issue, you need to reconfigure your 'user' account so that the systemd services you have created will startup without you having to login to your system: -```text -loginctl enable-linger -``` - -Example: -```text -alex@ubuntu-headless:~$ loginctl enable-linger alex -``` - -## 8. Configuration for a SharePoint Library is complete -The 'onedrive' client configuration for this particular SharePoint Library is now complete. - -# How to configure multiple OneDrive SharePoint Shared Library sync -Create a new configuration as per the process above. Repeat these steps for each SharePoint Library that you wish to use. diff --git a/docs/terms-of-service.md b/docs/terms-of-service.md deleted file mode 100644 index cdf7c4328..000000000 --- a/docs/terms-of-service.md +++ /dev/null @@ -1,54 +0,0 @@ -# OneDrive Client for Linux - Software Service Terms of Service - -## 1. Introduction - -These Terms of Service ("Terms") govern your use of the OneDrive Client for Linux ("Application") software and related Microsoft OneDrive services ("Service") provided by Microsoft. By accessing or using the Service, you agree to comply with and be bound by these Terms. If you do not agree to these Terms, please do not use the Service. - -## 2. License Compliance - -The OneDrive Client for Linux software is licensed under the GNU General Public License, version 3.0 (the "GPLv3"). Your use of the software must comply with the terms and conditions of the GPLv3. A copy of the GPLv3 can be found here: https://www.gnu.org/licenses/gpl-3.0.en.html - -## 3. Use of the Service - -### 3.1. Access and Accounts - -You may need to create an account or provide personal information to access certain features of the Service. You are responsible for maintaining the confidentiality of your account information and are solely responsible for all activities that occur under your account. - -### 3.2. 
Prohibited Activities - -You agree not to: - -- Use the Service in any way that violates applicable laws or regulations. -- Use the Service to engage in any unlawful, harmful, or fraudulent activity. -- Use the Service in any manner that disrupts, damages, or impairs the Service. - -## 4. Intellectual Property - -The OneDrive Client for Linux software is subject to the GPLv3, and you must respect all copyrights, trademarks, and other intellectual property rights associated with the software. Any contributions you make to the software must also comply with the GPLv3. - -## 5. Disclaimer of Warranties - -The OneDrive Client for Linux software is provided "as is" without any warranties, either expressed or implied. We do not guarantee that the use of the Application will be error-free or uninterrupted. - -Microsoft is not responsible for OneDrive Client for Linux. Any issues or problems with OneDrive Client for Linux should be raised on GitHub at https://github.com/abraunegg/onedrive or email support@mynas.com.au - -OneDrive Client for Linux is not responsible for the Microsoft OneDrive Service or the Microsoft Graph API Service that this Application utilizes. Any issue with either Microsoft OneDrive or Microsoft Graph API should be raised with Microsoft via their support channel in your country. - -## 6. Limitation of Liability - -To the fullest extent permitted by law, we shall not be liable for any direct, indirect, incidental, special, consequential, or punitive damages, or any loss of profits or revenues, whether incurred directly or indirectly, or any loss of data, use, goodwill, or other intangible losses, resulting from (a) your use or inability to use the Service, or (b) any other matter relating to the Service. - -This limitiation of liability explicitly relates to the use of the OneDrive Client for Linux software and does not affect your rights under the GPLv3. - -## 7. Changes to Terms - -We reserve the right to update or modify these Terms at any time without prior notice. Any changes will be effective immediately upon posting on GitHub. Your continued use of the Service after the posting of changes constitutes your acceptance of such changes. Changes can be reviewed on GitHub. - -## 8. Governing Law - -These Terms shall be governed by and construed in accordance with the laws of Australia, without regard to its conflict of law principles. - -## 9. Contact Us - -If you have any questions or concerns about these Terms, please contact us at https://github.com/abraunegg/onedrive or email support@mynas.com.au - diff --git a/docs/ubuntu-package-install.md b/docs/ubuntu-package-install.md deleted file mode 100644 index df20db923..000000000 --- a/docs/ubuntu-package-install.md +++ /dev/null @@ -1,420 +0,0 @@ -# Installation of 'onedrive' package on Debian and Ubuntu - -This document covers the appropriate steps to install the 'onedrive' client using the provided packages for Debian and Ubuntu. - -#### Important information for all Ubuntu and Ubuntu based distribution users: -This information is specifically for the following platforms and distributions: - -* Lubuntu -* Linux Mint -* POP OS -* Peppermint OS -* Raspbian -* Ubuntu - -Whilst there are [onedrive](https://packages.ubuntu.com/search?keywords=onedrive&searchon=names&suite=all§ion=all) Universe packages available for Ubuntu, do not install 'onedrive' from these Universe packages. The default Ubuntu Universe packages are out-of-date and are not supported and should not be used. 
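A quick way to confirm which repository a candidate 'onedrive' package would be installed from is the standard `apt-cache policy` command; this check is a suggestion, not part of the packaged instructions:
```text
# Show the installed and candidate versions of 'onedrive' and which
# repository each candidate version would be pulled from
apt-cache policy onedrive
```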
- -## Determine which instructions to use -Ubuntu and its clones are based on various different releases, thus, you must use the correct instructions below, otherwise you may run into package dependancy issues and will be unable to install the client. - -### Step 1: Remove any configured PPA and associated 'onedrive' package and systemd service files -Many Internet 'help' pages provide inconsistent details on how to install the OneDrive Client for Linux. A number of these websites continue to point users to install the client via the yann1ck PPA repository however this PPA no longer exists and should not be used. - -To remove the PPA repository and the older client, perform the following actions: -```text -sudo apt remove onedrive -sudo add-apt-repository --remove ppa:yann1ck/onedrive -``` - -Additionally, Ubuntu and its clones have a bad habit of creating a 'default' systemd service file when installing the 'onedrive' package so that the client will automatically run the client post being authenticated. This systemd entry is erroneous and needs to be removed. -``` -Created symlink /etc/systemd/user/default.target.wants/onedrive.service → /usr/lib/systemd/user/onedrive.service. -``` -To remove this symbolic link, run the following command: -``` -sudo rm /etc/systemd/user/default.target.wants/onedrive.service -``` - -### Step 2: Ensure your system is up-to-date -Use a script, similar to the following to ensure your system is updated correctly: -```text -#!/bin/bash -rm -rf /var/lib/dpkg/lock-frontend -rm -rf /var/lib/dpkg/lock -apt-get update -apt-get upgrade -y -apt-get dist-upgrade -y -apt-get autoremove -y -apt-get autoclean -y -``` - -Run this script as 'root' by using `su -` to elevate to 'root'. Example below: -```text -Welcome to Ubuntu 20.04.1 LTS (GNU/Linux 5.4.0-48-generic x86_64) - - * Documentation: https://help.ubuntu.com - * Management: https://landscape.canonical.com - * Support: https://ubuntu.com/advantage - -425 updates can be installed immediately. -208 of these updates are security updates. -To see these additional updates run: apt list --upgradable - -Your Hardware Enablement Stack (HWE) is supported until April 2025. -Last login: Thu Jan 20 14:21:48 2022 from my.ip.address -alex@ubuntu-20-LTS:~$ su - -Password: -root@ubuntu-20-LTS:~# ls -la -total 28 -drwx------ 3 root root 4096 Oct 10 2020 . -drwxr-xr-x 20 root root 4096 Oct 10 2020 .. --rw------- 1 root root 175 Jan 20 14:23 .bash_history --rw-r--r-- 1 root root 3106 Dec 6 2019 .bashrc -drwx------ 2 root root 4096 Apr 23 2020 .cache --rw-r--r-- 1 root root 161 Dec 6 2019 .profile --rwxr-xr-x 1 root root 174 Oct 10 2020 update-os.sh -root@ubuntu-20-LTS:~# cat update-os.sh -#!/bin/bash -rm -rf /var/lib/dpkg/lock-frontend -rm -rf /var/lib/dpkg/lock -apt-get update -apt-get upgrade -y -apt-get dist-upgrade -y -apt-get autoremove -y -apt-get autoclean -y -root@ubuntu-20-LTS:~# ./update-os.sh -Hit:1 http://au.archive.ubuntu.com/ubuntu focal InRelease -Hit:2 http://au.archive.ubuntu.com/ubuntu focal-updates InRelease -Hit:3 http://au.archive.ubuntu.com/ubuntu focal-backports InRelease -Hit:4 http://security.ubuntu.com/ubuntu focal-security InRelease -Reading package lists... 96% -... -Sourcing file `/etc/default/grub' -Sourcing file `/etc/default/grub.d/init-select.cfg' -Generating grub configuration file ... 
-Found linux image: /boot/vmlinuz-5.13.0-27-generic -Found initrd image: /boot/initrd.img-5.13.0-27-generic -Found linux image: /boot/vmlinuz-5.4.0-48-generic -Found initrd image: /boot/initrd.img-5.4.0-48-generic -Found memtest86+ image: /boot/memtest86+.elf -Found memtest86+ image: /boot/memtest86+.bin -done -Removing linux-modules-5.4.0-26-generic (5.4.0-26.30) ... -Processing triggers for libc-bin (2.31-0ubuntu9.2) ... -Reading package lists... Done -Building dependency tree -Reading state information... Done -root@ubuntu-20-LTS:~# -``` - -Reboot your system after running this process before continuing with Step 3. -```text -reboot -``` - -### Step 3: Determine what your OS is based on -Determine what your OS is based on. To do this, run the following command: -```text -lsb_release -a -``` -**Example:** -```text -alex@ubuntu-system:~$ lsb_release -a -No LSB modules are available. -Distributor ID: Ubuntu -Description: Ubuntu 22.04 LTS -Release: 22.04 -Codename: jammy -``` - -### Step 4: Pick the correct instructions to use -If required, review the table below based on your 'lsb_release' information to pick the appropriate instructions to use: - -| Release & Codename | Instructions to use | -|--------------------|---------------------| -| Linux Mint 19.x | This platform is End-of-Life (EOL) and no longer supported. You must upgrade to Linux Mint 21.x | -| Linux Mint 20.x | Use [Ubuntu 20.04](#distribution-ubuntu-2004) instructions below | -| Linux Mint 21.x | Use [Ubuntu 22.04](#distribution-ubuntu-2204) instructions below | -| Linux Mint Debian Edition (LMDE) 5 / Elsie | Use [Debian 11](#distribution-debian-11) instructions below | -| Linux Mint Debian Edition (LMDE) 6 / Faye | Use [Debian 12](#distribution-debian-12) instructions below | -| Debian 9 | This platform is End-of-Life (EOL) and no longer supported. You must upgrade to Debian 12 | -| Debian 10 | You must build from source or upgrade your Operating System to Debian 12 | -| Debian 11 | Use [Debian 11](#distribution-debian-11) instructions below | -| Debian 12 | Use [Debian 12](#distribution-debian-12) instructions below | -| Debian Sid | Refer to https://packages.debian.org/sid/onedrive for assistance | -| Raspbian GNU/Linux 10 | You must build from source or upgrade your Operating System to Raspbian GNU/Linux 12 | -| Raspbian GNU/Linux 11 | Use [Debian 11](#distribution-debian-11) instructions below | -| Raspbian GNU/Linux 12 | Use [Debian 12](#distribution-debian-12) instructions below | -| Ubuntu 18.04 / Bionic | This platform is End-of-Life (EOL) and no longer supported. You must upgrade to Ubuntu 22.04 | -| Ubuntu 20.04 / Focal | Use [Ubuntu 20.04](#distribution-ubuntu-2004) instructions below | -| Ubuntu 21.04 / Hirsute | Use [Ubuntu 21.04](#distribution-ubuntu-2104) instructions below | -| Ubuntu 21.10 / Impish | Use [Ubuntu 21.10](#distribution-ubuntu-2110) instructions below | -| Ubuntu 22.04 / Jammy | Use [Ubuntu 22.04](#distribution-ubuntu-2204) instructions below | -| Ubuntu 22.10 / Kinetic | Use [Ubuntu 22.10](#distribution-ubuntu-2210) instructions below | -| Ubuntu 23.04 / Lunar | Use [Ubuntu 23.04](#distribution-ubuntu-2304) instructions below | -| Ubuntu 23.10 / Mantic | Use [Ubuntu 23.10](#distribution-ubuntu-2310) instructions below | - -**Note:** If your Linux distribution and release is not in the table above, you have 2 options: - -1. Compile the application from source. Refer to install.md (Compilation & Installation) for assistance. -2. 
Raise a support case with your Linux Distribution to provide you with an applicable package you can use. - -## Distribution Package Install Instructions - -### Distribution: Debian 11 -The packages support the following platform architectures: -|  i686  | x86_64 | ARMHF | AARCH64 | -|:----:|:------:|:-----:|:-------:| -|✔|✔|✔|✔| - -#### Step 1: Add the OpenSuSE Build Service repository release key -Add the OpenSuSE Build Service repository release key using the following command: -```text -wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_11/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null -``` - -#### Step 2: Add the OpenSuSE Build Service repository -Add the OpenSuSE Build Service repository using the following command: -```text -echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_11/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list -``` - -#### Step 3: Update your apt package cache -Run: `sudo apt-get update` - -#### Step 4: Install 'onedrive' -Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` - -#### Step 5: Read 'Known Issues' with these packages -Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. - -### Distribution: Debian 12 -The packages support the following platform architectures: -|  i686  | x86_64 | ARMHF | AARCH64 | -|:----:|:------:|:-----:|:-------:| -|✔|✔|✔|✔| - -#### Step 1: Add the OpenSuSE Build Service repository release key -Add the OpenSuSE Build Service repository release key using the following command: -```text -wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_12/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null -``` - -#### Step 2: Add the OpenSuSE Build Service repository -Add the OpenSuSE Build Service repository using the following command: -```text -echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_12/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list -``` - -#### Step 3: Update your apt package cache -Run: `sudo apt-get update` - -#### Step 4: Install 'onedrive' -Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` - -#### Step 5: Read 'Known Issues' with these packages -Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. 
- -### Distribution: Ubuntu 20.04 -The packages support the following platform architectures: -|  i686  | x86_64 | ARMHF | AARCH64 | -|:----:|:------:|:-----:|:-------:| -|❌|✔|✔|✔| - -#### Step 1: Add the OpenSuSE Build Service repository release key -Add the OpenSuSE Build Service repository release key using the following command: -```text -wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_20.04/Release.key | sudo apt-key add - -``` - -#### Step 2: Add the OpenSuSE Build Service repository -Add the OpenSuSE Build Service repository using the following command: -```text -echo 'deb https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_20.04/ ./' | sudo tee /etc/apt/sources.list.d/onedrive.list -``` - -#### Step 3: Update your apt package cache -Run: `sudo apt-get update` - -#### Step 4: Install 'onedrive' -Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` - -#### Step 5: Read 'Known Issues' with these packages -Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. - -### Distribution: Ubuntu 21.04 -The packages support the following platform architectures: -|  i686  | x86_64 | ARMHF | AARCH64 | -|:----:|:------:|:-----:|:-------:| -|❌|✔|✔|✔| - -#### Step 1: Add the OpenSuSE Build Service repository release key -Add the OpenSuSE Build Service repository release key using the following command: -```text -wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null -``` - -#### Step 2: Add the OpenSuSE Build Service repository -Add the OpenSuSE Build Service repository using the following command: -```text -echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list -``` - -#### Step 3: Update your apt package cache -Run: `sudo apt-get update` - -#### Step 4: Install 'onedrive' -Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` - -#### Step 5: Read 'Known Issues' with these packages -Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. 
- -### Distribution: Ubuntu 21.10 -The packages support the following platform architectures: -|  i686  | x86_64 | ARMHF | AARCH64 | -|:----:|:------:|:-----:|:-------:| -|❌|✔|✔|✔| - -#### Step 1: Add the OpenSuSE Build Service repository release key -Add the OpenSuSE Build Service repository release key using the following command: -```text -wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.10/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null -``` - -#### Step 2: Add the OpenSuSE Build Service repository -Add the OpenSuSE Build Service repository using the following command: -```text -echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.10/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list -``` - -#### Step 3: Update your apt package cache -Run: `sudo apt-get update` - -#### Step 4: Install 'onedrive' -Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` - -#### Step 5: Read 'Known Issues' with these packages -Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. - -### Distribution: Ubuntu 22.04 -The packages support the following platform architectures: -|  i686  | x86_64 | ARMHF | AARCH64 | -|:----:|:------:|:-----:|:-------:| -|❌|✔|✔|✔| - -#### Step 1: Add the OpenSuSE Build Service repository release key -Add the OpenSuSE Build Service repository release key using the following command: -```text -wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null -``` - -#### Step 2: Add the OpenSuSE Build Service repository -Add the OpenSuSE Build Service repository using the following command: -```text -echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list -``` - -#### Step 3: Update your apt package cache -Run: `sudo apt-get update` - -#### Step 4: Install 'onedrive' -Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` - -#### Step 5: Read 'Known Issues' with these packages -Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. 
- -### Distribution: Ubuntu 22.10 -The packages support the following platform architectures: -|  i686  | x86_64 | ARMHF | AARCH64 | -|:----:|:------:|:-----:|:-------:| -|❌|✔|✔|✔| - -#### Step 1: Add the OpenSuSE Build Service repository release key -Add the OpenSuSE Build Service repository release key using the following command: -```text -wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.10/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null -``` - -#### Step 2: Add the OpenSuSE Build Service repository -Add the OpenSuSE Build Service repository using the following command: -```text -echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.10/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list -``` - -#### Step 3: Update your apt package cache -Run: `sudo apt-get update` - -#### Step 4: Install 'onedrive' -Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` - -#### Step 5: Read 'Known Issues' with these packages -Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. - -### Distribution: Ubuntu 23.04 -The packages support the following platform architectures: -|  i686  | x86_64 | ARMHF | AARCH64 | -|:----:|:------:|:-----:|:-------:| -|❌|✔|✔|✔| - -#### Step 1: Add the OpenSuSE Build Service repository release key -Add the OpenSuSE Build Service repository release key using the following command: -```text -wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_23.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null -``` - -#### Step 2: Add the OpenSuSE Build Service repository -Add the OpenSuSE Build Service repository using the following command: -```text -echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_23.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list -``` - -#### Step 3: Update your apt package cache -Run: `sudo apt-get update` - -#### Step 4: Install 'onedrive' -Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` - -#### Step 5: Read 'Known Issues' with these packages -Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. 
- -### Distribution: Ubuntu 23.10 -The packages support the following platform architectures: -|  i686  | x86_64 | ARMHF | AARCH64 | -|:----:|:------:|:-----:|:-------:| -|❌|✔|❌|✔| - -#### Step 1: Add the OpenSuSE Build Service repository release key -Add the OpenSuSE Build Service repository release key using the following command: -```text -wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_23.10/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null -``` - -#### Step 2: Add the OpenSuSE Build Service repository -Add the OpenSuSE Build Service repository using the following command: -```text -echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_23.10/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list -``` - -#### Step 3: Update your apt package cache -Run: `sudo apt-get update` - -#### Step 4: Install 'onedrive' -Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` - -#### Step 5: Read 'Known Issues' with these packages -Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. - - -## Known Issues with Installing from the above packages - -### 1. The client may segfault | core-dump when exiting -When the client is run in `--monitor` mode manually, or when using the systemd service, the client may segfault on exit. - -This issue is caused by the way the 'onedrive' packages are built using the distribution LDC package & the default distribution compiler options which is the root cause for this issue. Refer to: https://bugs.launchpad.net/ubuntu/+source/ldc/+bug/1895969 - -**Additional references:** -* https://github.com/abraunegg/onedrive/issues/1053 -* https://github.com/abraunegg/onedrive/issues/1609 - -**Resolution Options:** -* Uninstall the package and build client from source From 89b467e2e869efaefb706986b3f3fd4d7df1e12f Mon Sep 17 00:00:00 2001 From: abraunegg Date: Tue, 9 Jan 2024 09:24:29 +1100 Subject: [PATCH 003/305] Re-add documents * Re-add documents --- docs/advanced-usage.md | 302 ++++++++ docs/application-config-options.md | 1075 ++++++++++++++++++++++++++++ docs/application-security.md | 97 +++ docs/build-rpm-howto.md | 379 ++++++++++ docs/business-shared-folders.md | 40 ++ docs/docker.md | 397 ++++++++++ docs/install.md | 282 ++++++++ docs/known-issues.md | 60 ++ docs/national-cloud-deployments.md | 145 ++++ docs/podman.md | 361 ++++++++++ docs/privacy-policy.md | 65 ++ docs/sharepoint-libraries.md | 228 ++++++ docs/terms-of-service.md | 54 ++ docs/ubuntu-package-install.md | 420 +++++++++++ docs/usage.md | 943 ++++++++++++++++++++++++ 15 files changed, 4848 insertions(+) create mode 100644 docs/advanced-usage.md create mode 100644 docs/application-config-options.md create mode 100644 docs/application-security.md create mode 100644 docs/build-rpm-howto.md create mode 100644 docs/business-shared-folders.md create mode 100644 docs/docker.md create mode 100644 docs/install.md create mode 100644 docs/known-issues.md create mode 100644 docs/national-cloud-deployments.md create mode 100644 docs/podman.md create mode 100644 docs/privacy-policy.md create mode 100644 docs/sharepoint-libraries.md create mode 100644 docs/terms-of-service.md create mode 100644 docs/ubuntu-package-install.md create mode 100644 docs/usage.md diff --git 
a/docs/advanced-usage.md b/docs/advanced-usage.md new file mode 100644 index 000000000..2701909d8 --- /dev/null +++ b/docs/advanced-usage.md @@ -0,0 +1,302 @@ +# Advanced Configuration of the OneDrive Free Client +This document covers the following scenarios: +* [Configuring the client to use multiple OneDrive accounts / configurations](#configuring-the-client-to-use-multiple-onedrive-accounts--configurations) +* [Configuring the client to use multiple OneDrive accounts / configurations using Docker](#configuring-the-client-to-use-multiple-onedrive-accounts--configurations-using-docker) +* [Configuring the client for use in dual-boot (Windows / Linux) situations](#configuring-the-client-for-use-in-dual-boot-windows--linux-situations) +* [Configuring the client for use when 'sync_dir' is a mounted directory](#configuring-the-client-for-use-when-sync_dir-is-a-mounted-directory) +* [Upload data from the local ~/OneDrive folder to a specific location on OneDrive](#upload-data-from-the-local-onedrive-folder-to-a-specific-location-on-onedrive) + +## Configuring the client to use multiple OneDrive accounts / configurations +Essentially, each OneDrive account or SharePoint Shared Library which you require to be synced needs to have its own and unique configuration, local sync directory and service files. To do this, the following steps are needed: +1. Create a unique configuration folder for each onedrive client configuration that you need +2. Copy to this folder a copy of the default configuration file +3. Update the default configuration file as required, changing the required minimum config options and any additional options as needed to support your multi-account configuration +4. Authenticate the client using the new configuration directory +5. Test the configuration using '--display-config' and '--dry-run' +6. Sync the OneDrive account data as required using `--synchronize` or `--monitor` +7. Configure a unique systemd service file for this account configuration + +### 1. Create a unique configuration folder for each onedrive client configuration that you need +Make the configuration folder as required for this new configuration, for example: +```text +mkdir ~/.config/my-new-config +``` + +### 2. Copy to this folder a copy of the default configuration file +Copy to this folder a copy of the default configuration file by downloading this file from GitHub and saving this file in the directory created above: +```text +wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/my-new-config/config +``` + +### 3. Update the default configuration file +The following config options *must* be updated to ensure that individual account data is not cross populated with other OneDrive accounts or other configurations: +* sync_dir + +Other options that may require to be updated, depending on the OneDrive account that is being configured: +* drive_id +* application_id +* sync_business_shared_folders +* skip_dir +* skip_file +* Creation of a 'sync_list' file if required +* Creation of a 'business_shared_folders' file if required + +### 4. Authenticate the client +Authenticate the client using the specific configuration file: +```text +onedrive --confdir="~/.config/my-new-config" +``` +You will be asked to open a specific URL by using your web browser where you will have to login into your Microsoft Account and give the application the permission to access your files. After giving permission to the application, you will be redirected to a blank page. 
Copy the URI of the blank page into the application.
+```text
+[user@hostname ~]$ onedrive --confdir="~/.config/my-new-config"
+Configuration file successfully loaded
+Configuring Global Azure AD Endpoints
+Authorize this app visiting:
+
+https://.....
+
+Enter the response uri:
+
+```
+
+### 5. Display and Test the configuration
+Test the configuration using '--display-config' and '--dry-run'. This allows you to validate the configuration you have made and to correct any issues before the configuration is put into use.
+
+#### Display the configuration
+```text
+onedrive --confdir="~/.config/my-new-config" --display-config
+```
+
+#### Test the configuration by performing a dry-run
+```text
+onedrive --confdir="~/.config/my-new-config" --synchronize --verbose --dry-run
+```
+
+If both of these operate as expected, the configuration of this client setup is complete and validated. If not, amend your configuration as required.
+
+### 6. Sync the OneDrive account data as required
+Sync the data for the new account configuration as required:
+```text
+onedrive --confdir="~/.config/my-new-config" --synchronize --verbose
+```
+or
+```text
+onedrive --confdir="~/.config/my-new-config" --monitor --verbose
+```
+
+* `--synchronize` does a one-time sync
+* `--monitor` keeps the application running and monitoring for changes, both local and remote
+
+### 7. Automatic syncing of new OneDrive configuration
+In order to automatically start syncing your OneDrive accounts, you will need to create a service file for each account, based on the default systemd service file found in the applicable systemd folder for your distribution:
+* RHEL / CentOS: `/usr/lib/systemd/system`
+* Others: `/usr/lib/systemd/user` and `/lib/systemd/system`
+
+### Step 1: Create a new systemd service file
+#### Red Hat Enterprise Linux, CentOS Linux
+Copy the required service file to a new name:
+```text
+sudo cp /usr/lib/systemd/system/onedrive.service /usr/lib/systemd/system/onedrive-my-new-config.service
+```
+or
+```text
+sudo cp /usr/lib/systemd/system/onedrive@.service /usr/lib/systemd/system/onedrive-my-new-config@.service
+```
+
+#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora
+Copy the required service file to a new name:
+```text
+sudo cp /usr/lib/systemd/user/onedrive.service /usr/lib/systemd/user/onedrive-my-new-config.service
+```
+or
+```text
+sudo cp /lib/systemd/system/onedrive@.service /lib/systemd/system/onedrive-my-new-config@.service
+```
+
+### Step 2: Edit new systemd service file
+Edit the new systemd service file, updating the line beginning with `ExecStart` so that the confdir mirrors the one you used above:
+```text
+ExecStart=/usr/local/bin/onedrive --monitor --confdir="/full/path/to/config/dir"
+```
+
+Example:
+```text
+ExecStart=/usr/local/bin/onedrive --monitor --confdir="/home/myusername/.config/my-new-config"
+```
+
+**Note:** When running the client manually, `--confdir="~/.config/......` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be expanded.
+
+### Step 3: Enable the new systemd service
+Once the file is correctly edited, you can enable the new systemd service using the following commands.
+ +#### Red Hat Enterprise Linux, CentOS Linux +```text +systemctl enable onedrive-my-new-config +systemctl start onedrive-my-new-config +``` + +#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora +```text +systemctl --user enable onedrive-my-new-config +systemctl --user start onedrive-my-new-config +``` +or +```text +systemctl --user enable onedrive-my-new-config@myusername.service +systemctl --user start onedrive-my-new-config@myusername.service +``` + +### Step 4: Viewing systemd status and logs for the custom service +#### Viewing systemd service status - Red Hat Enterprise Linux, CentOS Linux +```text +systemctl status onedrive-my-new-config +``` + +#### Viewing systemd service status - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora +```text +systemctl --user status onedrive-my-new-config +``` + +#### Viewing journalctl systemd logs - Red Hat Enterprise Linux, CentOS Linux +```text +journalctl --unit=onedrive-my-new-config -f +``` + +#### Viewing journalctl systemd logs - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora +```text +journalctl --user --unit=onedrive-my-new-config -f +``` + +### Step 5: (Optional) Run custom systemd service at boot without user login +In some cases it may be desirable for the systemd service to start without having to login as your 'user' + +All the systemd steps above that utilise the `--user` option, will run the systemd service as your particular user. As such, the systemd service will not start unless you actually login to your system. + +To avoid this issue, you need to reconfigure your 'user' account so that the systemd services you have created will startup without you having to login to your system: +```text +loginctl enable-linger +``` + +Example: +```text +alex@ubuntu-headless:~$ loginctl enable-linger alex +``` + +Repeat these steps for each OneDrive new account that you wish to use. + +## Configuring the client to use multiple OneDrive accounts / configurations using Docker +In some situations it may be desirable to run multiple Docker containers at the same time, each with their own configuration. + +To run the Docker container successfully, it needs two unique Docker volumes to operate: +* Your configuration Docker volumes +* Your data Docker volume + +When running multiple Docker containers, this is no different - each Docker container must have it's own configuration and data volume. + +### High level steps: +1. Create the required unique Docker volumes for the configuration volume +2. Create the required unique local path used for the Docker data volume +3. Start the multiple Docker containers with the required configuration for each container + +#### Create the required unique Docker volumes for the configuration volume +Create the required unique Docker volumes for the configuration volume(s): +```text +docker volume create onedrive_conf_sharepoint_site1 +docker volume create onedrive_conf_sharepoint_site2 +docker volume create onedrive_conf_sharepoint_site3 +... +docker volume create onedrive_conf_sharepoint_site50 +``` + +#### Create the required unique local path used for the Docker data volume +Create the required unique local path used for the Docker data volume +```text +mkdir -p /use/full/local/path/no/tilda/SharePointSite1 +mkdir -p /use/full/local/path/no/tilda/SharePointSite2 +mkdir -p /use/full/local/path/no/tilda/SharePointSite3 +... 
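+# The paths above are placeholders; only sites 1-3 and 50 are shown explicitly.
+# As a sketch (assuming a POSIX shell and the same hypothetical path layout), the
+# remaining directories could equally be created in a single loop:
+# for i in $(seq 1 50); do mkdir -p "/use/full/local/path/no/tilda/SharePointSite${i}"; done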
+mkdir -p /use/full/local/path/no/tilda/SharePointSite50
+```
+
+#### Start the Docker container with the required configuration (example)
+```text
+docker run -it --name onedrive_sharepoint_site1 -v onedrive_conf_sharepoint_site1:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite1:/onedrive/data" driveone/onedrive:latest
+docker run -it --name onedrive_sharepoint_site2 -v onedrive_conf_sharepoint_site2:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite2:/onedrive/data" driveone/onedrive:latest
+docker run -it --name onedrive_sharepoint_site3 -v onedrive_conf_sharepoint_site3:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite3:/onedrive/data" driveone/onedrive:latest
+...
+docker run -it --name onedrive_sharepoint_site50 -v onedrive_conf_sharepoint_site50:/onedrive/conf -v "/use/full/local/path/no/tilda/SharePointSite50:/onedrive/data" driveone/onedrive:latest
+```
+Note that each container must be given its own unique `--name`, as Docker will not allow two containers to share the same name.
+
+#### TIP
+To avoid 're-authenticating' and 'authorising' each individual Docker container, if all the Docker containers are using the 'same' OneDrive credentials, you can re-use the 'refresh_token' from one Docker container to another by copying this file to the configuration Docker volume of each Docker container.
+
+If the account credentials are different, you will need to re-authenticate each Docker container individually.
+
+## Configuring the client for use in dual-boot (Windows / Linux) situations
+When dual booting Windows and Linux, depending on the Windows OneDrive account configuration, the 'Files On-Demand' option may be enabled when running OneDrive within your Windows environment.
+
+When this option is enabled in Windows, if you are sharing this location between your Windows and Linux systems, all files will be a 0 byte link, and cannot be used under Linux.
+
+To fix the problem of Windows turning all files (that should be kept offline) into links, you have to uncheck a specific option in the OneDrive settings window. The option in question is `Save space and download files as you use them`.
+
+To find this setting, open the OneDrive pop-up window from the taskbar, click "Help & Settings" > "Settings". This opens a new window. Go to the tab "Settings" and look for the section "Files On-Demand".
+
+After unchecking the option and clicking "OK", the Windows OneDrive client should restart itself and start actually downloading your files so they will truly be available on your disk when offline. These files will then be fully accessible under Linux and the Linux OneDrive client.
+
+| OneDrive Personal | OneDrive Business / SharePoint |
+|---|---|
+| ![Uncheck-Personal](./images/personal-files-on-demand.png) | ![Uncheck-Business](./images/business-files-on-demand.png) |
+
+## Configuring the client for use when 'sync_dir' is a mounted directory
+In some environments, your configured 'sync_dir' may point to another mounted file system - an NFS or CIFS location, or an external drive (USB stick, eSATA etc). In such a setup, you configure your 'sync_dir' as follows:
+```text
+sync_dir = "/path/to/mountpoint/OneDrive"
+```
+
+The issue here is - how does the client react if the mount point gets removed - network loss, device removal?
+
+The client has zero knowledge of any event that causes a mount point to become unavailable, so the client (if you are running it as a service) will assume that you deleted the files and will go ahead and delete all your files on OneDrive. This is most certainly an undesirable action.
+
+There are a few options you can configure in your 'config' file to help prevent this sort of event from occurring:
+1. classify_as_big_delete
+2. check_nomount
+3. check_nosync
+
+**Note:** Before making any change to your configuration, stop any sync process & stop any onedrive systemd service from running.
+
+### classify_as_big_delete
+By default, this uses a value of 1000 files|folders. If an undesirable unmount occurs and more than 1000 items would be removed, this default threshold will prevent the client from executing the online delete. Modify this value up or down as desired.
+
+### check_nomount & check_nosync
+These two options are really the right safeguards to use.
+
+In your 'mount point', *before* you mount your external folder|device, create an empty `.nosync` file, so that this is the *only* file present in the mount location before you mount your data to your mount point. When you mount your data, this '.nosync' file will not be visible, but if the device you are mounting goes away, this '.nosync' file is the only file visible.
+
+Next, in your 'config' file, configure the following options: `check_nomount = "true"` and `check_nosync = "true"`
+
+What this will do is tell the client that if, at *any* point, this file is seen, it must stop syncing - thus protecting your online data from being deleted because the mounted device suddenly became unavailable.
+
+After making this sort of change, test with `--dry-run` so you can see the impacts of your mount point being unavailable, and how the client now reacts. Once you are happy with how the system will react, restart your sync processes.
+
+
+## Upload data from the local ~/OneDrive folder to a specific location on OneDrive
+In some environments, you may not want your local ~/OneDrive folder to be uploaded directly to the root of your OneDrive account online.
+
+Unfortunately, the OneDrive API lacks any facility to perform a re-direction of data during upload.
+
+The workaround for this is to structure your local filesystem and reconfigure your client to achieve the desired goal.
+
+### High level steps:
+1. Create a new folder, for example `/opt/OneDrive`
+2. Configure your application config 'sync_dir' to look at this folder
+3. Inside `/opt/OneDrive` create the folder you wish to sync the data online to, for example: `/opt/OneDrive/RemoteOnlineDestination`
+4. Configure the application to only sync `/opt/OneDrive/RemoteOnlineDestination` via 'sync_list'
+5. 
Symbolically link `~/OneDrive` -> `/opt/OneDrive/RemoteOnlineDestination` + +### Outcome: +* Your `~/OneDrive` will look / feel as per normal +* The data will be stored online under `/RemoteOnlineDestination` + +### Testing: +* Validate your configuration with `onedrive --display-config` +* Test your configuration with `onedrive --dry-run` diff --git a/docs/application-config-options.md b/docs/application-config-options.md new file mode 100644 index 000000000..31b50614b --- /dev/null +++ b/docs/application-config-options.md @@ -0,0 +1,1075 @@ +# Application Configuration Options for the OneDrive Client for Linux +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. + +## Table of Contents + +- [Configuration File Options](#configuration-file-options) + - [application_id](#application_id) + - [azure_ad_endpoint](#azure_ad_endpoint) + - [azure_tenant_id](#azure_tenant_id) + - [bypass_data_preservation](#bypass_data_preservation) + - [check_nomount](#check_nomount) + - [check_nosync](#check_nosync) + - [classify_as_big_delete](#classify_as_big_delete) + - [cleanup_local_files](#cleanup_local_files) + - [connect_timeout](#connect_timeout) + - [data_timeout](#data_timeout) + - [debug_https](#debug_https) + - [disable_download_validation](#disable_download_validation) + - [disable_notifications](#disable_notifications) + - [disable_upload_validation](#disable_upload_validation) + - [display_running_config](#display_running_config) + - [dns_timeout](#dns_timeout) + - [download_only](#download_only) + - [drive_id](#drive_id) + - [dry_run](#dry_run) + - [enable_logging](#enable_logging) + - [force_http_11](#force_http_11) + - [ip_protocol_version](#ip_protocol_version) + - [local_first](#local_first) + - [log_dir](#log_dir) + - [monitor_fullscan_frequency](#monitor_fullscan_frequency) + - [monitor_interval](#monitor_interval) + - [monitor_log_frequency](#monitor_log_frequency) + - [no_remote_delete](#no_remote_delete) + - [operation_timeout](#operation_timeout) + - [rate_limit](#rate_limit) + - [read_only_auth_scope](#read_only_auth_scope) + - [remove_source_files](#remove_source_files) + - [resync](#resync) + - [resync_auth](#resync_auth) + - [skip_dir](#skip_dir) + - [skip_dir_strict_match](#skip_dir_strict_match) + - [skip_dotfiles](#skip_dotfiles) + - [skip_file](#skip_file) + - [skip_size](#skip_size) + - [skip_symlinks](#skip_symlinks) + - [space_reservation](#space_reservation) + - [sync_business_shared_items](#sync_business_shared_items) + - [sync_dir](#sync_dir) + - [sync_dir_permissions](#sync_dir_permissions) + - [sync_file_permissions](#sync_file_permissions) + - [sync_root_files](#sync_root_files) + - [upload_only](#upload_only) + - [user_agent](#user_agent) + - [webhook_enabled](#webhook_enabled) + - [webhook_expiration_interval](#webhook_expiration_interval) + - [webhook_listening_host](#webhook_listening_host) + - [webhook_listening_port](#webhook_listening_port) + - [webhook_public_url](#webhook_public_url) + - [webhook_renewal_interval](#webhook_renewal_interval) +- [Command Line Interface (CLI) Only Options](#command-line-interface-cli-only-options) + - [CLI Option: --auth-files](#cli-option---auth-files) + - [CLI Option: --auth-response](#cli-option---auth-response) + - [CLI Option: 
--confdir](#cli-option---confdir) + - [CLI Option: --create-directory](#cli-option---create-directory) + - [CLI Option: --create-share-link](#cli-option---create-share-link) + - [CLI Option: --destination-directory](#cli-option---destination-directory) + - [CLI Option: --display-config](#cli-option---display-config) + - [CLI Option: --display-sync-status](#cli-option---display-sync-status) + - [CLI Option: --display-quota](#cli-option---display-quota) + - [CLI Option: --force](#cli-option---force) + - [CLI Option: --force-sync](#cli-option---force-sync) + - [CLI Option: --get-file-link](#cli-option---get-file-link) + - [CLI Option: --get-sharepoint-drive-id](#cli-option---get-sharepoint-drive-id) + - [CLI Option: --logout](#cli-option---logout) + - [CLI Option: --modified-by](#cli-option---modified-by) + - [CLI Option: --monitor | -m](#cli-option---monitor--m) + - [CLI Option: --print-access-token](#cli-option---print-access-token) + - [CLI Option: --reauth](#cli-option---reauth) + - [CLI Option: --remove-directory](#cli-option---remove-directory) + - [CLI Option: --single-directory](#cli-option---single-directory) + - [CLI Option: --source-directory](#cli-option---source-directory) + - [CLI Option: --sync | -s](#cli-option---sync--s) + - [CLI Option: --verbose | -v+](#cli-option---verbose--v) + - [CLI Option: --with-editing-perms](#cli-option---with-editing-perms) +- [Depreciated Configuration File and CLI Options](#depreciated-configuration-file-and-cli-options) + - [min_notify_changes](#min_notify_changes) + - [CLI Option: --synchronize](#cli-option---synchronize) + + +## Configuration File Options + +### application_id +_**Description:**_ This is the config option for application id that used used to identify itself to Microsoft OneDrive. In some circumstances, it may be desirable to use your own application id. To do this, you must register a new application with Microsoft Azure via https://portal.azure.com/, then use your new application id with this config option. + +_**Value Type:**_ String + +_**Default Value:**_ d50ca740-c83f-4d1b-b616-12c519384f0c + +_**Config Example:**_ `application_id = "d50ca740-c83f-4d1b-b616-12c519384f0c"` + +### azure_ad_endpoint +_**Description:**_ This is the config option to change the Microsoft Azure Authentication Endpoint that the client uses to conform with data and security requirements that requires data to reside within the geographic borders of that country. + +_**Value Type:**_ String + +_**Default Value:**_ *Empty* - not required for normal operation + +_**Valid Values:**_ USL4, USL5, DE, CN + +_**Config Example:**_ `azure_ad_endpoint = "DE"` + +### azure_tenant_id +_**Description:**_ This config option allows the locking of the client to a specific single tenant and will configure your client to use the specified tenant id in its Azure AD and Graph endpoint URIs, instead of "common". The tenant id may be the GUID Directory ID or the fully qualified tenant name. + +_**Value Type:**_ String + +_**Default Value:**_ *Empty* - not required for normal operation + +_**Config Example:**_ `azure_tenant_id = "example.onmicrosoft.us"` or `azure_tenant_id = "0c4be462-a1ab-499b-99e0-da08ce52a2cc"` + +_**Additional Usage Requirement:**_ Must be configured if 'azure_ad_endpoint' is configured. + +### bypass_data_preservation +_**Description:**_ This config option allows the disabling of preserving local data by renaming the local file in the event of data conflict. 
If this is enabled, you will experience data loss on your local data as the local file will be over-written with data from OneDrive online. Use with care and caution. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `bypass_data_preservation = "false"` or `bypass_data_preservation = "true"` + +### check_nomount +_**Description:**_ This config option is useful to prevent application startup & ongoing use in 'Monitor Mode' if the configured 'sync_dir' is a separate disk that is being mounted by your system. This option will check for the presence of a `.nosync` file in your mount point, and if present, abort any sync process to preserve data. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `check_nomount = "false"` or `check_nomount = "true"` + +_**CLI Option:**_ `--check-for-nomount` + +_**Additional Usage Requirement:**_ Create a `.nosync` file in your mount point *before* you mount your disk so that this is visible, in your mount point if your disk is unmounted. + +### check_nosync +_**Description:**_ This config option is useful to prevent the sync of a *local* directory to Microsoft OneDrive. It will *not* check for this file online to prevent the download of directories to your local system. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `check_nosync = "false"` or `check_nosync = "true"` + +_**CLI Option Use:**_ `--check-for-nosync` + +_**Additional Usage Requirement:**_ Create a `.nosync` file in any *local* directory that you wish to not sync to Microsoft OneDrive when you enable this option. + +### classify_as_big_delete +_**Description:**_ This config option defines the number of children in a path that is locally removed which will be classified as a 'big data delete' to safeguard large data removals - which are typically accidental local delete events. + +_**Value Type:**_ Integer + +_**Default Value:**_ 1000 + +_**Config Example:**_ `classify_as_big_delete = "2000"` + +_**CLI Option Use:**_ `--classify-as-big-delete 2000` + +_**Additional Usage Requirement:**_ If this option is triggered, you will need to add `--force` to force a sync to occur. + +### cleanup_local_files +_**Description:**_ This config option provides the capability to cleanup local files and folders if they are removed online. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `cleanup_local_files = "false"` or `cleanup_local_files = "true"` + +_**CLI Option Use:**_ `--cleanup-local-files` + +_**Additional Usage Requirement:**_ This configuration option can only be used with 'download_only'. It cannot be used with any other application option. + +### connect_timeout +_**Description:**_ This configuration setting manages the TCP connection timeout duration in seconds for HTTPS connections to Microsoft OneDrive when using the curl library. + +_**Value Type:**_ Integer + +_**Default Value:**_ 30 + +_**Config Example:**_ `connect_timeout = "20"` + +### data_timeout +_**Description:**_ This setting controls the timeout duration, in seconds, for when data is not received on an active connection to Microsoft OneDrive over HTTPS when using the curl library, before that connection is timeout out. + +_**Value Type:**_ Integer + +_**Default Value:**_ 240 + +_**Config Example:**_ `data_timeout = "300"` + +### debug_https +_**Description:**_ This setting controls whether the curl library is configured to output additional data to assist with diagnosing HTTPS issues and problems. 
+ +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `debug_https = "false"` or `debug_https = "true"` + +_**CLI Option Use:**_ `--debug-https` + +_**Additional Usage Notes:**_ Whilst this option can be used at any time, it is advisable that you only use this option when advised as this will output your `Authorization: bearer` - which is your authentication token to Microsoft OneDrive. + +### disable_download_validation +_**Description:**_ This option determines whether the client will conduct integrity validation on files downloaded from Microsoft OneDrive. Sometimes, when downloading files, particularly from SharePoint, there is a discrepancy between the file size reported by the OneDrive API and the byte count received from the SharePoint HTTP Server for the same file. Enable this option to disable the integrity checks performed by this client. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `disable_download_validation = "false"` or `disable_download_validation = "true"` + +_**CLI Option Use:**_ `--disable-download-validation` + +_**Additional Usage Notes:**_ If you're downloading data from SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack. + +### disable_notifications +_**Description:**_ This setting controls whether GUI notifications are sent from the client to your display manager session. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `disable_notifications = "false"` or `disable_notifications = "true"` + +_**CLI Option Use:**_ `--disable-notifications` + +### disable_upload_validation +_**Description:**_ This option determines whether the client will conduct integrity validation on files uploaded to Microsoft OneDrive. Sometimes, when uploading files, particularly to SharePoint, SharePoint will modify your file post upload by adding new data to your file which breaks the integrity checking of the upload performed by this client. Enable this option to disable the integrity checks performed by this client. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `disable_upload_validation = "false"` or `disable_upload_validation = "true"` + +_**CLI Option Use:**_ `--disable-upload-validation` + +_**Additional Usage Notes:**_ If you're uploading data to SharePoint or OneDrive Business Shared Folders, you might find it necessary to activate this option. It's important to note that any issues encountered aren't due to a problem with this client; instead, they should be regarded as issues with the Microsoft OneDrive technology stack. + +### display_running_config +_**Description:**_ This option will include the running config of the application at application startup. This may be desirable to enable when running in containerised environments so that any application logging that is occuring, will have the application configuration being consumed at startup, written out to any applicable log file. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `display_running_config = "false"` or `display_running_config = "true"` + +_**CLI Option Use:**_ `--display-running-config` + +### dns_timeout +_**Description:**_ This setting controls the libcurl DNS cache value. 
By default, libcurl caches this info for 60 seconds. This libcurl DNS cache timeout is entirely speculative that a name resolves to the same address for a small amount of time into the future as libcurl does not use DNS TTL properties. We recommend users not to tamper with this option unless strictly necessary. + +_**Value Type:**_ Integer + +_**Default Value:**_ 60 + +_**Config Example:**_ `dns_timeout = "90"` + +### download_only +_**Description:**_ This setting forces the client to only download data from Microsoft OneDrive and replicate that data locally. No changes made locally will be uploaded to Microsoft OneDrive when using this option. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `download_only = "false"` or `download_only = "true"` + +_**CLI Option Use:**_ `--download-only` + +### drive_id +_**Description:**_ This setting controls the specific drive identifier the client will use when syncing with Microsoft OneDrive. + +_**Value Type:**_ String + +_**Default Value:**_ *None* + +_**Config Example:**_ `drive_id = "b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB"` + +_**Additional Usage Notes:**_ This option is typically only used when configuring the client to sync a specific SharePoint Library. If this configuration option is specified in your config file, a value must be specified otherwise the application will exit citing a fatal error has occured. + +### dry_run +_**Description:**_ This setting controls the application capability to test your application configuration without actually performing any actual activity (download, upload, move, delete, folder creation). + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `dry_run = "false"` or `dry_run = "true"` + +_**CLI Option Use:**_ `--dry-run` + +### enable_logging +_**Description:**_ This setting controls the application logging all actions to a separate file. By default, all log files will be written to `/var/log/onedrive`, however this can changed by using the 'log_dir' config option + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `enable_logging = "false"` or `enable_logging = "true"` + +_**CLI Option Use:**_ `--enable-logging` + +_**Additional Usage Notes:**_ Additional configuration is potentially required to configure the default log directory. Refer to usage.md for details (ADD LINK) + +### force_http_11 +_**Description:**_ This setting controls the application HTTP protocol version. By default, the application will use libcurl defaults for which HTTP prodocol version will be used to interact with Microsoft OneDrive. Use this setting to downgrade libcurl to only use HTTP/1.1. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `force_http_11 = "false"` or `force_http_11 = "true"` + +_**CLI Option Use:**_ `--force-http-11` + +### ip_protocol_version +_**Description:**_ This setting controls the application IP protocol that should be used when communicating with Microsoft OneDrive. The default is to use IPv4 and IPv6 networks for communicating to Microsoft OneDrive. 
+ +_**Value Type:**_ Integer + +_**Default Value:**_ 0 + +_**Valid Values:**_ 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only + +_**Config Example:**_ `ip_protocol_version = "0"` or `ip_protocol_version = "1"` or `ip_protocol_version = "2"` + +_**Additional Usage Notes:**_ In some environments where IPv4 and IPv6 are configured at the same time, this causes resolution and routing issues to Microsoft OneDrive. If this is the case, it is advisable to change 'ip_protocol_version' to match your environment. + +### local_first +_**Description:**_ This setting controls what the application considers the 'source of truth' for your data. By default, what is stored online will be considered as the 'source of truth' when syncing to your local machine. When using this option, your local data will be considered the 'source of truth'. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `local_first = "false"` or `local_first = "true"` + +_**CLI Option Use:**_ `--local-first` + +### log_dir +_**Description:**_ This setting controls the custom application log path when 'enable_logging' has been enabled. By default, all log files will be written to `/var/log/onedrive`. + +_**Value Type:**_ String + +_**Default Value:**_ *None* + +_**Config Example:**_ `log_dir = "~/logs/"` + +_**CLI Option Use:**_ `--log-dir "~/logs/"` + +### monitor_fullscan_frequency +_**Description:**_ This configuration option controls the number of 'monitor_interval' iterations between when a full scan of your data is performed to ensure data integrity and consistency. + +_**Value Type:**_ Integer + +_**Default Value:**_ 12 + +_**Config Example:**_ `monitor_fullscan_frequency = "24"` + +_**CLI Option Use:**_ `--monitor-fullscan-frequency '24'` + +_**Additional Usage Notes:**_ By default without configuration, 'monitor_fullscan_frequency' is set to 12. In this default state, this means that a full scan is performed every 'monitor_interval' x 'monitor_fullscan_frequency' = 3600 seconds. This setting is only applicable when running in `--monitor` mode. Setting this configuration option to '0' will *disable* the full scan of your data online. + +### monitor_interval +_**Description:**_ This configuration setting determines how often the synchronisation loops run in --monitor mode, measured in seconds. When this time period elapses, the client will check for online changes in Microsoft OneDrive, conduct integrity checks on local data and scan the local 'sync_dir' to identify any new content that hasn't been uploaded yet. + +_**Value Type:**_ Integer + +_**Default Value:**_ 300 + +_**Config Example:**_ `monitor_interval = "600"` + +_**CLI Option Use:**_ `--monitor-interval '600'` + +_**Additional Usage Notes:**_ A minimum value of 300 is enforced for this configuration setting. + +### monitor_log_frequency +_**Description:**_ This configuration option controls the suppression of frequently printed log items to the system console when using `--monitor` mode. The aim of this configuration item is to reduce the log output when near zero sync activity is occuring. 
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 12
+
+_**Config Example:**_ `monitor_log_frequency = "24"`
+
+_**CLI Option Use:**_ `--monitor-log-frequency '24'`
+
+_**Additional Usage Notes:**_
+
+By default, at application start-up when using `--monitor` mode, the following will be logged to indicate that the application has correctly started and has performed all the initial processing steps:
+```text
+Reading configuration file: /home/user/.config/onedrive/config
+Configuration file successfully loaded
+Configuring Global Azure AD Endpoints
+Sync Engine Initialised with new Onedrive API instance
+All application operations will be performed in: /home/user/OneDrive
+OneDrive synchronisation interval (seconds): 300
+Initialising filesystem inotify monitoring ...
+Performing initial syncronisation to ensure consistent local state ...
+Starting a sync with Microsoft OneDrive
+Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB ..
+Processing changes and items received from Microsoft OneDrive ...
+Performing a database consistency and integrity check on locally stored data ...
+Scanning the local file system '~/OneDrive' for new data to upload ...
+Performing a final true-up scan of online data from Microsoft OneDrive
+Fetching items from the OneDrive API for Drive ID: b!bO8V6s9SSk9R7mWhpIjUrotN73WlW3tEv3OxP_QfIdQimEdOHR-1So6CqeG1MfDB ..
+Processing changes and items received from Microsoft OneDrive ...
+Sync with Microsoft OneDrive is complete
+```
+Then, based on 'monitor_log_frequency', the following output will be logged until the suppression loop value is reached:
+```text
+Starting a sync with Microsoft OneDrive
+Syncing changes from Microsoft OneDrive ...
+Sync with Microsoft OneDrive is complete
+```
+**Note:** The additional log output `Performing a database consistency and integrity check on locally stored data ...` will only be displayed when this activity is occurring, which is triggered by 'monitor_fullscan_frequency'.
+
+**Note:** If verbose application output is being used (`--verbose`), then this configuration setting has zero effect, as application verbose output takes priority over application output suppression.
+
+### no_remote_delete
+_**Description:**_ This configuration option controls whether local file and folder deletes are actioned on Microsoft OneDrive.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `no_remote_delete = "false"` or `no_remote_delete = "true"`
+
+_**CLI Option Use:**_ `--no-remote-delete`
+
+_**Additional Usage Notes:**_ This configuration option can *only* be used in conjunction with `--upload-only`
+
+### operation_timeout
+_**Description:**_ This configuration option controls the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. We recommend users not to tamper with this option unless strictly necessary.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 3600
+
+_**Config Example:**_ `operation_timeout = "3600"`
+
+### rate_limit
+_**Description:**_ This configuration option controls the bandwidth used by the application, per thread, when interacting with Microsoft OneDrive.
+ +_**Value Type:**_ Integer + +_**Default Value:**_ 0 (unlimited, use available bandwidth per thread) + +_**Valid Values:**_ Valid tested values for this configuration option are as follows: + +* 131072 = 128 KB/s - absolute minimum for basic application operations to prevent timeouts +* 262144 = 256 KB/s +* 524288 = 512 KB/s +* 1048576 = 1 MB/s +* 10485760 = 10 MB/s +* 104857600 = 100 MB/s + +_**Config Example:**_ `rate_limit = "131072"` + +### read_only_auth_scope +_**Description:**_ This configuration option controls whether the OneDrive Client for Linux operates in a totally in read-only operation. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `read_only_auth_scope = "false"` or `read_only_auth_scope = "true"` + +_**Additional Usage Notes:**_ When using 'read_only_auth_scope' you also will need to remove your existing application access consent otherwise old authentication consent will be valid and will be used. This will mean the application will technically have the consent to upload data until you revoke this consent. + +### remove_source_files +_**Description:**_ This configuration option controls whether the OneDrive Client for Linux removes the local file post successful transfer to Microsoft OneDrive. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `remove_source_files = "false"` or `remove_source_files = "true"` + +_**CLI Option Use:**_ `--remove-source-files` + +_**Additional Usage Notes:**_ This configuration option can *only* be used in conjunction with `--upload-only` + +### resync +_**Description:**_ This configuration option controls whether the known local sync state with Microsoft OneDrive is removed at application startup. When this option is used, a full scan of your data online is performed to ensure that the local sync state is correctly built back up. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `resync = "false"` or `resync = "true"` + +_**CLI Option Use:**_ `--resync` + +_**Additional Usage Notes:**_ It's highly recommended to use this option only if the application prompts you to do so. Don't blindly use this option as a default option. If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration: +* drive_id +* sync_dir +* skip_file +* skip_dir +* skip_dotfiles +* skip_symlinks +* sync_business_shared_items +* Creating, Modifying or Deleting the 'sync_list' file + +### resync_auth +_**Description:**_ This configuration option controls the approval of performing a 'resync' which can be beneficial in automated environments. + +_**Value Type:**_ Boolean + +_**Default Value:**_ False + +_**Config Example:**_ `resync_auth = "false"` or `resync_auth = "true"` + +_**CLI Option Use:**_ `--resync-auth` + +_**Additional Usage Notes:**_ In certain automated environments (assuming you know what you're doing due to automation), to avoid the 'proceed with acknowledgement' resync requirement, this option allows you to automatically acknowledge the resync prompt. + +### skip_dir +_**Description:**_ This configuration option controls whether the application skips certain directories from being synced. Directories can be specified in 2 ways: + +* As a single entry. This will search the respective path for this entry and skip all instances where this directory is present, where ever it may exist. +* As a full path entry. 
This will skip the explicit path as set.
+
+**Important:** Entries for 'skip_dir' are *relative* to your 'sync_dir' path.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ *Empty* - not required for normal operation
+
+_**Config Example:**_
+
+Patterns are case insensitive. `*` and `?` [wildcard characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns.
+
+```text
+skip_dir = "Desktop|Documents/IISExpress|Documents/SQL Server Management Studio|Documents/Visual Studio*|Documents/WindowsPowerShell|.Rproj-user"
+```
+
+The 'skip_dir' option can also be specified multiple times within your config file, for example:
+```text
+skip_dir = "SkipThisDirectoryAnywhere"
+skip_dir = ".SkipThisOtherDirectoryAnywhere"
+skip_dir = "/Explicit/Path/To/A/Directory"
+skip_dir = "/Another/Explicit/Path/To/Different/Directory"
+```
+
+This will be interpreted the same as:
+```text
+skip_dir = "SkipThisDirectoryAnywhere|.SkipThisOtherDirectoryAnywhere|/Explicit/Path/To/A/Directory|/Another/Explicit/Path/To/Different/Directory"
+```
+
+_**CLI Option Use:**_ `--skip-dir 'SkipThisDirectoryAnywhere|.SkipThisOtherDirectoryAnywhere|/Explicit/Path/To/A/Directory|/Another/Explicit/Path/To/Different/Directory'`
+
+_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. If both the config file entries and the CLI option are used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync.
+
+### skip_dir_strict_match
+_**Description:**_ This configuration option controls whether the application performs strict directory matching when checking 'skip_dir' items. When enabled, the 'skip_dir' item must be a full path match to the path to be skipped.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `skip_dir_strict_match = "false"` or `skip_dir_strict_match = "true"`
+
+_**CLI Option Use:**_ `--skip-dir-strict-match`
+
+### skip_dotfiles
+_**Description:**_ This configuration option controls whether the application will skip all .files and .folders when performing sync operations.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `skip_dotfiles = "false"` or `skip_dotfiles = "true"`
+
+_**CLI Option Use:**_ `--skip-dot-files`
+
+_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
+
+### skip_file
+_**Description:**_ This configuration option controls whether the application skips certain files from being synced.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ `~*|.~*|*.tmp|*.swp|*.partial`
+
+_**Config Example:**_
+
+Patterns are case insensitive. `*` and `?` [wildcard characters](https://technet.microsoft.com/en-us/library/bb490639.aspx) are supported. Use `|` to separate multiple patterns.
+
+By default, the following files will be skipped:
+* Files that start with ~
+* Files that start with .~ (like .~lock.* files generated by LibreOffice)
+* Files that end in .tmp, .swp and .partial
+
+Files can be skipped in the following fashion:
+* Specify a wildcard, eg: '*.txt' (skip all txt files)
+* Explicitly specify the filename and its full path relative to your sync_dir, eg: '/path/to/file/filename.ext'
+* Explicitly specify the filename only and skip every instance of this filename, eg: 'filename.ext'
+
+```text
+# When changing a config option below, remove the '#' from the start of the line
+# For explanations of all config options below see docs/USAGE.md or the man page.
+#
+# sync_dir = "~/OneDrive"
+skip_file = "~*|/Documents/OneNote*|/Documents/config.xlaunch|myfile.ext|/Documents/keepass.kdbx"
+# monitor_interval = "300"
+# skip_dir = ""
+# log_dir = "/var/log/onedrive/"
+```
+The 'skip_file' option can be specified multiple times within your config file, for example:
+```text
+skip_file = "~*|.~*|*.tmp|*.swp"
+skip_file = "*.blah"
+skip_file = "never_sync.file"
+skip_file = "/Documents/keepass.kdbx"
+```
+This will be interpreted the same as:
+```text
+skip_file = "~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx"
+```
+
+_**CLI Option Use:**_ `--skip-file '~*|.~*|*.tmp|*.swp|*.blah|never_sync.file|/Documents/keepass.kdbx'`
+
+_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. If both the config file entries and the CLI option are used, the CLI option will *replace* the config file entries. After changing or modifying this option, you will be required to perform a resync.
+
+### skip_size
+_**Description:**_ This configuration option controls whether the application skips syncing certain files larger than the specified size. The value specified is in MB.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 0 (all files, regardless of size, are synced)
+
+_**Config Example:**_ `skip_size = "50"`
+
+_**CLI Option Use:**_ `--skip-size '50'`
+
+### skip_symlinks
+_**Description:**_ This configuration option controls whether the application will skip all symbolic links when performing sync operations. Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `skip_symlinks = "false"` or `skip_symlinks = "true"`
+
+_**CLI Option Use:**_ `--skip-symlinks`
+
+_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
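+
+The individual 'Client Side Filtering Rules' described above can be combined in a single 'config' file. The following is an illustrative sketch only - the patterns and paths shown are examples, not application defaults, and should be adjusted to suit your own data:
+```text
+# Illustrative Client Side Filtering example - adjust to suit your own data
+skip_dotfiles = "true"
+skip_symlinks = "true"
+skip_dir = "Temp|/Secure/DoNotSync"
+skip_file = "~*|.~*|*.tmp|*.swp|*.partial|*.iso"
+```
+Remember that changing any of these 'Client Side Filtering Rules' requires a resync to be performed.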
+
+### space_reservation
+_**Description:**_ This configuration option controls how much local disk space should be reserved, to prevent the application from filling up your entire disk due to misconfiguration.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ 50 MB (expressed as bytes when using `--display-config`)
+
+_**Config Example:**_ `space_reservation = "100"`
+
+_**CLI Option Use:**_ `--space-reservation '100'`
+
+### sync_business_shared_items
+_**Description:**_ This configuration option controls whether OneDrive Business | Office 365 Shared Folders, when added as a 'shortcut' to your 'My Files', will be synced to your local system.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `sync_business_shared_items = "false"` or `sync_business_shared_items = "true"`
+
+_**CLI Option Use:**_ *none* - this is a config file option only
+
+_**Additional Usage Notes:**_ This option is considered a 'Client Side Filtering Rule' and if configured, is utilised for all sync operations. After changing this option, you will be required to perform a resync.
+
+### sync_dir
+_**Description:**_ This configuration option determines the location on your local filesystem where your data from Microsoft OneDrive will be saved.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ `~/OneDrive`
+
+_**Config Example:**_ `sync_dir = "~/MyDirToSync"`
+
+_**CLI Option Use:**_ `--syncdir '~/MyDirToSync'`
+
+_**Additional Usage Notes:**_ After changing this option, you will be required to perform a resync.
+
+### sync_dir_permissions
+_**Description:**_ This configuration option defines the directory permissions applied when a new directory is created locally during the process of syncing your data from Microsoft OneDrive.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ `700` - This provides the following permissions: `drwx------`
+
+_**Config Example:**_ `sync_dir_permissions = "700"`
+
+_**Additional Usage Notes:**_ Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing directory permissions if you modify this value.
+
+### sync_file_permissions
+_**Description:**_ This configuration option defines the file permissions applied when a new file is created locally during the process of syncing your data from Microsoft OneDrive.
+
+_**Value Type:**_ Integer
+
+_**Default Value:**_ `600` - This provides the following permissions: `-rw-------`
+
+_**Config Example:**_ `sync_file_permissions = "600"`
+
+_**Additional Usage Notes:**_ Use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. You will need to manually update all existing file permissions if you modify this value.
+
+### sync_root_files
+_**Description:**_ This configuration option manages the synchronisation of files located in the 'sync_dir' root when using a 'sync_list'. It enables you to sync all these files by default, eliminating the need to repeatedly modify your 'sync_list' and initiate resynchronisation.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `sync_root_files = "false"` or `sync_root_files = "true"`
+
+_**CLI Option Use:**_ `--sync-root-files`
+
+_**Additional Usage Notes:**_ Although it's not mandatory, it's recommended that after enabling this option, you perform a `--resync`. This ensures that any previously excluded content is now included in your sync process.
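+
+Building on the 'sync_dir_permissions' and 'sync_file_permissions' options above, the following is an illustrative sketch of relaxing the defaults so that group members can read synced data. The octal values and the '~/OneDrive' path are assumptions for the example only:
+```text
+# Illustrative example - directories become 'drwxr-x---', files become '-rw-r-----'
+sync_dir_permissions = "750"
+sync_file_permissions = "640"
+```
+As these options only apply to newly created items, one possible way to bring existing content in an assumed '~/OneDrive' sync directory into line with the example values is:
+```text
+find ~/OneDrive -type d -exec chmod 750 {} \;
+find ~/OneDrive -type f -exec chmod 640 {} \;
+```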
+
+### upload_only
+_**Description:**_ This setting forces the client to only upload data to Microsoft OneDrive and replicate the local state online. By default, this will also remove content online that has been removed locally.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ `upload_only = "false"` or `upload_only = "true"`
+
+_**CLI Option Use:**_ `--upload-only`
+
+_**Additional Usage Notes:**_ To ensure that data deleted locally remains accessible online, you can use the 'no_remote_delete' option. If you want to delete the data from your local storage after a successful upload to Microsoft OneDrive, you can use the 'remove_source_files' option.
+
+### user_agent
+_**Description:**_ This configuration option controls the 'User-Agent' request header that is presented to Microsoft Graph API when accessing the Microsoft OneDrive service. This string lets servers and network peers identify the application, operating system, vendor, and/or version of the application making the request. We recommend users not to tamper with this option unless strictly necessary.
+
+_**Value Type:**_ String
+
+_**Default Value:**_ `ISV|abraunegg|OneDrive Client for Linux/vX.Y.Z-A-bcdefghi`
+
+_**Config Example:**_ `user_agent = "ISV|CompanyName|AppName/Version"`
+
+_**Additional Usage Notes:**_ The current value conforms to the Microsoft Graph API documentation for presenting an appropriate 'User-Agent' header and aligns to the registered 'application_id' that this application uses.
+
+### webhook_enabled
+_**Description:**_ This configuration option controls the application feature 'webhooks' to allow you to subscribe to remote updates as published by Microsoft OneDrive. This option only operates when the client is using 'Monitor Mode'.
+
+_**Value Type:**_ Boolean
+
+_**Default Value:**_ False
+
+_**Config Example:**_ The following is the minimum working example that needs to be added to your 'config' file to enable 'webhooks' successfully:
+```text
+webhook_enabled = "true"
+webhook_public_url = "http://:8888/"
+```
+
+_**Additional Usage Notes:**_
+
+Setting `webhook_enabled = "true"` enables the webhook feature in 'monitor' mode. The onedrive process will listen for incoming updates at a configurable endpoint, which defaults to `0.0.0.0:8888`. The `webhook_public_url` must be set to a public-facing URL for Microsoft to send updates to your webhook.
+
+If your host is directly exposed to the Internet, the `webhook_public_url` can be set to `http://:8888/` to match the default endpoint. In this case, it is also advisable to configure a reverse proxy like `nginx` to proxy the traffic to the client. For example, below is an nginx config snippet to proxy traffic into the webhook:
+```text
+server {
+    listen 80;
+    location /webhooks/onedrive {
+        proxy_http_version 1.1;
+        proxy_pass http://127.0.0.1:8888;
+    }
+}
+```
+
+With nginx running, you can configure 'webhook_public_url' to `https:///webhooks/onedrive`
+
+**Note:** A valid HTTPS certificate is required for your public-facing URL if using nginx.
+
+If you receive this application error: `Subscription validation request failed. Response must exactly match validationToken query parameter.`, the most likely cause for this error will be your nginx configuration.
+ +To resolve this configuration issue, potentially investigate the following configuration for nginx: +```text +server { + listen 80; + location /webhooks/onedrive { + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Original-Request-URI $request_uri; + proxy_read_timeout 300s; + proxy_connect_timeout 75s; + proxy_buffering off; + proxy_http_version 1.1; + proxy_pass http://127.0.0.1:8888; + } +} +``` +For any further nginx configuration assistance, please refer to: https://docs.nginx.com/ + +### webhook_expiration_interval +_**Description:**_ This configuration option controls the frequency at which an existing Microsoft OneDrive webhook subscription expires. The value is expressed in the number of seconds before expiry. + +_**Value Type:**_ Integer + +_**Default Value:**_ 600 + +_**Config Example:**_ `webhook_expiration_interval = "1200"` + +### webhook_listening_host +_**Description:**_ This configuration option controls the host address that this client binds to, when the webhook feature is enabled. + +_**Value Type:**_ String + +_**Default Value:**_ 0.0.0.0 + +_**Config Example:**_ `webhook_listening_host = ""` - this will use the default value. `webhook_listening_host = "192.168.3.4"` - this will bind the client to use the IP address 192.168.3.4. + +_**Additional Usage Notes:**_ Use in conjunction with 'webhook_listening_port' to change the webhook listening endpoint. + +### webhook_listening_port +_**Description:**_ This configuration option controls the TCP port that this client listens on, when the webhook feature is enabled. + +_**Value Type:**_ Integer + +_**Default Value:**_ 8888 + +_**Config Example:**_ `webhook_listening_port = "9999"` + +_**Additional Usage Notes:**_ Use in conjunction with 'webhook_listening_host' to change the webhook listening endpoint. + +### webhook_public_url +_**Description:**_ This configuration option controls the URL that Microsoft will send subscription notifications to. This must be a valid Internet accessible URL. + +_**Value Type:**_ String + +_**Default Value:**_ *empty* + +_**Config Example:**_ + +* If your host is directly connected to the Internet: `webhook_public_url = "http://:8888/"` +* If you are using nginx to reverse proxy traffic from the Internet: `webhook_public_url = "https:///webhooks/onedrive"` + +### webhook_renewal_interval +_**Description:**_ This configuration option controls the frequency at which an existing Microsoft OneDrive webhook subscription is renewed. The value is expressed in the number of seconds before renewal. + +_**Value Type:**_ Integer + +_**Default Value:**_ 300 + +_**Config Example:**_ `webhook_renewal_interval = "600"` + +### webhook_retry_interval +_**Description:**_ This configuration option controls the frequency at which an existing Microsoft OneDrive webhook subscription is retried when creating or renewing a subscription failed. The value is expressed in the number of seconds before retry. + +_**Value Type:**_ Integer + +_**Default Value:**_ 60 + +_**Config Example:**_ `webhook_retry_interval = "120"` + +## Command Line Interface (CLI) Only Options + +### CLI Option: --auth-files +_**Description:**_ This CLI option allows the user to perform application authentication not via an interactive dialog but via specific files that the application uses to read the authentication data from. 
+
+_**Usage Example:**_ `onedrive --auth-files authUrl:responseUrl`
+
+_**Additional Usage Notes:**_ The authorisation URL is written to the specified 'authUrl' file, then onedrive waits for the file 'responseUrl' to be present, and reads the authentication response from that file. Example:
+
+```text
+onedrive --auth-files '~/onedrive-auth-url:~/onedrive-response-url'
+Reading configuration file: /home/alex/.config/onedrive/config
+Configuration file successfully loaded
+Configuring Global Azure AD Endpoints
+Client requires authentication before proceeding. Waiting for --auth-files elements to be available.
+```
+At this point, the client has written the file `~/onedrive-auth-url` which contains the authentication URL that needs to be visited to perform the authentication process. The client will now wait and watch for the presence of the file `~/onedrive-response-url`.
+
+Visit the authentication URL, and then create a new file called `~/onedrive-response-url` with the response URI. Once this has been done, the application will acknowledge the presence of this file, read the contents, and authenticate the application.
+```text
+Sync Engine Initialised with new Onedrive API instance
+
+ --sync or --monitor switches missing from your command line input. Please add one (not both) of these switches to your command line or use 'onedrive --help' for further assistance.
+
+No OneDrive sync will be performed without one of these two arguments being present.
+```
+
+### CLI Option: --auth-response
+_**Description:**_ This CLI option allows the user to perform application authentication not via an interactive dialog but via providing the authentication response URI directly.
+
+_**Usage Example:**_ `onedrive --auth-response https://login.microsoftonline.com/common/oauth2/nativeclient?code=`
+
+_**Additional Usage Notes:**_ Typically, unless the application client identifier or authentication scopes are being modified, or a specific Azure Tenant is being specified, the authentication URL will most likely be as follows:
+```text
+https://login.microsoftonline.com/common/oauth2/v2.0/authorize?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient
+```
+With this URL being known, it is possible ahead of time to request an authentication token by visiting this URL and performing the authentication access request.
+
+### CLI Option: --confdir
+_**Description:**_ This CLI option allows the user to specify where all the application configuration and relevant components are stored.
+
+_**Usage Example:**_ `onedrive --confdir '~/.config/onedrive-business/'`
+
+_**Additional Usage Notes:**_ If using this option, it must be specified each and every time the application is used. If this is omitted, the application default configuration directory will be used.
+
+### CLI Option: --create-directory
+_**Description:**_ This CLI option allows the user to create the specified directory path on Microsoft OneDrive without performing a sync.
+
+_**Usage Example:**_ `onedrive --create-directory 'path/of/new/folder/structure/to/create/'`
+
+_**Additional Usage Notes:**_ The specified path to create is relative to your configured 'sync_dir'.
+
+### CLI Option: --create-share-link
+_**Description:**_ This CLI option enables the creation of a shareable file link that can be provided to users to access the file that is stored on Microsoft OneDrive.
By default, the permissions for the file will be 'read-only'.
+
+_**Usage Example:**_ `onedrive --create-share-link 'relative/path/to/your/file.txt'`
+
+_**Additional Usage Notes:**_ If writable access to the file is required, you must add `--with-editing-perms` to your command. See below for details.
+
+### CLI Option: --destination-directory
+_**Description:**_ This CLI option specifies the 'destination' portion of moving a file or folder online, without performing a sync operation.
+
+_**Usage Example:**_ `onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination'`
+
+_**Additional Usage Notes:**_ All specified paths are relative to your configured 'sync_dir'.
+
+### CLI Option: --display-config
+_**Description:**_ This CLI option will display the effective application configuration.
+
+_**Usage Example:**_ `onedrive --display-config`
+
+### CLI Option: --display-sync-status
+_**Description:**_ This CLI option will display the sync status of the configured 'sync_dir'.
+
+_**Usage Example:**_ `onedrive --display-sync-status`
+
+_**Additional Usage Notes:**_ This option can also use the `--single-directory` option to determine the sync status of a specific directory within the configured 'sync_dir'.
+
+### CLI Option: --display-quota
+_**Description:**_ This CLI option will display the quota status of the account drive ID or the configured 'drive_id' value.
+
+_**Usage Example:**_ `onedrive --display-quota`
+
+### CLI Option: --force
+_**Description:**_ This CLI option forces the deletion of data when a 'big delete' is detected.
+
+_**Usage Example:**_ `onedrive --sync --verbose --force`
+
+_**Additional Usage Notes:**_ This option should be used exclusively in cases where you've initiated a 'big delete' and genuinely intend to remove all the data that is set to be deleted online.
+
+### CLI Option: --force-sync
+_**Description:**_ This CLI option enables the syncing of a specific directory, using the Client Side Filtering application defaults, overriding any user application configuration.
+
+_**Usage Example:**_ `onedrive --sync --verbose --force-sync --single-directory 'Data'`
+
+_**Additional Usage Notes:**_ When this option is used, you will be presented with the following warning and risk acceptance:
+```text
+WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used
+
+The use of --force-sync will reconfigure the application to use defaults. This may have untold and unknown future impacts.
+By proceeding in using this option you accept any impacts including any data loss that may occur as a result of using --force-sync.
+
+Are you sure you wish to proceed with --force-sync [Y/N]
+```
+To proceed with this sync task, you must accept the risk of the actions you are taking. If you have any concerns, first use `--dry-run` and evaluate the outcome before proceeding with the actual action.
+
+### CLI Option: --get-file-link
+_**Description:**_ This CLI option queries the OneDrive API and returns the WebURL for the given local file.
+
+_**Usage Example:**_ `onedrive --get-file-link 'relative/path/to/your/file.txt'`
+
+_**Additional Usage Notes:**_ The path that you should use must be relative to your 'sync_dir'.
+
+### CLI Option: --get-sharepoint-drive-id
+_**Description:**_ This CLI option queries the OneDrive API and returns the Office 365 Drive ID for a given Office 365 SharePoint Shared Library that can then be used with 'drive_id' to sync a specific SharePoint Library.
+
+_**Usage Example:**_ `onedrive --get-sharepoint-drive-id '*'` or `onedrive --get-sharepoint-drive-id 'PointPublishing Hub Site'`
+
+### CLI Option: --logout
+_**Description:**_ This CLI option removes this client's authentication status with Microsoft OneDrive. Any further application use will require the application to be re-authenticated with Microsoft OneDrive.
+
+_**Usage Example:**_ `onedrive --logout`
+
+### CLI Option: --modified-by
+_**Description:**_ This CLI option queries the OneDrive API and returns the last modified details for the given local file.
+
+_**Usage Example:**_ `onedrive --modified-by 'relative/path/to/your/file.txt'`
+
+_**Additional Usage Notes:**_ The path that you should use must be relative to your 'sync_dir'.
+
+### CLI Option: --monitor | -m
+_**Description:**_ This CLI option controls the 'Monitor Mode' operational aspect of the client. When this option is used, the client will perform on-going syncs of data between Microsoft OneDrive and your local system. Local changes will be uploaded in near-realtime, whilst online changes will be downloaded on the next sync process. The frequency of these checks is governed by the 'monitor_interval' value.
+
+_**Usage Example:**_ `onedrive --monitor` or `onedrive -m`
+
+### CLI Option: --print-access-token
+_**Description:**_ Print the current access token being used to access Microsoft OneDrive.
+
+_**Usage Example:**_ `onedrive --verbose --verbose --debug-https --print-access-token`
+
+_**Additional Usage Notes:**_ Do not use this option if you do not know why you want to use it. Be highly cautious of exposing this object. Change your password if you feel that you have inadvertently exposed this token.
+
+### CLI Option: --reauth
+_**Description:**_ This CLI option controls the ability to re-authenticate your client with Microsoft OneDrive.
+
+_**Usage Example:**_ `onedrive --reauth`
+
+### CLI Option: --remove-directory
+_**Description:**_ This CLI option allows the user to remove the specified directory path on Microsoft OneDrive without performing a sync.
+
+_**Usage Example:**_ `onedrive --remove-directory 'path/of/new/folder/structure/to/remove/'`
+
+_**Additional Usage Notes:**_ The specified path to remove is relative to your configured 'sync_dir'.
+
+### CLI Option: --single-directory
+_**Description:**_ This CLI option controls the application's ability to sync a specific single directory.
+
+_**Usage Example:**_ `onedrive --sync --single-directory 'Data'`
+
+_**Additional Usage Notes:**_ The path specified is relative to your configured 'sync_dir' path. If the physical local path 'Folder' to sync is `~/OneDrive/Data/Folder` then the command would be `--single-directory 'Data/Folder'`.
+
+### CLI Option: --source-directory
+_**Description:**_ This CLI option specifies the 'source' portion of moving a file or folder online, without performing a sync operation.
+
+_**Usage Example:**_ `onedrive --source-directory 'path/as/source/' --destination-directory 'path/as/destination'`
+
+_**Additional Usage Notes:**_ All specified paths are relative to your configured 'sync_dir'.
+
+### CLI Option: --sync | -s
+_**Description:**_ This CLI option controls the 'Standalone Mode' operational aspect of the client. When this option is used, the client will perform a one-time sync of data between Microsoft OneDrive and your local system.
+
+_**Usage Example:**_ `onedrive --sync` or `onedrive -s`
+
+### CLI Option: --verbose | -v
+_**Description:**_ This CLI option controls the verbosity of the application output. Use the option once for normal verbose output; use it twice for debug-level application output.
+
+_**Usage Example:**_ `onedrive --sync --verbose` or `onedrive --monitor --verbose`
+
+### CLI Option: --with-editing-perms
+_**Description:**_ This CLI option enables the creation of a writable shareable file link that can be provided to users to access the file that is stored on Microsoft OneDrive. This option can only be used in conjunction with `--create-share-link`
+
+_**Usage Example:**_ `onedrive --create-share-link 'relative/path/to/your/file.txt' --with-editing-perms`
+
+_**Additional Usage Notes:**_ Placement of `--with-editing-perms` is critical. It *must* be placed after the file path as per the example above.
+
+## Deprecated Configuration File and CLI Options
+The following configuration options are no longer supported:
+
+### min_notify_changes
+_**Description:**_ Minimum number of pending incoming changes necessary to trigger a GUI desktop notification.
+
+_**Deprecated Config Example:**_ `min_notify_changes = "50"`
+
+_**Deprecated CLI Option:**_ `--min-notify-changes '50'`
+
+_**Reason for deprecation:**_ The application has been totally re-written. When this item was introduced, it was done so to reduce spamming of all events to the GUI desktop.
+
+### CLI Option: --synchronize
+_**Description:**_ Perform a synchronisation with Microsoft OneDrive
+
+_**Deprecated CLI Option:**_ `--synchronize`
+
+_**Reason for deprecation:**_ `--synchronize` has been deprecated in favour of `--sync` or `-s`
diff --git a/docs/application-security.md b/docs/application-security.md
new file mode 100644
index 000000000..7c22c4f13
--- /dev/null
+++ b/docs/application-security.md
@@ -0,0 +1,97 @@
+# OneDrive Client for Linux Application Security
+This document details the following information:
+
+* Why is this application an 'unverified publisher'?
+* Application Security and Permission Scopes
+* How to change Permission Scopes
+* How to review your existing application access consent
+
+## Why is this application an 'unverified publisher'?
+Publisher Verification, as per the Microsoft [process](https://learn.microsoft.com/en-us/azure/active-directory/develop/publisher-verification-overview), has in fact been configured, and has actually been verified!
+
+### Verified Publisher Configuration Evidence
+As per the image below, the Azure portal shows that the 'Publisher Domain' has actually been verified:
+![confirmed_verified_publisher](./images/confirmed_verified_publisher.jpg)
+
+* The 'Publisher Domain' is: https://abraunegg.github.io/
+* The required 'Microsoft Identity Association' is: https://abraunegg.github.io/.well-known/microsoft-identity-association.json
+
+## Application Security and Permission Scopes
+There are 2 main components regarding security for this application:
+* Azure Application Permissions
+* User Authentication Permissions
+
+Keeping this in mind, security options should follow the security principle of 'least privilege':
+> The principle that a security architecture should be designed so that each entity
+> is granted the minimum system resources and authorizations that the entity needs
+> to perform its function.
+
+Reference: [https://csrc.nist.gov/glossary/term/least_privilege](https://csrc.nist.gov/glossary/term/least_privilege)
+
+As such, the following API permissions are used by default:
+
+### Default Azure Application Permissions
+
+| API / Permissions name | Type | Description | Admin consent required |
+|---|---|---|---|
+| Files.Read | Delegated | Have read-only access to user files | No |
+| Files.Read.All | Delegated | Have read-only access to all files user can access | No |
+| Sites.Read.All | Delegated | Have read-only access to all items in all site collections | No |
+| offline_access | Delegated | Maintain access to data you have given it access to | No |
+
+![default_authentication_scopes](./images/default_authentication_scopes.jpg)
+
+### Default User Authentication Permissions
+
+When a user authenticates with Microsoft OneDrive, additional account permissions are provided by the service to give the user specific access to their data. These are delegated permissions provided by the platform:
+
+| API / Permissions name | Type | Description | Admin consent required |
+|---|---|---|---|
+| Files.ReadWrite | Delegated | Have full access to user files | No |
+| Files.ReadWrite.All | Delegated | Have full access to all files user can access | No |
+| Sites.ReadWrite.All | Delegated | Have full access to all items in all site collections | No |
+| offline_access | Delegated | Maintain access to data you have given it access to | No |
+
+When these delegated API permissions are combined, they provide the effective authentication scope for the OneDrive Client for Linux to access your data. The resulting effective 'default' permissions will be:
+
+| API / Permissions name | Type | Description | Admin consent required |
+|---|---|---|---|
+| Files.ReadWrite | Delegated | Have full access to user files | No |
+| Files.ReadWrite.All | Delegated | Have full access to all files user can access | No |
+| Sites.ReadWrite.All | Delegated | Have full access to all items in all site collections | No |
+| offline_access | Delegated | Maintain access to data you have given it access to | No |
+
+These 'default' permissions will allow the OneDrive Client for Linux to read, write and delete data associated with your OneDrive Account.
+
+## Configuring read-only access to your OneDrive data
+In some situations, it may be desirable to configure the OneDrive Client for Linux to operate entirely in a read-only mode.
+
+To change the application to 'read-only' access, add the following to your configuration file:
+```text
+read_only_auth_scope = "true"
+```
+This will change the user authentication scope request to use read-only access.
+
+**Note:** When changing this value, you *must* re-authenticate the client using the `--reauth` option to utilise the change in authentication scopes.
+
+When using read-only authentication scopes, the uploading of any data or local change to OneDrive will fail with the following error:
+```
+2022-Aug-06 13:16:45.3349625 ERROR: Microsoft OneDrive API returned an error with the following message:
+2022-Aug-06 13:16:45.3351661 Error Message: HTTP request returned status code 403 (Forbidden)
+2022-Aug-06 13:16:45.3352467 Error Reason: Access denied
+2022-Aug-06 13:16:45.3352838 Error Timestamp: 2022-06-12T13:16:45
+2022-Aug-06 13:16:45.3353171 API Request ID: 
+```
+
+As such, it is also advisable for you to add the following to your configuration file so that 'uploads' are prevented:
+```text
+download_only = "true"
+```
+
+**Important:** Additionally, when using 'read_only_auth_scope', you will also need to remove your existing application access consent, otherwise the old authentication consent will remain valid and will be used. This will mean the application will technically have the consent to upload data. See below on how to remove your prior application consent.
+
+## Reviewing your existing application access consent
+
+To review your existing application access consent, you need to access the following URL: https://account.live.com/consent/Manage
+
+From here, you are able to review what applications have been given what access to your data, and remove application access as required.
diff --git a/docs/build-rpm-howto.md b/docs/build-rpm-howto.md
new file mode 100644
index 000000000..5439c3668
--- /dev/null
+++ b/docs/build-rpm-howto.md
@@ -0,0 +1,379 @@
+# RPM Package Build Process
+The instructions below have been tested on the following systems:
+* CentOS 7 x86_64
+* CentOS 8 x86_64
+
+These instructions should also be applicable for RedHat & Fedora platforms, or any other RedHat RPM-based distribution.
+ +## Prepare Package Development Environment (CentOS 7, 8) +Install the following dependencies on your build system: +```text +sudo yum groupinstall -y 'Development Tools' +sudo yum install -y libcurl-devel +sudo yum install -y sqlite-devel +sudo yum install -y libnotify-devel +sudo yum install -y wget +sudo yum install -y http://downloads.dlang.org/releases/2.x/2.088.0/dmd-2.088.0-0.fedora.x86_64.rpm +mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS} +``` + +## Build RPM from spec file +Build the RPM from the provided spec file: +```text +wget https://github.com/abraunegg/onedrive/archive/refs/tags/v2.4.22.tar.gz -O ~/rpmbuild/SOURCES/v2.4.22.tar.gz +wget https://raw.githubusercontent.com/abraunegg/onedrive/master/contrib/spec/onedrive.spec.in -O ~/rpmbuild/SPECS/onedrive.spec +rpmbuild -ba ~/rpmbuild/SPECS/onedrive.spec +``` + +## RPM Build Example Results +Below are example output results of building, installing and running the RPM package on the respective platforms: + +### CentOS 7 +```text +[alex@localhost ~]$ rpmbuild -ba ~/rpmbuild/SPECS/onedrive.spec +Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.wi6Tdz ++ umask 022 ++ cd /home/alex/rpmbuild/BUILD ++ cd /home/alex/rpmbuild/BUILD ++ rm -rf onedrive-2.4.15 ++ /usr/bin/tar -xf - ++ /usr/bin/gzip -dc /home/alex/rpmbuild/SOURCES/v2.4.15.tar.gz ++ STATUS=0 ++ '[' 0 -ne 0 ']' ++ cd onedrive-2.4.15 ++ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . ++ exit 0 +Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.dyeEuM ++ umask 022 ++ cd /home/alex/rpmbuild/BUILD ++ cd onedrive-2.4.15 ++ CFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic' ++ export CFLAGS ++ CXXFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic' ++ export CXXFLAGS ++ FFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic -I/usr/lib64/gfortran/modules' ++ export FFLAGS ++ FCFLAGS='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector-strong --param=ssp-buffer-size=4 -grecord-gcc-switches -m64 -mtune=generic -I/usr/lib64/gfortran/modules' ++ export FCFLAGS ++ LDFLAGS='-Wl,-z,relro ' ++ export LDFLAGS ++ '[' 1 == 1 ']' ++ '[' x86_64 == ppc64le ']' +++ find . -name config.guess -o -name config.sub ++ ./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info +configure: WARNING: unrecognized options: --disable-dependency-tracking +checking for a BSD-compatible install... /usr/bin/install -c +checking for x86_64-redhat-linux-gnu-pkg-config... no +checking for pkg-config... /usr/bin/pkg-config +checking pkg-config is at least version 0.9.0... yes +checking for dmd... dmd +checking version of D compiler... 2.087.0 +checking for curl... yes +checking for sqlite... 
yes +configure: creating ./config.status +config.status: creating Makefile +config.status: creating contrib/pacman/PKGBUILD +config.status: creating contrib/spec/onedrive.spec +config.status: creating onedrive.1 +config.status: creating contrib/systemd/onedrive.service +config.status: creating contrib/systemd/onedrive@.service +configure: WARNING: unrecognized options: --disable-dependency-tracking ++ make +if [ -f .git/HEAD ] ; then \ + git describe --tags > version ; \ +else \ + echo v2.4.15 > version ; \ +fi +dmd -w -g -O -J. -L-lcurl -L-lsqlite3 -L-ldl src/config.d src/itemdb.d src/log.d src/main.d src/monitor.d src/onedrive.d src/qxor.d src/selective.d src/sqlite.d src/sync.d src/upload.d src/util.d src/progress.d src/arsd/cgi.d -ofonedrive ++ exit 0 +Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.L3JbHy ++ umask 022 ++ cd /home/alex/rpmbuild/BUILD ++ '[' /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 '!=' / ']' ++ rm -rf /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 +++ dirname /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 ++ mkdir -p /home/alex/rpmbuild/BUILDROOT ++ mkdir /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 ++ cd onedrive-2.4.15 ++ /usr/bin/make install DESTDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 PREFIX=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 +/usr/bin/install -c -D onedrive /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/bin/onedrive +/usr/bin/install -c -D onedrive.1 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/man/man1/onedrive.1 +/usr/bin/install -c -D -m 644 contrib/logrotate/onedrive.logrotate /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/etc/logrotate.d/onedrive +mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive +/usr/bin/install -c -D -m 644 README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive +/usr/bin/install -c -d -m 0755 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/user /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/system +/usr/bin/install -c -m 0644 contrib/systemd/onedrive@.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/system +/usr/bin/install -c -m 0644 contrib/systemd/onedrive.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/lib/systemd/system ++ /usr/lib/rpm/check-buildroot ++ /usr/lib/rpm/redhat/brp-compress ++ /usr/lib/rpm/redhat/brp-strip /usr/bin/strip ++ /usr/lib/rpm/redhat/brp-strip-comment-note /usr/bin/strip /usr/bin/objdump ++ /usr/lib/rpm/redhat/brp-strip-static-archive /usr/bin/strip ++ /usr/lib/rpm/brp-python-bytecompile /usr/bin/python 1 ++ /usr/lib/rpm/redhat/brp-python-hardlink ++ /usr/lib/rpm/redhat/brp-java-repack-jars +Processing files: onedrive-2.4.15-1.el7.x86_64 +Executing(%doc): /bin/sh -e /var/tmp/rpm-tmp.cpSXho ++ umask 022 ++ cd /home/alex/rpmbuild/BUILD ++ cd onedrive-2.4.15 ++ DOCDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15 ++ export DOCDIR ++ /usr/bin/mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15 ++ cp -pr README.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15 ++ cp -pr LICENSE 
/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15 ++ cp -pr CHANGELOG.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64/usr/share/doc/onedrive-2.4.15 ++ exit 0 +Provides: config(onedrive) = 2.4.15-1.el7 onedrive = 2.4.15-1.el7 onedrive(x86-64) = 2.4.15-1.el7 +Requires(rpmlib): rpmlib(CompressedFileNames) <= 3.0.4-1 rpmlib(FileDigests) <= 4.6.0-1 rpmlib(PayloadFilesHavePrefix) <= 4.0-1 +Requires(post): systemd +Requires(preun): systemd +Requires(postun): systemd +Requires: ld-linux-x86-64.so.2()(64bit) ld-linux-x86-64.so.2(GLIBC_2.3)(64bit) libc.so.6()(64bit) libc.so.6(GLIBC_2.14)(64bit) libc.so.6(GLIBC_2.15)(64bit) libc.so.6(GLIBC_2.2.5)(64bit) libc.so.6(GLIBC_2.3.2)(64bit) libc.so.6(GLIBC_2.3.4)(64bit) libc.so.6(GLIBC_2.4)(64bit) libc.so.6(GLIBC_2.6)(64bit) libc.so.6(GLIBC_2.8)(64bit) libc.so.6(GLIBC_2.9)(64bit) libcurl.so.4()(64bit) libdl.so.2()(64bit) libdl.so.2(GLIBC_2.2.5)(64bit) libgcc_s.so.1()(64bit) libgcc_s.so.1(GCC_3.0)(64bit) libgcc_s.so.1(GCC_4.2.0)(64bit) libm.so.6()(64bit) libm.so.6(GLIBC_2.2.5)(64bit) libpthread.so.0()(64bit) libpthread.so.0(GLIBC_2.2.5)(64bit) libpthread.so.0(GLIBC_2.3.2)(64bit) libpthread.so.0(GLIBC_2.3.4)(64bit) librt.so.1()(64bit) librt.so.1(GLIBC_2.2.5)(64bit) libsqlite3.so.0()(64bit) rtld(GNU_HASH) +Checking for unpackaged file(s): /usr/lib/rpm/check-files /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el7.x86_64 +Wrote: /home/alex/rpmbuild/SRPMS/onedrive-2.4.15-1.el7.src.rpm +Wrote: /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm +Executing(%clean): /bin/sh -e /var/tmp/rpm-tmp.nWoW33 ++ umask 022 ++ cd /home/alex/rpmbuild/BUILD ++ cd onedrive-2.4.15 ++ exit 0 +[alex@localhost ~]$ sudo yum -y install /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm +Loaded plugins: fastestmirror +Examining /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm: onedrive-2.4.15-1.el7.x86_64 +Marking /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el7.x86_64.rpm to be installed +Resolving Dependencies +--> Running transaction check +---> Package onedrive.x86_64 0:2.4.15-1.el7 will be installed +--> Finished Dependency Resolution + +Dependencies Resolved + +============================================================================================================================================================================================== + Package Arch Version Repository Size +============================================================================================================================================================================================== +Installing: + onedrive x86_64 2.4.15-1.el7 /onedrive-2.4.15-1.el7.x86_64 7.2 M + +Transaction Summary +============================================================================================================================================================================================== +Install 1 Package + +Total size: 7.2 M +Installed size: 7.2 M +Downloading packages: +Running transaction check +Running transaction test +Transaction test succeeded +Running transaction + Installing : onedrive-2.4.15-1.el7.x86_64 1/1 + Verifying : onedrive-2.4.15-1.el7.x86_64 1/1 + +Installed: + onedrive.x86_64 0:2.4.15-1.el7 + +Complete! 
+[alex@localhost ~]$ which onedrive +/usr/bin/onedrive +[alex@localhost ~]$ onedrive --version +onedrive v2.4.15 +[alex@localhost ~]$ onedrive --display-config +onedrive version = v2.4.15 +Config path = /home/alex/.config/onedrive +Config file found in config path = false +Config option 'check_nosync' = false +Config option 'sync_dir' = /home/alex/OneDrive +Config option 'skip_dir' = +Config option 'skip_file' = ~*|.~*|*.tmp +Config option 'skip_dotfiles' = false +Config option 'skip_symlinks' = false +Config option 'monitor_interval' = 300 +Config option 'min_notify_changes' = 5 +Config option 'log_dir' = /var/log/onedrive/ +Config option 'classify_as_big_delete' = 1000 +Config option 'upload_only' = false +Config option 'no_remote_delete' = false +Config option 'remove_source_files' = false +Config option 'sync_root_files' = false +Selective sync 'sync_list' configured = false +Business Shared Folders configured = false +[alex@localhost ~]$ +``` + +### CentOS 8 +```text +[alex@localhost ~]$ rpmbuild -ba ~/rpmbuild/SPECS/onedrive.spec +Executing(%prep): /bin/sh -e /var/tmp/rpm-tmp.UINFyE ++ umask 022 ++ cd /home/alex/rpmbuild/BUILD ++ cd /home/alex/rpmbuild/BUILD ++ rm -rf onedrive-2.4.15 ++ /usr/bin/gzip -dc /home/alex/rpmbuild/SOURCES/v2.4.15.tar.gz ++ /usr/bin/tar -xof - ++ STATUS=0 ++ '[' 0 -ne 0 ']' ++ cd onedrive-2.4.15 ++ /usr/bin/chmod -Rf a+rX,u+w,g-w,o-w . ++ exit 0 +Executing(%build): /bin/sh -e /var/tmp/rpm-tmp.cX1WQa ++ umask 022 ++ cd /home/alex/rpmbuild/BUILD ++ cd onedrive-2.4.15 ++ CFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection' ++ export CFLAGS ++ CXXFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection' ++ export CXXFLAGS ++ FFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -I/usr/lib64/gfortran/modules' ++ export FFLAGS ++ FCFLAGS='-O2 -g -pipe -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -fexceptions -fstack-protector-strong -grecord-gcc-switches -specs=/usr/lib/rpm/redhat/redhat-hardened-cc1 -specs=/usr/lib/rpm/redhat/redhat-annobin-cc1 -m64 -mtune=generic -fasynchronous-unwind-tables -fstack-clash-protection -fcf-protection -I/usr/lib64/gfortran/modules' ++ export FCFLAGS ++ LDFLAGS='-Wl,-z,relro -Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld' ++ export LDFLAGS ++ '[' 1 = 1 ']' ++++ dirname ./configure +++ find . -name config.guess -o -name config.sub ++ '[' 1 = 1 ']' ++ '[' x '!=' 'x-Wl,-z,now -specs=/usr/lib/rpm/redhat/redhat-hardened-ld' ']' +++ find . 
-name ltmain.sh ++ ./configure --build=x86_64-redhat-linux-gnu --host=x86_64-redhat-linux-gnu --program-prefix= --disable-dependency-tracking --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --sysconfdir=/etc --datadir=/usr/share --includedir=/usr/include --libdir=/usr/lib64 --libexecdir=/usr/libexec --localstatedir=/var --sharedstatedir=/var/lib --mandir=/usr/share/man --infodir=/usr/share/info +configure: WARNING: unrecognized options: --disable-dependency-tracking +checking for a BSD-compatible install... /usr/bin/install -c +checking for x86_64-redhat-linux-gnu-pkg-config... /usr/bin/x86_64-redhat-linux-gnu-pkg-config +checking pkg-config is at least version 0.9.0... yes +checking for dmd... dmd +checking version of D compiler... 2.087.0 +checking for curl... yes +checking for sqlite... yes +configure: creating ./config.status +config.status: creating Makefile +config.status: creating contrib/pacman/PKGBUILD +config.status: creating contrib/spec/onedrive.spec +config.status: creating onedrive.1 +config.status: creating contrib/systemd/onedrive.service +config.status: creating contrib/systemd/onedrive@.service +configure: WARNING: unrecognized options: --disable-dependency-tracking ++ make +if [ -f .git/HEAD ] ; then \ + git describe --tags > version ; \ +else \ + echo v2.4.15 > version ; \ +fi +dmd -w -g -O -J. -L-lcurl -L-lsqlite3 -L-ldl src/config.d src/itemdb.d src/log.d src/main.d src/monitor.d src/onedrive.d src/qxor.d src/selective.d src/sqlite.d src/sync.d src/upload.d src/util.d src/progress.d src/arsd/cgi.d -ofonedrive ++ exit 0 +Executing(%install): /bin/sh -e /var/tmp/rpm-tmp.dNFPdx ++ umask 022 ++ cd /home/alex/rpmbuild/BUILD ++ '[' /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 '!=' / ']' ++ rm -rf /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 +++ dirname /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 ++ mkdir -p /home/alex/rpmbuild/BUILDROOT ++ mkdir /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 ++ cd onedrive-2.4.15 ++ /usr/bin/make install DESTDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 'INSTALL=/usr/bin/install -p' PREFIX=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 +/usr/bin/install -p -D onedrive /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/bin/onedrive +/usr/bin/install -p -D onedrive.1 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/man/man1/onedrive.1 +/usr/bin/install -p -D -m 644 contrib/logrotate/onedrive.logrotate /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/etc/logrotate.d/onedrive +mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive +/usr/bin/install -p -D -m 644 README.md config LICENSE CHANGELOG.md docs/Docker.md docs/INSTALL.md docs/SharePoint-Shared-Libraries.md docs/USAGE.md docs/BusinessSharedFolders.md docs/advanced-usage.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive +/usr/bin/install -p -d -m 0755 /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/user /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/system +/usr/bin/install -p -m 0644 contrib/systemd/onedrive@.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/system +/usr/bin/install -p -m 0644 contrib/systemd/onedrive.service /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/lib/systemd/system ++ /usr/lib/rpm/check-buildroot ++ /usr/lib/rpm/redhat/brp-ldconfig 
+/sbin/ldconfig: Warning: ignoring configuration file that cannot be opened: /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/etc/ld.so.conf: No such file or directory ++ /usr/lib/rpm/brp-compress ++ /usr/lib/rpm/brp-strip /usr/bin/strip ++ /usr/lib/rpm/brp-strip-comment-note /usr/bin/strip /usr/bin/objdump ++ /usr/lib/rpm/brp-strip-static-archive /usr/bin/strip ++ /usr/lib/rpm/brp-python-bytecompile 1 ++ /usr/lib/rpm/brp-python-hardlink ++ PYTHON3=/usr/libexec/platform-python ++ /usr/lib/rpm/redhat/brp-mangle-shebangs +Processing files: onedrive-2.4.15-1.el8.x86_64 +Executing(%doc): /bin/sh -e /var/tmp/rpm-tmp.TnFKbZ ++ umask 022 ++ cd /home/alex/rpmbuild/BUILD ++ cd onedrive-2.4.15 ++ DOCDIR=/home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive ++ export LC_ALL=C ++ LC_ALL=C ++ export DOCDIR ++ /usr/bin/mkdir -p /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive ++ cp -pr README.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive ++ cp -pr LICENSE /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive ++ cp -pr CHANGELOG.md /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64/usr/share/doc/onedrive ++ exit 0 +warning: File listed twice: /usr/share/doc/onedrive +warning: File listed twice: /usr/share/doc/onedrive/CHANGELOG.md +warning: File listed twice: /usr/share/doc/onedrive/LICENSE +warning: File listed twice: /usr/share/doc/onedrive/README.md +Provides: config(onedrive) = 2.4.15-1.el8 onedrive = 2.4.15-1.el8 onedrive(x86-64) = 2.4.15-1.el8 +Requires(rpmlib): rpmlib(CompressedFileNames) <= 3.0.4-1 rpmlib(FileDigests) <= 4.6.0-1 rpmlib(PayloadFilesHavePrefix) <= 4.0-1 +Requires(post): systemd +Requires(preun): systemd +Requires(postun): systemd +Requires: ld-linux-x86-64.so.2()(64bit) ld-linux-x86-64.so.2(GLIBC_2.3)(64bit) libc.so.6()(64bit) libc.so.6(GLIBC_2.14)(64bit) libc.so.6(GLIBC_2.15)(64bit) libc.so.6(GLIBC_2.2.5)(64bit) libc.so.6(GLIBC_2.3.2)(64bit) libc.so.6(GLIBC_2.3.4)(64bit) libc.so.6(GLIBC_2.4)(64bit) libc.so.6(GLIBC_2.6)(64bit) libc.so.6(GLIBC_2.8)(64bit) libc.so.6(GLIBC_2.9)(64bit) libcurl.so.4()(64bit) libdl.so.2()(64bit) libdl.so.2(GLIBC_2.2.5)(64bit) libgcc_s.so.1()(64bit) libgcc_s.so.1(GCC_3.0)(64bit) libgcc_s.so.1(GCC_4.2.0)(64bit) libm.so.6()(64bit) libm.so.6(GLIBC_2.2.5)(64bit) libpthread.so.0()(64bit) libpthread.so.0(GLIBC_2.2.5)(64bit) libpthread.so.0(GLIBC_2.3.2)(64bit) libpthread.so.0(GLIBC_2.3.4)(64bit) librt.so.1()(64bit) librt.so.1(GLIBC_2.2.5)(64bit) libsqlite3.so.0()(64bit) rtld(GNU_HASH) +Checking for unpackaged file(s): /usr/lib/rpm/check-files /home/alex/rpmbuild/BUILDROOT/onedrive-2.4.15-1.el8.x86_64 +Wrote: /home/alex/rpmbuild/SRPMS/onedrive-2.4.15-1.el8.src.rpm +Wrote: /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el8.x86_64.rpm +Executing(%clean): /bin/sh -e /var/tmp/rpm-tmp.FAMTFz ++ umask 022 ++ cd /home/alex/rpmbuild/BUILD ++ cd onedrive-2.4.15 ++ exit 0 +[alex@localhost ~]$ sudo yum -y install /home/alex/rpmbuild/RPMS/x86_64/onedrive-2.4.15-1.el8.x86_64.rpm +Last metadata expiration check: 0:04:07 ago on Fri 14 Jan 2022 14:22:13 EST. +Dependencies resolved. 
+============================================================================================================================================================================================== + Package Architecture Version Repository Size +============================================================================================================================================================================================== +Installing: + onedrive x86_64 2.4.15-1.el8 @commandline 1.5 M + +Transaction Summary +============================================================================================================================================================================================== +Install 1 Package + +Total size: 1.5 M +Installed size: 7.1 M +Downloading Packages: +Running transaction check +Transaction check succeeded. +Running transaction test +Transaction test succeeded. +Running transaction + Preparing : 1/1 + Installing : onedrive-2.4.15-1.el8.x86_64 1/1 + Running scriptlet: onedrive-2.4.15-1.el8.x86_64 1/1 + Verifying : onedrive-2.4.15-1.el8.x86_64 1/1 + +Installed: + onedrive-2.4.15-1.el8.x86_64 + +Complete! +[alex@localhost ~]$ which onedrive +/usr/bin/onedrive +[alex@localhost ~]$ onedrive --version +onedrive v2.4.15 +[alex@localhost ~]$ onedrive --display-config +onedrive version = v2.4.15 +Config path = /home/alex/.config/onedrive +Config file found in config path = false +Config option 'check_nosync' = false +Config option 'sync_dir' = /home/alex/OneDrive +Config option 'skip_dir' = +Config option 'skip_file' = ~*|.~*|*.tmp +Config option 'skip_dotfiles' = false +Config option 'skip_symlinks' = false +Config option 'monitor_interval' = 300 +Config option 'min_notify_changes' = 5 +Config option 'log_dir' = /var/log/onedrive/ +Config option 'classify_as_big_delete' = 1000 +Config option 'upload_only' = false +Config option 'no_remote_delete' = false +Config option 'remove_source_files' = false +Config option 'sync_root_files' = false +Selective sync 'sync_list' configured = false +Business Shared Folders configured = false +[alex@localhost ~]$ +``` diff --git a/docs/business-shared-folders.md b/docs/business-shared-folders.md new file mode 100644 index 000000000..4282f4ac6 --- /dev/null +++ b/docs/business-shared-folders.md @@ -0,0 +1,40 @@ +# How to configure OneDrive Business Shared Folder Sync +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. + +## Important Note +This feature has been 100% re-written from v2.5.0 onwards. A pre-requesite before using this capability in v2.5.0 and above is for you to revert any Shared Business Folder configuration you may be currently using, including, but not limited to: +* Removing `sync_business_shared_folders = "true|false"` from your 'config' file +* Removing the 'business_shared_folders' file +* Removing any local data | shared folder data from your configured 'sync_dir' to ensure that there are no conflicts or issues. + +## Process Overview +Syncing OneDrive Business Shared Folders requires additional configuration for your 'onedrive' client: +1. From the OneDrive web interface, review the 'Shared' objects that have been shared with you. +2. 
Select the applicable folder, and click the 'Add shortcut to My files', which will then add this to your 'My files' folder +3. Update your OneDrive Client for Linux 'config' file to enable the feature by adding `sync_business_shared_items = "true"`. Adding this option will trigger a `--resync` requirement. +4. Test the configuration using '--dry-run' +5. Remove the use of '--dry-run' and sync the OneDrive Business Shared folders as required + + +**NOTE:** This documentation will be updated as this feature progresses. + + +### Enable syncing of OneDrive Business Shared Folders via config file +```text +sync_business_shared_items = "true" +``` + +### Disable syncing of OneDrive Business Shared Folders via config file +```text +sync_business_shared_items = "false" +``` + +## Known Issues +Shared folders, shared with you from people outside of your 'organisation' are unable to be synced. This is due to the Microsoft Graph API not presenting these folders. + +Shared folders that match this scenario, when you view 'Shared' via OneDrive online, will have a 'world' symbol as per below: + +![shared_with_me](./images/shared_with_me.JPG) + +This issue is being tracked by: [#966](https://github.com/abraunegg/onedrive/issues/966) diff --git a/docs/docker.md b/docs/docker.md new file mode 100644 index 000000000..1bf6251ff --- /dev/null +++ b/docs/docker.md @@ -0,0 +1,397 @@ +# Run the OneDrive Client for Linux under Docker +This client can be run as a Docker container, with 3 available container base options for you to choose from: + +| Container Base | Docker Tag | Description | i686 | x86_64 | ARMHF | AARCH64 | +|----------------|-------------|----------------------------------------------------------------|:------:|:------:|:-----:|:-------:| +| Alpine Linux | edge-alpine | Docker container based on Alpine 3.18 using 'master' |❌|✔|❌|✔| +| Alpine Linux | alpine | Docker container based on Alpine 3.18 using latest release |❌|✔|❌|✔| +| Debian | debian | Docker container based on Debian Stable using latest release |✔|✔|✔|✔| +| Debian | edge | Docker container based on Debian Stable using 'master' |✔|✔|✔|✔| +| Debian | edge-debian | Docker container based on Debian Stable using 'master' |✔|✔|✔|✔| +| Debian | latest | Docker container based on Debian Stable using latest release |✔|✔|✔|✔| +| Fedora | edge-fedora | Docker container based on Fedora 38 using 'master' |❌|✔|❌|✔| +| Fedora | fedora | Docker container based on Fedora 38 using latest release |❌|✔|❌|✔| + +These containers offer a simple monitoring-mode service for the OneDrive Client for Linux. + +The instructions below have been validated on: +* Fedora 38 + +The instructions below will utilise the 'edge' tag, however this can be substituted for any of the other docker tags such as 'latest' from the table above if desired. + +The 'edge' Docker Container will align closer to all documentation and features, where as 'latest' is the release version from a static point in time. The 'latest' tag however may contain bugs and/or issues that will have been fixed, and those fixes are contained in 'edge'. + +Additionally there are specific version release tags for each release. Refer to https://hub.docker.com/r/driveone/onedrive/tags for any other Docker tags you may be interested in. + +**Note:** The below instructions for docker has been tested and validated when logging into the system as an unprivileged user (non 'root' user). + +## High Level Configuration Steps +1. 
Install 'docker' as per your distribution platform's instructions if not already installed. +2. Configure 'docker' to allow non-privileged users to run Docker commands +3. Disable 'SELinux' as per your distribution platform's instructions +4. Test 'docker' by running a test container without using `sudo` +5. Prepare the required docker volumes to store the configuration and data +6. Run the 'onedrive' container and perform authorisation +7. Running the 'onedrive' container under 'docker' + +## Configuration Steps + +### 1. Install 'docker' on your platform +Install 'docker' as per your distribution platform's instructions if not already installed. + +### 2. Configure 'docker' to allow non-privileged users to run Docker commands +Read https://docs.docker.com/engine/install/linux-postinstall/ to configure the 'docker' user group with your user account to allow your non 'root' user to run 'docker' commands. + +### 3. Disable SELinux on your platform +In order to run the Docker container, SELinux must be disabled. Without doing this, when the application is authenticated in the steps below, the following error will be presented: +```text +ERROR: The local file system returned an error with the following message: + Error Message: /onedrive/conf/refresh_token: Permission denied + +The database cannot be opened. Please check the permissions of ~/.config/onedrive/items.sqlite3 +``` +The only known work-around for the above problem at present is to disable SELinux. Please refer to your distribution platform's instructions on how to perform this step. + +* Fedora: https://docs.fedoraproject.org/en-US/quick-docs/selinux-changing-states-and-modes/#_disabling_selinux +* Red Hat Enterprise Linux: https://access.redhat.com/solutions/3176 + +Post disabling SELinux and reboot your system, confirm that `getenforce` returns `Disabled`: +```text +$ getenforce +Disabled +``` + +If you are still experiencing permission issues despite disabling SELinux, please read https://www.redhat.com/sysadmin/container-permission-denied-errors + +### 4. Test 'docker' on your platform +Ensure that 'docker' is running as a system service, and is enabled to be activated on system reboot: +```bash +sudo systemctl enable --now docker +``` + +Test that 'docker' is operational for your 'non-root' user, as per below: +```bash +[alex@fedora-38-docker-host ~]$ docker run hello-world +Unable to find image 'hello-world:latest' locally +latest: Pulling from library/hello-world +719385e32844: Pull complete +Digest: sha256:88ec0acaa3ec199d3b7eaf73588f4518c25f9d34f58ce9a0df68429c5af48e8d +Status: Downloaded newer image for hello-world:latest + +Hello from Docker! +This message shows that your installation appears to be working correctly. + +To generate this message, Docker took the following steps: + 1. The Docker client contacted the Docker daemon. + 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. + (amd64) + 3. The Docker daemon created a new container from that image which runs the + executable that produces the output you are currently reading. + 4. The Docker daemon streamed that output to the Docker client, which sent it + to your terminal. + +To try something more ambitious, you can run an Ubuntu container with: + $ docker run -it ubuntu bash + +Share images, automate workflows, and more with a free Docker ID: + https://hub.docker.com/ + +For more examples and ideas, visit: + https://docs.docker.com/get-started/ + +[alex@fedora-38-docker-host ~]$ +``` + +### 5. 
Configure the required docker volumes
+The 'onedrive' Docker container requires 2 docker volumes to operate:
+* Config Volume
+* Data Volume
+
+The first volume is the configuration volume that stores all the applicable application configuration + current runtime state. In a non-containerised environment, this normally resides in `~/.config/onedrive` - in a containerised environment this is stored in the volume tagged as `/onedrive/conf`
+
+The second volume is the data volume, where all your data from Microsoft OneDrive is stored locally. This volume is mapped to an actual directory point on your local filesystem and this is stored in the volume tagged as `/onedrive/data`
+
+#### 5.1 Prepare the 'config' volume
+Create the 'config' volume with the following command:
+```bash
+docker volume create onedrive_conf
+```
+
+This will create a docker volume labeled `onedrive_conf`, where all configuration of your onedrive account will be stored. You can add a custom config file in this location at a later point in time if required.
+
+#### 5.2 Prepare the 'data' volume
+Create the 'data' volume with the following command:
+```bash
+docker volume create onedrive_data
+```
+
+This will create a docker volume labeled `onedrive_data` and will map to a path on your local filesystem. This is where your data from Microsoft OneDrive will be stored. Keep in mind that:
+
+* The owner of this specified folder must not be root
+* The owner of this specified folder must have permissions for its parent directory
+* Docker will attempt to change the permissions of the volume to the user the container is configured to run as
+
+**NOTE:** Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB Drive etc) as the 'mount' itself is owned by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Docker container will fail to start with the following error message:
+```bash
+ROOT level privileges prohibited!
+```
+
+### 6. First run of Docker container under docker and performing authorisation
+The 'onedrive' client within the container first needs to be authorised with your Microsoft account. This is achieved by initially running docker in interactive mode.
+
+Run the docker image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`).
+
+**Important:** The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the docker container. The script below will create 'ONEDRIVE_DATA_DIR' so that it exists locally for the docker volume mapping to occur.
+
+It is also a requirement that the container be run using a non-root UID and GID: you must supply a non-root UID and GID (e.g. `export ONEDRIVE_UID=1000` and `export ONEDRIVE_GID=1000`). The script below will use `id` to evaluate your system environment to use the correct values.
+```bash +export ONEDRIVE_DATA_DIR="${HOME}/OneDrive" +export ONEDRIVE_UID=`id -u` +export ONEDRIVE_GID=`id -g` +mkdir -p ${ONEDRIVE_DATA_DIR} +docker run -it --name onedrive -v onedrive_conf:/onedrive/conf \ + -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" \ + -e "ONEDRIVE_UID=${ONEDRIVE_UID}" \ + -e "ONEDRIVE_GID=${ONEDRIVE_GID}" \ + driveone/onedrive:edge +``` + +When the Docker container successfully starts: +* You will be asked to open a specific link using your web browser +* Login to your Microsoft Account and give the application the permission +* After giving the permission, you will be redirected to a blank page +* Copy the URI of the blank page into the application prompt to authorise the application + +Once the 'onedrive' application is authorised, the client will automatically start monitoring your `ONEDRIVE_DATA_DIR` for data changes to be uploaded to OneDrive. Files stored on OneDrive will be downloaded to this location. + +If the client is working as expected, you can detach from the container with Ctrl+p, Ctrl+q. + +### 7. Running the 'onedrive' container under 'docker' + +#### 7.1 Check if the monitor service is running +```bash +docker ps -f name=onedrive +``` + +#### 7.2 Show 'onedrive' runtime logs +```bash +docker logs onedrive +``` + +#### 7.3 Stop running 'onedrive' container +```bash +docker stop onedrive +``` + +#### 7.4 Start 'onedrive' container +```bash +docker start onedrive +``` + +#### 7.5 Remove 'onedrive' container +```bash +docker rm -f onedrive +``` + +## Advanced Usage + +### How to use Docker-compose +You can utilise `docker-compose` if available on your platform if you are able to use docker compose schemas > 3. + +In the following example it is assumed you have a `ONEDRIVE_DATA_DIR` environment variable and have already created the `onedrive_conf` volume. + +You can also use docker bind mounts for the configuration folder, e.g. `export ONEDRIVE_CONF="${HOME}/OneDriveConfig"`. + +``` +version: "3" +services: + onedrive: + image: driveone/onedrive:edge + restart: unless-stopped + environment: + - ONEDRIVE_UID=${PUID} + - ONEDRIVE_GID=${PGID} + volumes: + - onedrive_conf:/onedrive/conf + - ${ONEDRIVE_DATA_DIR}:/onedrive/data +``` + +Note that you still have to perform step 3: First Run. + +### Editing the running configuration and using a 'config' file +The 'onedrive' client should run in default configuration, however you can change this default configuration by placing a custom config file in the `onedrive_conf` docker volume. First download the default config from [here](https://raw.githubusercontent.com/abraunegg/onedrive/master/config) +Then put it into your onedrive_conf volume path, which can be found with: + +```bash +docker volume inspect onedrive_conf +``` + +Or you can map your own config folder to the config volume. Make sure to copy all files from the docker volume into your mapped folder first. + +The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration) + +### Syncing multiple accounts +There are many ways to do this, the easiest is probably to do the following: +1. Create a second docker config volume (replace `Work` with your desired name): `docker volume create onedrive_conf_Work` +2. 
And start a second docker monitor container (again replace `Work` with your desired name): +``` +export ONEDRIVE_DATA_DIR_WORK="/home/abraunegg/OneDriveWork" +mkdir -p ${ONEDRIVE_DATA_DIR_WORK} +docker run -it --restart unless-stopped --name onedrive_Work -v onedrive_conf_Work:/onedrive/conf -v "${ONEDRIVE_DATA_DIR_WORK}:/onedrive/data" driveone/onedrive:edge +``` + +### Run or update the Docker container with one script +If you are experienced with docker and onedrive, you can use the following script: + +```bash +# Update ONEDRIVE_DATA_DIR with correct OneDrive directory path +ONEDRIVE_DATA_DIR="${HOME}/OneDrive" +# Create directory if non-existant +mkdir -p ${ONEDRIVE_DATA_DIR} + +firstRun='-d' +docker pull driveone/onedrive:edge +docker inspect onedrive_conf > /dev/null 2>&1 || { docker volume create onedrive_conf; firstRun='-it'; } +docker inspect onedrive > /dev/null 2>&1 && docker rm -f onedrive +docker run $firstRun --restart unless-stopped --name onedrive -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge +``` + +## Supported Docker Environment Variables +| Variable | Purpose | Sample Value | +| ---------------- | --------------------------------------------------- |:--------------------------------------------------------------------------------------------------------------------------------:| +| ONEDRIVE_UID | UserID (UID) to run as | 1000 | +| ONEDRIVE_GID | GroupID (GID) to run as | 1000 | +| ONEDRIVE_VERBOSE | Controls "--verbose" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_DEBUG | Controls "--verbose --verbose" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_DEBUG_HTTPS | Controls "--debug-https" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_RESYNC | Controls "--resync" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_DOWNLOADONLY | Controls "--download-only" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_UPLOADONLY | Controls "--upload-only" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_NOREMOTEDELETE | Controls "--no-remote-delete" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_LOGOUT | Controls "--logout" switch. Default is 0 | 1 | +| ONEDRIVE_REAUTH | Controls "--reauth" switch. Default is 0 | 1 | +| ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" | +| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) | +| ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. Default = "" | "mydir" | +| ONEDRIVE_DRYRUN | Controls "--dry-run" option. 
Default is 0 | 1 | + +### Environment Variables Usage Examples +**Verbose Output:** +```bash +docker container run -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge +``` +**Debug Output:** +```bash +docker container run -e ONEDRIVE_DEBUG=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge +``` +**Perform a --resync:** +```bash +docker container run -e ONEDRIVE_RESYNC=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge +``` +**Perform a --resync and --verbose:** +```bash +docker container run -e ONEDRIVE_RESYNC=1 -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge +``` +**Perform a --logout and re-authenticate:** +```bash +docker container run -it -e ONEDRIVE_LOGOUT=1 -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" driveone/onedrive:edge +``` + +## Building a custom Docker image + +### Build Environment Requirements +* Build environment must have at least 1GB of memory & 2GB swap space + +You can validate your build environment memory status with the following command: +```text +cat /proc/meminfo | grep -E 'MemFree|Swap' +``` +This should result in the following similar output: +```text +MemFree: 3704644 kB +SwapCached: 0 kB +SwapTotal: 8117244 kB +SwapFree: 8117244 kB +``` + +If you do not have enough swap space, you can use the following script to dynamically allocate a swapfile for building the Docker container: + +```bash +cd /var +sudo fallocate -l 1.5G swapfile +sudo chmod 600 swapfile +sudo mkswap swapfile +sudo swapon swapfile +# make swap permanent +sudo nano /etc/fstab +# add "/swapfile swap swap defaults 0 0" at the end of file +# check it has been assigned +swapon -s +free -h +``` + +If you are running a Raspberry Pi, you will need to edit your system configuration to increase your swapfile: + +* Modify the file `/etc/dphys-swapfile` and edit the `CONF_SWAPSIZE`, for example: `CONF_SWAPSIZE=2048`. + +A reboot of your Raspberry Pi is required to make this change effective. + +### Building and running a custom Docker image +You can also build your own image instead of pulling the one from [hub.docker.com](https://hub.docker.com/r/driveone/onedrive): +```bash +git clone https://github.com/abraunegg/onedrive +cd onedrive +docker build . -t local-onedrive -f contrib/docker/Dockerfile +docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive:latest +``` + +There are alternate, smaller images available by using `Dockerfile-debian` or `Dockerfile-alpine`. These [multi-stage builder pattern](https://docs.docker.com/develop/develop-images/multistage-build/) Dockerfiles require Docker version at least 17.05. + +### How to build and run a custom Docker image based on Debian +``` bash +docker build . -t local-ondrive-debian -f contrib/docker/Dockerfile-debian +docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-ondrive-debian:latest +``` + +### How to build and run a custom Docker image based on Alpine Linux +``` bash +docker build . 
-t local-ondrive-alpine -f contrib/docker/Dockerfile-alpine +docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-ondrive-alpine:latest +``` + +### How to build and run a custom Docker image for ARMHF (Raspberry Pi) +Compatible with: +* Raspberry Pi +* Raspberry Pi 2 +* Raspberry Pi Zero +* Raspberry Pi 3 +* Raspberry Pi 4 +``` bash +docker build . -t local-onedrive-armhf -f contrib/docker/Dockerfile-debian +docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive-armhf:latest +``` + +### How to build and run a custom Docker image for AARCH64 Platforms +``` bash +docker build . -t local-onedrive-aarch64 -f contrib/docker/Dockerfile-debian +docker container run -v onedrive_conf:/onedrive/conf -v "${ONEDRIVE_DATA_DIR}:/onedrive/data" local-onedrive-aarch64:latest +``` +### How to support double-byte languages +In some geographic regions, you may need to change and/or update the locale specification of the Docker container to better support the local language used for your local filesystem. To do this, follow the example below: +``` +FROM driveone/onedrive + +ENV DEBIAN_FRONTEND noninteractive + +RUN apt-get update +RUN apt-get install -y locales + +RUN echo "ja_JP.UTF-8 UTF-8" > /etc/locale.gen && \ + locale-gen ja_JP.UTF-8 && \ + dpkg-reconfigure locales && \ + /usr/sbin/update-locale LANG=ja_JP.UTF-8 + +ENV LC_ALL ja_JP.UTF-8 +``` +The above example changes the Docker container to support Japanese. To support your local language, change `ja_JP.UTF-8` to the required entry. \ No newline at end of file diff --git a/docs/install.md b/docs/install.md new file mode 100644 index 000000000..f5338122d --- /dev/null +++ b/docs/install.md @@ -0,0 +1,282 @@ +# Installing or Upgrading using Distribution Packages or Building the OneDrive Client for Linux from source + +## Installing or Upgrading using Distribution Packages +This project has been packaged for the following Linux distributions as per below. The current client release is: [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) + +Only the current release version or greater is supported. Earlier versions are not supported and should not be installed or used. + +#### Important Note: +Distribution packages may be of an older release when compared to the latest release that is [available](https://github.com/abraunegg/onedrive/releases). If any package version indicator below is 'red' for your distribution, it is recommended that you build from source. Do not install the software from the available distribution package. If a package is out of date, please contact the package maintainer for resolution. 
+
+| Distribution | Package Name & Package Link |   PKG_Version   |  i686  | x86_64 | ARMHF | AARCH64 | Extra Details |
+|---------------------------------|------------------------------------------------------------------------------|:---------------:|:----:|:------:|:-----:|:-------:|------------------------------------------------------------------------------------------------------------------|
+| Alpine Linux | [onedrive](https://pkgs.alpinelinux.org/packages?name=onedrive&branch=edge) |Alpine Linux Edge package|❌|✔|❌|✔ | |
+| Arch Linux<br>Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |AUR package|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)<br><br>**Note:** You must first install 'base-devel' as this is a pre-requisite for using the AUR<br><br>**Note:** If asked regarding a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'<br><br>**Note:** System must have at least 1GB of memory & 1GB swap space |
+| Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |Debian 11 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories<br><br>It is recommended that for Debian 11 you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
+| Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |Debian 12 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories<br><br>It is recommended that for Debian 12 you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
+| Debian Sid | [onedrive](https://packages.debian.org/sid/onedrive) |Debian Sid package|✔|✔|✔|✔| |
+| Fedora | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |Fedora Rawhide package|✔|✔|✔|✔| |
+| Gentoo | [onedrive](https://gpo.zugaina.org/net-misc/onedrive) | No API Available |✔|✔|❌|❌| |
+| Homebrew | [onedrive](https://formulae.brew.sh/formula/onedrive) | Homebrew package |❌|✔|❌|❌| |
+| Linux Mint 20.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories<br><br>It is recommended that for Linux Mint you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
+| Linux Mint 21.x | [onedrive](https://community.linuxmint.com/software/view/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Linux Mint Repositories<br><br>It is recommended that for Linux Mint you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
+| NixOS | [onedrive](https://search.nixos.org/packages?channel=20.09&from=0&size=50&sort=relevance&query=onedrive)|nixpkgs unstable package|❌|✔|❌|❌| Use package `onedrive` either by adding it to `configuration.nix` or by using the command `nix-env -iA .onedrive`. This does not install a service. To install a service, use unstable channel (will stabilize in 20.09) and add `services.onedrive.enable=true` in `configuration.nix`. You can also add a custom package using the `services.onedrive.package` option (recommended since package lags upstream). Enabling the service installs a default package too (based on the channel). You can also add multiple onedrive accounts trivially, see [documentation](https://github.com/NixOS/nixpkgs/pull/77734#issuecomment-575874225). |
+| OpenSuSE | [onedrive](https://software.opensuse.org/package/onedrive) |openSUSE Tumbleweed package|✔|✔|❌|❌| |
+| OpenSuSE Build Service | [onedrive](https://build.opensuse.org/package/show/home:npreining:debian-ubuntu-onedrive/onedrive) | No API Available |✔|✔|✔|✔| Package Build Service for Debian and Ubuntu |
+| Raspbian | [onedrive](https://archive.raspbian.org/raspbian/pool/main/o/onedrive/) |Raspbian Stable package |❌|❌|✔|✔| **Note:** Do not install from Raspbian Package Repositories<br><br>It is recommended that for Raspbian you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) |
+| Slackware | [onedrive](https://slackbuilds.org/result/?search=onedrive&sv=) |SlackBuilds package|✔|✔|❌|❌| |
+| Solus | [onedrive](https://dev.getsol.us/search/query/FB7PIf1jG9Z9/#R) |Solus package|✔|✔|❌|❌| |
+| Ubuntu 20.04 | [onedrive](https://packages.ubuntu.com/focal/onedrive) |Ubuntu 20.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe<br><br>It is recommended that for Ubuntu you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
+| Ubuntu 22.04 | [onedrive](https://packages.ubuntu.com/jammy/onedrive) |Ubuntu 22.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe<br><br>It is recommended that for Ubuntu you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
+| Ubuntu 23.04 | [onedrive](https://packages.ubuntu.com/lunar/onedrive) |Ubuntu 23.04 package |❌|✔|✔|✔| **Note:** Do not install from Ubuntu Universe<br><br>It is recommended that for Ubuntu you install from OpenSuSE Build Service using the Ubuntu Package Install [Instructions](ubuntu-package-install.md) |
+| Void Linux | [onedrive](https://voidlinux.org/packages/?arch=x86_64&q=onedrive) |Void Linux x86_64 package|✔|✔|❌|❌| |
+
+#### Important information for all Ubuntu and Ubuntu based distribution users:
+This information is specifically for the following platforms and distributions:
+* Ubuntu
+* Lubuntu
+* Linux Mint
+* POP OS
+* Peppermint OS
+
+Whilst there are [onedrive](https://packages.ubuntu.com/search?keywords=onedrive&searchon=names&suite=all&section=all) Universe packages available for Ubuntu, do not install 'onedrive' from these Universe packages. The default Universe packages are out-of-date, are not supported and should not be used. If you wish to use a package, it is highly recommended that you utilise the [OpenSuSE Build Service](ubuntu-package-install.md) to install packages for these platforms. If the OpenSuSE Build Service does not cater for your version, your only option is to build from source.
+
+If you wish to change this situation so that you can just use the Universe packages via 'apt install onedrive', consider becoming the Ubuntu package maintainer and contribute back to your community.
+
+## Building from Source - High Level Requirements
+* Build environment must have at least 1GB of memory & 1GB swap space
+* Install the required distribution package dependencies
+* [libcurl](http://curl.haxx.se/libcurl/)
+* [SQLite 3](https://www.sqlite.org/) >= 3.7.15
+* [Digital Mars D Compiler (DMD)](http://dlang.org/download.html) or [LDC – the LLVM-based D Compiler](https://github.com/ldc-developers/ldc)
+
+**Note:** DMD version >= 2.088.0 or LDC version >= 1.18.0 is required to compile this application
+
+### Example for installing DMD Compiler
+```text
+curl -fsS https://dlang.org/install.sh | bash -s dmd
+```
+
+### Example for installing LDC Compiler
+```text
+curl -fsS https://dlang.org/install.sh | bash -s ldc
+```
+
+## Distribution Package Dependencies
+### Dependencies: Ubuntu 16.x
+Ubuntu Linux 16.x LTS reached the end of its five-year LTS window on April 30th 2021 and is no longer supported.
+
+### Dependencies: Ubuntu 18.x / Lubuntu 18.x
+Ubuntu Linux 18.x LTS reached the end of its five-year LTS window on May 31st 2023 and is no longer supported.
+
+### Dependencies: Debian 9
+Debian 9 reached the end of its five-year support window on June 30th 2022 and is no longer supported.
+
+### Dependencies: Ubuntu 20.x -> Ubuntu 23.x / Debian 10 -> Debian 12 - x86_64
+These dependencies are also applicable for all Ubuntu based distributions such as:
+* Lubuntu
+* Linux Mint
+* POP OS
+* Peppermint OS
+```text
+sudo apt install build-essential
+sudo apt install libcurl4-openssl-dev libsqlite3-dev pkg-config git curl
+curl -fsS https://dlang.org/install.sh | bash -s dmd
+```
+For notifications the following is also necessary:
+```text
+sudo apt install libnotify-dev
+```
+
+### Dependencies: CentOS 6.x / RHEL 6.x
+CentOS 6.x and RHEL 6.x reached End of Life status on November 30th 2020 and are no longer supported.
+ +### Dependencies: Fedora < Version 18 / CentOS 7.x / RHEL 7.x +```text +sudo yum groupinstall 'Development Tools' +sudo yum install libcurl-devel sqlite-devel +curl -fsS https://dlang.org/install.sh | bash -s dmd-2.099.0 +``` +For notifications the following is also necessary: +```text +sudo yum install libnotify-devel +``` + +### Dependencies: Fedora > Version 18 / CentOS 8.x / RHEL 8.x / RHEL 9.x +```text +sudo dnf groupinstall 'Development Tools' +sudo dnf install libcurl-devel sqlite-devel +curl -fsS https://dlang.org/install.sh | bash -s dmd +``` +For notifications the following is also necessary: +```text +sudo dnf install libnotify-devel +``` + +### Dependencies: Arch Linux & Manjaro Linux +```text +sudo pacman -S make pkg-config curl sqlite ldc +``` +For notifications the following is also necessary: +```text +sudo pacman -S libnotify +``` + +### Dependencies: Raspbian (ARMHF) and Ubuntu 22.x / Debian 11 / Debian 12 / Raspbian (ARM64) +**Note:** The minimum LDC compiler version required to compile this application is now 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later. + +These instructions were validated using: +* `Linux raspberrypi 5.10.92-v8+ #1514 SMP PREEMPT Mon Jan 17 17:39:38 GMT 2022 aarch64` (2022-01-28-raspios-bullseye-armhf-lite) using Raspberry Pi 3B (revision 1.2) +* `Linux raspberrypi 5.10.92-v8+ #1514 SMP PREEMPT Mon Jan 17 17:39:38 GMT 2022 aarch64` (2022-01-28-raspios-bullseye-arm64-lite) using Raspberry Pi 3B (revision 1.2) +* `Linux ubuntu 5.15.0-1005-raspi #5-Ubuntu SMP PREEMPT Mon Apr 4 12:21:48 UTC 2022 aarch64 aarch64 aarch64 GNU/Linux` (ubuntu-22.04-preinstalled-server-arm64+raspi) using Raspberry Pi 3B (revision 1.2) + +**Note:** Build environment must have at least 1GB of memory & 1GB swap space. Check with `swapon`. + +```text +sudo apt install build-essential +sudo apt install libcurl4-openssl-dev libsqlite3-dev pkg-config git curl ldc +``` +For notifications the following is also necessary: +```text +sudo apt install libnotify-dev +``` + +### Dependencies: Gentoo +```text +sudo emerge app-portage/layman +sudo layman -a dlang +``` +Add ebuild from contrib/gentoo to a local overlay to use. + +For notifications the following is also necessary: +```text +sudo emerge x11-libs/libnotify +``` + +### Dependencies: OpenSuSE Leap 15.0 +```text +sudo zypper addrepo https://download.opensuse.org/repositories/devel:languages:D/openSUSE_Leap_15.0/devel:languages:D.repo +sudo zypper refresh +sudo zypper install gcc git libcurl-devel sqlite3-devel dmd phobos-devel phobos-devel-static +``` +For notifications the following is also necessary: +```text +sudo zypper install libnotify-devel +``` + +### Dependencies: OpenSuSE Leap 15.1 +```text +sudo zypper addrepo https://download.opensuse.org/repositories/devel:languages:D/openSUSE_Leap_15.1/devel:languages:D.repo +sudo zypper refresh +sudo zypper install gcc git libcurl-devel sqlite3-devel dmd phobos-devel phobos-devel-static +``` +For notifications the following is also necessary: +```text +sudo zypper install libnotify-devel +``` + +### Dependencies: OpenSuSE Leap 15.2 +```text +sudo zypper refresh +sudo zypper install gcc git libcurl-devel sqlite3-devel dmd phobos-devel phobos-devel-static +``` +For notifications the following is also necessary: +```text +sudo zypper install libnotify-devel +``` + +## Compilation & Installation +### High Level Steps +1. 
Install the platform dependencies for your Linux OS
+2. Activate your DMD or LDC compiler
+3. Clone the GitHub repository, run configure and make, then install
+4. Deactivate your DMD or LDC compiler
+
+### Building using DMD Reference Compiler
+Before cloning and compiling, if you have installed DMD via curl for your OS, you will need to activate DMD as per the example below:
+```text
+Run `source ~/dlang/dmd-2.088.0/activate` in your shell to use dmd-2.088.0.
+This will setup PATH, LIBRARY_PATH, LD_LIBRARY_PATH, DMD, DC, and PS1.
+Run `deactivate` later on to restore your environment.
+```
+Without performing this step, the compilation process will fail.
+
+**Note:** Depending on your DMD version, substitute `2.088.0` above with the DMD version that is installed.
+
+```text
+git clone https://github.com/abraunegg/onedrive.git
+cd onedrive
+./configure
+make clean; make;
+sudo make install
+```
+
+### Build options
+#### GUI Notification Support
+GUI notification support can be enabled using the `configure` switch `--enable-notifications`.
+
+#### systemd service directory customisation support
+Systemd service files are installed in the appropriate directories on the system, as provided by `pkg-config systemd` settings. If overriding the deduced paths is necessary, the two options `--with-systemdsystemunitdir` (for the Systemd system unit location) and `--with-systemduserunitdir` (for the Systemd user unit location) can be specified. Passing in `no` to one of these options disables service file installation.
+
+#### Additional Compiler Debug
+By passing `--enable-debug` to the `configure` call, `onedrive` is built with additional debug information, which is useful (for example) when profiling with `perf`.
+
+#### Shell Completion Support
+By passing `--enable-completions` to the `configure` call, shell completion functions are installed for `bash`, `zsh` and `fish`. The installation directories are determined as far as possible automatically, but can be overridden by passing `--with-bash-completion-dir=`, `--with-zsh-completion-dir=`, and `--with-fish-completion-dir=` to `configure`.
+
+### Building using a different compiler (for example [LDC](https://wiki.dlang.org/LDC))
+#### ARMHF Architecture (Raspbian) and ARM64 Architecture (Ubuntu 22.x / Debian 11 / Raspbian)
+**Note:** The minimum LDC compiler version required to compile this application is now 1.18.0, which is not available for Debian Buster or distributions based on Debian Buster. You are advised to first upgrade your platform distribution to one that is based on Debian Bullseye (Debian 11) or later.
+
+**Note:** Build environment must have at least 1GB of memory & 1GB swap space. Check with `swapon`.
+```text
+git clone https://github.com/abraunegg/onedrive.git
+cd onedrive
+./configure DC=/usr/bin/ldmd2
+make clean; make
+sudo make install
+```
+
+## Upgrading the client
+If you have installed the client from a distribution package, the client will be updated when the distribution package is updated by the package maintainer and will be updated to the new application version when you perform your package update.
+
+If you have built the client from source, to upgrade your client it is recommended that you first uninstall your existing 'onedrive' binary (see below), then re-install the client by re-cloning, re-compiling and re-installing it to install the new version.
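+
+To illustrate, a minimal sketch of that upgrade flow is shown below. This is a sketch only and assumes you prefer to update an existing repository clone (rather than re-cloning) and that your D compiler is already activated as described above:
+```text
+# Remove the currently installed binary using your existing clone
+sudo make uninstall
+# Update the source, then rebuild and install the new version
+git pull
+./configure
+make clean; make
+sudo make install
+```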
+
+**Note:** Following the uninstall process will remove all client components, including *all* systemd files and any custom files created for specific access such as SharePoint Libraries.
+
+You can optionally choose not to perform this uninstallation step and simply re-install the client by re-cloning, re-compiling and re-installing it. However, the risk here is that you end up with two onedrive client binaries on your system, and your system search path preferences will then determine which binary is used.
+
+**Important:** Before performing any upgrade, it is highly recommended that you stop any running systemd service, if applicable, so that these services are restarted using the updated client version.
+
+Post re-install, to confirm that you have the new version of the client installed, use `onedrive --version` to determine the client version that is now installed.
+
+## Uninstalling the client
+### Uninstalling the client if installed from distribution package
+Follow your distribution documentation to uninstall the package that you installed.
+
+### Uninstalling the client if installed and built from source
+From within your GitHub repository clone, perform the following to remove the 'onedrive' binary:
+```text
+sudo make uninstall
+```
+
+If you are not upgrading your client, to remove your application state and configuration, perform the following additional step:
+```
+rm -rf ~/.config/onedrive
+```
+**Note:** If you are using the `--confdir` option, substitute `~/.config/onedrive` for the correct directory storing your client configuration.
+
+If you want to just delete the application key, but keep the items database:
+```text
+rm -f ~/.config/onedrive/refresh_token
+```
diff --git a/docs/known-issues.md b/docs/known-issues.md
new file mode 100644
index 000000000..d6ac302a2
--- /dev/null
+++ b/docs/known-issues.md
@@ -0,0 +1,60 @@
+# List of Identified Known Issues
+The following points detail known issues associated with this client:
+
+## Renaming or Moving Files in Standalone Mode causes online deletion and re-upload to occur
+**Issue Tracker:** [#876](https://github.com/abraunegg/onedrive/issues/876), [#2579](https://github.com/abraunegg/onedrive/issues/2579)
+
+**Summary:**
+
+Renaming or moving files and/or folders while using the standalone sync option `--sync` results in unnecessary data deletion online and subsequent re-upload.
+
+**Detailed Description:**
+
+In standalone mode (`--sync`), renaming or moving files or folders locally that have already been synchronized leads to the data being deleted online and then re-uploaded in the next synchronization process.
+
+**Technical Explanation:**
+
+This behavior is expected from the client under these specific conditions. Renaming or moving files is interpreted as deleting them from their original location and creating them in a new location. In standalone sync mode, the client lacks the capability to track file system changes (including renames and moves) that occur when it is not running. This limitation is the root cause of the observed 'deletion and re-upload' cycle.
+
+**Recommended Workaround:**
+
+For effective tracking of file and folder renames or moves to new local directories, it is recommended to run the client in service mode (`--monitor`) rather than in standalone mode. This approach allows the client to immediately process these changes, enabling the data to be updated (renamed or moved) in the new location on OneDrive without undergoing deletion and re-upload.
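+
+As a rough illustration of this workaround (a sketch only; adjust the invocation to suit how you normally run the client), the difference is simply which mode the client is started in:
+```
+# Standalone mode - renames or moves made while the client is not running are treated as delete + re-upload
+onedrive --sync
+
+# Service / monitor mode - local renames and moves are detected and processed as they happen
+onedrive --monitor
+```
+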
+ +## Application 'stops' running without any visible reason +**Issue Tracker:** [#494](https://github.com/abraunegg/onedrive/issues/494), [#753](https://github.com/abraunegg/onedrive/issues/753), [#792](https://github.com/abraunegg/onedrive/issues/792), [#884](https://github.com/abraunegg/onedrive/issues/884), [#1162](https://github.com/abraunegg/onedrive/issues/1162), [#1408](https://github.com/abraunegg/onedrive/issues/1408), [#1520](https://github.com/abraunegg/onedrive/issues/1520), [#1526](https://github.com/abraunegg/onedrive/issues/1526) + +**Summary:** + +Users experience sudden shutdowns in a client application during file transfers with Microsoft's Europe Data Centers, likely due to unstable internet or HTTPS inspection issues. This problem, often signaled by an error code of 141, is related to the application's reliance on Curl and OpenSSL. Resolution steps include system updates, seeking support from OS vendors, ISPs, OpenSSL/Curl teams, and providing detailed debug logs to Microsoft for analysis. + +**Detailed Description:** + +The application unexpectedly stops functioning during upload or download operations when using the client. This issue occurs without any apparent reason. Running `echo $?` after the unexpected exit may return an error code of 141. + +This problem predominantly arises when the client interacts with Microsoft's Europe Data Centers. + +**Technical Explanation:** + +The client heavily relies on Curl and OpenSSL for operations with the Microsoft OneDrive service. A common observation during this error is an entry in the HTTPS Debug Log stating: +``` +OpenSSL SSL_read: SSL_ERROR_SYSCALL, errno 104 +``` +To confirm this as the root cause, a detailed HTTPS debug log can be generated with these commands: +``` +--verbose --verbose --debug-https +``` + +This error typically suggests one of the following issues: +* An unstable internet connection between the user and the OneDrive service. +* An issue with HTTPS transparent inspection services that monitor the traffic en route to the OneDrive service. + +**Recommended Resolution:** + +Recommended steps to address this issue include: +* Updating your operating system to the latest version. +* Seeking assistance from your OS vendor. +* Contacting your Internet Service Provider (ISP) or your IT Help Desk. +* Reporting the issue to the OpenSSL and/or Curl teams for improved handling of such connection failures. +* Creating a HTTPS Debug Log during the issue and submitting a support request to Microsoft with the log for their analysis. + +For more in-depth SSL troubleshooting, please read: https://maulwuff.de/research/ssl-debugging.html \ No newline at end of file diff --git a/docs/national-cloud-deployments.md b/docs/national-cloud-deployments.md new file mode 100644 index 000000000..6b348388d --- /dev/null +++ b/docs/national-cloud-deployments.md @@ -0,0 +1,145 @@ +# How to configure access to specific Microsoft Azure deployments +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. + +## Process Overview +In some cases it is a requirement to utilise specific Microsoft Azure cloud deployments to conform with data and security reuqirements that requires data to reside within the geographic borders of that country. 
+Current national clouds that are supported are: +* Microsoft Cloud for US Government +* Microsoft Cloud Germany +* Azure and Office365 operated by 21Vianet in China + +In order to successfully use these specific Microsoft Azure deployments, the following steps are required: +1. Register an application with the Microsoft identity platform using the Azure portal +2. Configure the new application with the appropriate authentication scopes +3. Validate that the authentication / redirect URI is correct for your application registration +4. Configure the onedrive client to use the new application id as provided during application registration +5. Configure the onedrive client to use the right Microsoft Azure deployment region that your application was registered with +6. Authenticate the client + +## Step 1: Register a new application with Microsoft Azure +1. Log into your applicable Microsoft Azure Portal with your applicable Office365 identity: + +| National Cloud Environment | Microsoft Azure Portal | +|---|---| +| Microsoft Cloud for US Government | https://portal.azure.com/ | +| Microsoft Cloud Germany | https://portal.azure.com/ | +| Azure and Office365 operated by 21Vianet | https://portal.azure.cn/ | + +2. Select 'Azure Active Directory' as the service you wish to configure +3. Under 'Manage', select 'App registrations' to register a new application +4. Click 'New registration' +5. Type in the appropriate details required as per below: + +![application_registration](./images/application_registration.jpg) + +6. To save the application registration, click 'Register' and something similar to the following will be displayed: + +![application_registration_done](./images/application_registration_done.jpg) + +**Note:** The Application (client) ID UUID as displayed after client registration, is what is required as the 'application_id' for Step 4 below. 
+ +## Step 2: Configure application authentication scopes +Configure the API permissions as per the following: + +| API / Permissions name | Type | Description | Admin consent required | +|---|---|---|---| +| Files.ReadWrite | Delegated | Have full access to user files | No | +| Files.ReadWrite.All | Delegated | Have full access to all files user can access | No | +| Sites.ReadWrite.All | Delegated | Have full access to all items in all site collections | No | +| offline_access | Delegated | Maintain access to data you have given it access to | No | + +![authentication_scopes](./images/authentication_scopes.jpg) + +## Step 3: Validate that the authentication / redirect URI is correct +Add the appropriate redirect URI for your Azure deployment: + +![authentication_response_uri](./images/authentication_response_uri.jpg) + +A valid entry for the response URI should be one of: +* https://login.microsoftonline.us/common/oauth2/nativeclient (Microsoft Cloud for US Government) +* https://login.microsoftonline.de/common/oauth2/nativeclient (Microsoft Cloud Germany) +* https://login.chinacloudapi.cn/common/oauth2/nativeclient (Azure and Office365 operated by 21Vianet in China) + +For a single-tenant application, it may be necessary to use your specific tenant id instead of "common": +* https://login.microsoftonline.us/example.onmicrosoft.us/oauth2/nativeclient (Microsoft Cloud for US Government) +* https://login.microsoftonline.de/example.onmicrosoft.de/oauth2/nativeclient (Microsoft Cloud Germany) +* https://login.chinacloudapi.cn/example.onmicrosoft.cn/oauth2/nativeclient (Azure and Office365 operated by 21Vianet in China) + +## Step 4: Configure the onedrive client to use new application registration +Update to your 'onedrive' configuration file (`~/.config/onedrive/config`) the following: +```text +application_id = "insert valid entry here" +``` + +This will reconfigure the client to use the new application registration you have created. + +**Example:** +```text +application_id = "22c49a0d-d21c-4792-aed1-8f163c982546" +``` + +## Step 5: Configure the onedrive client to use the specific Microsoft Azure deployment +Update to your 'onedrive' configuration file (`~/.config/onedrive/config`) the following: +```text +azure_ad_endpoint = "insert valid entry here" +``` + +Valid entries are: +* USL4 (Microsoft Cloud for US Government) +* USL5 (Microsoft Cloud for US Government - DOD) +* DE (Microsoft Cloud Germany) +* CN (Azure and Office365 operated by 21Vianet in China) + +This will configure your client to use the correct Azure AD and Graph endpoints as per [https://docs.microsoft.com/en-us/graph/deployments](https://docs.microsoft.com/en-us/graph/deployments) + +**Example:** +```text +azure_ad_endpoint = "USL4" +``` + +If the Microsoft Azure deployment does not support multi-tenant applications, update to your 'onedrive' configuration file (`~/.config/onedrive/config`) the following: +```text +azure_tenant_id = "insert valid entry here" +``` + +This will configure your client to use the specified tenant id in its Azure AD and Graph endpoint URIs, instead of "common". +The tenant id may be the GUID Directory ID (formatted "00000000-0000-0000-0000-000000000000"), or the fully qualified tenant name (e.g. "example.onmicrosoft.us"). +The GUID Directory ID may be located in the Azure administation page as per [https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id](https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id). 
Note that you may need to go to your national-deployment-specific administration page, rather than following the links within that document. +The tenant name may be obtained by following the PowerShell instructions on [https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id](https://docs.microsoft.com/en-us/onedrive/find-your-office-365-tenant-id); it is shown as the "TenantDomain" upon completion of the "Connect-AzureAD" command. + +**Example:** +```text +azure_tenant_id = "example.onmicrosoft.us" +# or +azure_tenant_id = "0c4be462-a1ab-499b-99e0-da08ce52a2cc" +``` + +## Step 6: Authenticate the client +Run the application without any additional command switches. + +You will be asked to open a specific URL by using your web browser where you will have to login into your Microsoft Account and give the application the permission to access your files. After giving permission to the application, you will be redirected to a blank page. Copy the URI of the blank page into the application. +```text +[user@hostname ~]$ onedrive + +Authorize this app visiting: + +https://..... + +Enter the response uri: + +``` + +**Example:** +``` +[user@hostname ~]$ onedrive +Authorize this app visiting: + +https://login.microsoftonline.com/common/oauth2/v2.0/authorize?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient + +Enter the response uri: https://login.microsoftonline.com/common/oauth2/nativeclient?code= + +Application has been successfully authorised, however no additional command switches were provided. + +Please use --help for further assistance in regards to running this application. +``` diff --git a/docs/podman.md b/docs/podman.md new file mode 100644 index 000000000..4f3474f34 --- /dev/null +++ b/docs/podman.md @@ -0,0 +1,361 @@ +# Run the OneDrive Client for Linux under Podman +This client can be run as a Podman container, with 3 available container base options for you to choose from: + +| Container Base | Docker Tag | Description | i686 | x86_64 | ARMHF | AARCH64 | +|----------------|-------------|----------------------------------------------------------------|:------:|:------:|:-----:|:-------:| +| Alpine Linux | edge-alpine | Podman container based on Alpine 3.18 using 'master' |❌|✔|❌|✔| +| Alpine Linux | alpine | Podman container based on Alpine 3.18 using latest release |❌|✔|❌|✔| +| Debian | debian | Podman container based on Debian Stable using latest release |✔|✔|✔|✔| +| Debian | edge | Podman container based on Debian Stable using 'master' |✔|✔|✔|✔| +| Debian | edge-debian | Podman container based on Debian Stable using 'master' |✔|✔|✔|✔| +| Debian | latest | Podman container based on Debian Stable using latest release |✔|✔|✔|✔| +| Fedora | edge-fedora | Podman container based on Fedora 38 using 'master' |❌|✔|❌|✔| +| Fedora | fedora | Podman container based on Fedora 38 using latest release |❌|✔|❌|✔| + +These containers offer a simple monitoring-mode service for the OneDrive Client for Linux. + +The instructions below have been validated on: +* Fedora 38 + +The instructions below will utilise the 'edge' tag, however this can be substituted for any of the other docker tags such as 'latest' from the table above if desired. + +The 'edge' Docker Container will align closer to all documentation and features, where as 'latest' is the release version from a static point in time. 
The 'latest' tag however may contain bugs and/or issues that will have been fixed, and those fixes are contained in 'edge'. + +Additionally there are specific version release tags for each release. Refer to https://hub.docker.com/r/driveone/onedrive/tags for any other Docker tags you may be interested in. + +**Note:** The below instructions for podman has been tested and validated when logging into the system as an unprivileged user (non 'root' user). + +## High Level Configuration Steps +1. Install 'podman' as per your distribution platform's instructions if not already installed. +2. Disable 'SELinux' as per your distribution platform's instructions +3. Test 'podman' by running a test container +4. Prepare the required podman volumes to store the configuration and data +5. Run the 'onedrive' container and perform authorisation +6. Running the 'onedrive' container under 'podman' + +## Configuration Steps + +### 1. Install 'podman' on your platform +Install 'podman' as per your distribution platform's instructions if not already installed. + +### 2. Disable SELinux on your platform +In order to run the Docker container under 'podman', SELinux must be disabled. Without doing this, when the application is authenticated in the steps below, the following error will be presented: +```text +ERROR: The local file system returned an error with the following message: + Error Message: /onedrive/conf/refresh_token: Permission denied + +The database cannot be opened. Please check the permissions of ~/.config/onedrive/items.sqlite3 +``` +The only known work-around for the above problem at present is to disable SELinux. Please refer to your distribution platform's instructions on how to perform this step. + +* Fedora: https://docs.fedoraproject.org/en-US/quick-docs/selinux-changing-states-and-modes/#_disabling_selinux +* Red Hat Enterprise Linux: https://access.redhat.com/solutions/3176 + +Post disabling SELinux and reboot your system, confirm that `getenforce` returns `Disabled`: +```text +$ getenforce +Disabled +``` + +If you are still experiencing permission issues despite disabling SELinux, please read https://www.redhat.com/sysadmin/container-permission-denied-errors + +### 3. Test 'podman' on your platform +Test that 'podman' is operational for your 'non-root' user, as per below: +```bash +[alex@fedora38-podman ~]$ podman pull fedora +Resolved "fedora" as an alias (/etc/containers/registries.conf.d/000-shortnames.conf) +Trying to pull registry.fedoraproject.org/fedora:latest... +Getting image source signatures +Copying blob b30887322388 done | +Copying config a1cd3cbf8a done | +Writing manifest to image destination +a1cd3cbf8adaa422629f2fcdc629fd9297138910a467b11c66e5ddb2c2753dff +[alex@fedora38-podman ~]$ podman run fedora /bin/echo "Welcome to the Podman World" +Welcome to the Podman World +[alex@fedora38-podman ~]$ +``` + +### 4. Configure the required podman volumes +The 'onedrive' Docker container requires 2 podman volumes to operate: +* Config Volume +* Data Volume + +The first volume is the configuration volume that stores all the applicable application configuration + current runtime state. In a non-containerised environment, this normally resides in `~/.config/onedrive` - in a containerised environment this is stored in the volume tagged as `/onedrive/conf` + +The second volume is the data volume, where all your data from Microsoft OneDrive is stored locally. 
This volume is mapped to an actual directory on your local filesystem and is stored in the volume tagged as `/onedrive/data`
+
+#### 4.1 Prepare the 'config' volume
+Create the 'config' volume with the following command:
+```bash
+podman volume create onedrive_conf
+```
+
+This will create a podman volume labeled `onedrive_conf`, where all configuration of your onedrive account will be stored. You can add a custom config file in this location at a later point in time if required.
+
+#### 4.2 Prepare the 'data' volume
+Create the 'data' volume with the following command:
+```bash
+podman volume create onedrive_data
+```
+
+This will create a podman volume labeled `onedrive_data` and will map to a path on your local filesystem. This is where your data from Microsoft OneDrive will be stored. Keep in mind that:
+
+* The owner of this specified folder must not be root
+* Podman will attempt to change the permissions of the volume to the user the container is configured to run as
+
+**NOTE:** Issues occur when this target folder is a mounted folder of an external system (NAS, SMB mount, USB drive, etc.) as the 'mount' itself is owned by 'root'. If this is your use case, you *must* ensure your normal user can mount your desired target without having the target mounted by 'root'. If you do not fix this, your Podman container will fail to start with the following error message:
+```bash
+ROOT level privileges prohibited!
+```
+
+### 5. First run of the Docker container under podman and performing authorisation
+The 'onedrive' client within the container first needs to be authorised with your Microsoft account. This is achieved by initially running podman in interactive mode.
+
+Run the podman image with the commands below and make sure to change the value of `ONEDRIVE_DATA_DIR` to the actual onedrive data directory on your filesystem that you wish to use (e.g. `export ONEDRIVE_DATA_DIR="/home/abraunegg/OneDrive"`).
+
+**Important:** The 'target' folder of `ONEDRIVE_DATA_DIR` must exist before running the podman container. The script below will create `ONEDRIVE_DATA_DIR` so that it exists locally for the podman volume mapping to occur.
+
+It is also a requirement that the container be run using a non-root UID and GID; you must provide a non-root UID and GID (e.g. `export ONEDRIVE_UID=1000` and `export ONEDRIVE_GID=1000`). The script below will use `id` to evaluate your system environment and use the correct values.
+```bash
+export ONEDRIVE_DATA_DIR="${HOME}/OneDrive"
+export ONEDRIVE_UID=`id -u`
+export ONEDRIVE_GID=`id -g`
+mkdir -p ${ONEDRIVE_DATA_DIR}
+podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \
+    -v onedrive_conf:/onedrive/conf:U,Z \
+    -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" \
+    driveone/onedrive:edge
+```
+
+**Important:** In some scenarios, 'podman' sets the configuration and data directories to a different UID & GID than the ones specified. To resolve this situation, you must run 'podman' with the `--userns=keep-id` flag to ensure 'podman' uses the UID and GID as specified. 
The updated script example when using `--userns=keep-id` is below: + +```bash +export ONEDRIVE_DATA_DIR="${HOME}/OneDrive" +export ONEDRIVE_UID=`id -u` +export ONEDRIVE_GID=`id -g` +mkdir -p ${ONEDRIVE_DATA_DIR} +podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ + --userns=keep-id \ + -v onedrive_conf:/onedrive/conf:U,Z \ + -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" \ + driveone/onedrive:edge +``` + + +**Important:** If you plan to use the 'podman' built in auto-updating of container images described in 'Systemd Service & Auto Updating' below, you must pass an additional argument to set a label during the first run. The updated script example to support auto-updating of container images is below: + +```bash +export ONEDRIVE_DATA_DIR="${HOME}/OneDrive" +export ONEDRIVE_UID=`id -u` +export ONEDRIVE_GID=`id -g` +mkdir -p ${ONEDRIVE_DATA_DIR} +podman run -it --name onedrive --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ + --userns=keep-id \ + -v onedrive_conf:/onedrive/conf:U,Z \ + -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" \ + -e PODMAN=1 \ + --label "io.containers.autoupdate=image" \ + driveone/onedrive:edge +``` + +When the Podman container successfully starts: +* You will be asked to open a specific link using your web browser +* Login to your Microsoft Account and give the application the permission +* After giving the permission, you will be redirected to a blank page +* Copy the URI of the blank page into the application prompt to authorise the application + +Once the 'onedrive' application is authorised, the client will automatically start monitoring your `ONEDRIVE_DATA_DIR` for data changes to be uploaded to OneDrive. Files stored on OneDrive will be downloaded to this location. + +If the client is working as expected, you can detach from the container with Ctrl+p, Ctrl+q. + +### 6. Running the 'onedrive' container under 'podman' + +#### 6.1 Check if the monitor service is running +```bash +podman ps -f name=onedrive +``` + +#### 6.2 Show 'onedrive' runtime logs +```bash +podman logs onedrive +``` + +#### 6.3 Stop running 'onedrive' container +```bash +podman stop onedrive +``` + +#### 6.4 Start 'onedrive' container +```bash +podman start onedrive +``` + +#### 6.5 Remove 'onedrive' container +```bash +podman rm -f onedrive +``` + + +## Advanced Usage + +### Systemd Service & Auto Updating + +Podman supports running containers as a systemd service and also auto updating of the container images. Using the existing running container you can generate a systemd unit file to be installed by the **root** user. To have your container image auto-update with podman, it must first be created with the label `"io.containers.autoupdate=image"` mentioned in step 5 above. 
+ +``` +cd /tmp +podman generate systemd --new --restart-policy on-failure --name -f onedrive +/tmp/container-onedrive.service + +# copy the generated systemd unit file to the systemd path and reload the daemon + +cp -Z ~/container-onedrive.service /usr/lib/systemd/system +systemctl daemon-reload + +#optionally enable it to startup on boot + +systemctl enable container-onedrive.service + +#check status + +systemctl status container-onedrive + +#start/stop/restart container as a systemd service + +systemctl stop container-onedrive +systemctl start container-onedrive +``` + +To update the image using podman (Ad-hoc) +``` +podman auto-update +``` + +To update the image using systemd (Automatic/Scheduled) +``` +# Enable the podman-auto-update.timer service at system start: + +systemctl enable podman-auto-update.timer + +# Start the service + +systemctl start podman-auto-update.timer + +# Containers with the autoupdate label will be updated on the next scheduled timer + +systemctl list-timers --all +``` + +### Editing the running configuration and using a 'config' file +The 'onedrive' client should run in default configuration, however you can change this default configuration by placing a custom config file in the `onedrive_conf` podman volume. First download the default config from [here](https://raw.githubusercontent.com/abraunegg/onedrive/master/config) +Then put it into your onedrive_conf volume path, which can be found with: + +```bash +podman volume inspect onedrive_conf +``` +Or you can map your own config folder to the config volume. Make sure to copy all files from the volume into your mapped folder first. + +The detailed document for the config can be found here: [Configuration](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#configuration) + +### Syncing multiple accounts +There are many ways to do this, the easiest is probably to do the following: +1. Create a second podman config volume (replace `work` with your desired name): `podman volume create onedrive_conf_work` +2. And start a second podman monitor container (again replace `work` with your desired name): + +```bash +export ONEDRIVE_DATA_DIR_WORK="/home/abraunegg/OneDriveWork" +export ONEDRIVE_UID=`id -u` +export ONEDRIVE_GID=`id -g` +mkdir -p ${ONEDRIVE_DATA_DIR_WORK} +podman run -it --name onedrive_work --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ + --userns=keep-id \ + -v onedrive_conf_work:/onedrive/conf:U,Z \ + -v "${ONEDRIVE_DATA_DIR_WORK}:/onedrive/data:U,Z" \ + -e PODMAN=1 \ + --label "io.containers.autoupdate=image" \ + driveone/onedrive:edge +``` + +## Supported Podman Environment Variables +| Variable | Purpose | Sample Value | +| ---------------- | --------------------------------------------------- |:-------------:| +| ONEDRIVE_UID | UserID (UID) to run as | 1000 | +| ONEDRIVE_GID | GroupID (GID) to run as | 1000 | +| ONEDRIVE_VERBOSE | Controls "--verbose" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_DEBUG | Controls "--verbose --verbose" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_DEBUG_HTTPS | Controls "--debug-https" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_RESYNC | Controls "--resync" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_DOWNLOADONLY | Controls "--download-only" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_UPLOADONLY | Controls "--upload-only" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_NOREMOTEDELETE | Controls "--no-remote-delete" switch on onedrive sync. 
Default is 0 | 1 | +| ONEDRIVE_LOGOUT | Controls "--logout" switch. Default is 0 | 1 | +| ONEDRIVE_REAUTH | Controls "--reauth" switch. Default is 0 | 1 | +| ONEDRIVE_AUTHFILES | Controls "--auth-files" option. Default is "" | "authUrl:responseUrl" | +| ONEDRIVE_AUTHRESPONSE | Controls "--auth-response" option. Default is "" | See [here](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md#authorize-the-application-with-your-onedrive-account) | +| ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | +| ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. Default = "" | "mydir" | +| ONEDRIVE_DRYRUN | Controls "--dry-run" option. Default is 0 | 1 | + +### Environment Variables Usage Examples +**Verbose Output:** +```bash +podman run -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:edge +``` +**Debug Output:** +```bash +podman run -e ONEDRIVE_DEBUG=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:edge +``` +**Perform a --resync:** +```bash +podman run -e ONEDRIVE_RESYNC=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:edge +``` +**Perform a --resync and --verbose:** +```bash +podman run -e ONEDRIVE_RESYNC=1 -e ONEDRIVE_VERBOSE=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:edge +``` +**Perform a --logout and re-authenticate:** +```bash +podman run -it -e ONEDRIVE_LOGOUT=1 -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" driveone/onedrive:edge +``` + +## Building a custom Podman image +You can also build your own image instead of pulling the one from [hub.docker.com](https://hub.docker.com/r/driveone/onedrive): +```bash +git clone https://github.com/abraunegg/onedrive +cd onedrive +podman build . -t local-onedrive -f contrib/docker/Dockerfile +``` + +There are alternate, smaller images available by building +Dockerfile-debian or Dockerfile-alpine. These [multi-stage builder pattern](https://docs.docker.com/develop/develop-images/multistage-build/) +Dockerfiles require Docker version at least 17.05. + +### How to build and run a custom Podman image based on Debian +``` bash +podman build . -t local-ondrive-debian -f contrib/docker/Dockerfile-debian +podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-ondrive-debian:latest +``` + +### How to build and run a custom Podman image based on Alpine Linux +``` bash +podman build . -t local-ondrive-alpine -f contrib/docker/Dockerfile-alpine +podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-ondrive-alpine:latest +``` + +### How to build and run a custom Podman image for ARMHF (Raspberry Pi) +Compatible with: +* Raspberry Pi +* Raspberry Pi 2 +* Raspberry Pi Zero +* Raspberry Pi 3 +* Raspberry Pi 4 +``` bash +podman build . 
-t local-onedrive-armhf -f contrib/docker/Dockerfile-debian +podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-onedrive-armhf:latest +``` + +### How to build and run a custom Podman image for AARCH64 Platforms +``` bash +podman build . -t local-onedrive-aarch64 -f contrib/docker/Dockerfile-debian +podman run -v onedrive_conf:/onedrive/conf:U,Z -v "${ONEDRIVE_DATA_DIR}:/onedrive/data:U,Z" --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" --userns=keep-id local-onedrive-aarch64:latest +``` diff --git a/docs/privacy-policy.md b/docs/privacy-policy.md new file mode 100644 index 000000000..64fe1dd3c --- /dev/null +++ b/docs/privacy-policy.md @@ -0,0 +1,65 @@ +# Privacy Policy +Effective Date: May 16 2018 + +## Introduction + +This Privacy Policy outlines how OneDrive Client for Linux ("we," "our," or "us") collects, uses, and protects information when you use our software ("OneDrive Client for Linux"). We respect your privacy and are committed to ensuring the confidentiality and security of any information you provide while using the Software. + +## Information We Do Not Collect + +We want to be transparent about the fact that we do not collect any personal data, usage data, or tracking data through the Software. This means: + +1. **No Personal Data**: We do not collect any information that can be used to personally identify you, such as your name, email address, phone number, or physical address. + +2. **No Usage Data**: We do not collect data about how you use the Software, such as the features you use, the duration of your sessions, or any interactions within the Software. + +3. **No Tracking Data**: We do not use cookies or similar tracking technologies to monitor your online behavior or track your activities across websites or apps. + +## How We Use Your Information + +Since we do not collect any personal, usage, or tracking data, there is no information for us to use for any purpose. + +## Third-Party Services + +The Software may include links to third-party websites or services, but we do not have control over the privacy practices or content of these third-party services. We encourage you to review the privacy policies of any third-party services you access through the Software. + +## Children's Privacy + +Since we do not collect any personal, usage, or tracking data, there is no restriction on the use of this application by anyone under the age of 18. + +## Information You Choose to Share + +While we do not collect personal data, usage data, or tracking data through the Software, there may be instances where you voluntarily choose to share information with us, particularly when submitting bug reports. These bug reports may contain sensitive information such as account details, file names, and directory names. It's important to note that these details are included in the logs and debug logs solely for the purpose of diagnosing and resolving technical issues with the Software. + +We want to emphasize that, even in these cases, we do not have access to your actual data. The logs and debug logs provided in bug reports are used exclusively for technical troubleshooting and debugging purposes. We take measures to treat this information with the utmost care, and it is only accessible to our technical support and development teams. We do not use this information for any other purpose, and we have strict security measures in place to protect it. 
+ +## Protecting Your Sensitive Data + +We are committed to safeguarding your sensitive data and maintaining its confidentiality. To ensure its protection: + +1. **Limited Access**: Only authorized personnel within our technical support and development teams have access to the logs and debug logs containing sensitive data, and they are trained in handling this information securely. + +2. **Data Encryption**: We use industry-standard encryption protocols to protect the transmission and storage of sensitive data. + +3. **Data Retention**: We retain bug report data for a limited time necessary for resolving the reported issue. Once the issue is resolved, we promptly delete or anonymize the data. + +4. **Security Measures**: We employ robust security measures to prevent unauthorized access, disclosure, or alteration of sensitive data. + +By submitting a bug report, you acknowledge and consent to the inclusion of sensitive information in logs and debug logs for the sole purpose of addressing technical issues with the Software. + +## Your Responsibilities + +While we take measures to protect your sensitive data, it is essential for you to exercise caution when submitting bug reports. Please refrain from including any sensitive or personally identifiable information that is not directly related to the technical issue you are reporting. You have the option to redact or obfuscate sensitive details in bug reports to further protect your data. + +## Changes to this Privacy Policy + +We may update this Privacy Policy from time to time to reflect changes in our practices or for other operational, legal, or regulatory reasons. We will notify you of any material changes by posting the updated Privacy Policy on our website or through the Software. We encourage you to review this Privacy Policy periodically. + +## Contact Us + +If you have any questions or concerns about this Privacy Policy or our privacy practices, please contact us at support@mynas.com.au or via GitHub (https://github.com/abraunegg/onedrive) + +## Conclusion + +By using the Software, you agree to the terms outlined in this Privacy Policy. If you do not agree with any part of this policy, please discontinue the use of the Software. + diff --git a/docs/sharepoint-libraries.md b/docs/sharepoint-libraries.md new file mode 100644 index 000000000..d1714d4ed --- /dev/null +++ b/docs/sharepoint-libraries.md @@ -0,0 +1,228 @@ +# How to configure OneDrive SharePoint Shared Library sync +**WARNING:** Several users have reported files being overwritten causing data loss as a result of using this client with SharePoint Libraries when running as a systemd service. + +When this has been investigated, the following has been noted as potential root causes: +* File indexing application such as Baloo File Indexer or Tracker3 constantly indexing your OneDrive data +* The use of WPS Office and how it 'saves' files by deleting the existing item and replaces it with the saved data + +Additionally there could be a yet unknown bug with the client, however all debugging and data provided previously shows that an 'external' process to the 'onedrive' application modifies the files triggering the undesirable upload to occur. + +**Possible Preventative Actions:** +* Disable all File Indexing for your SharePoint Library data. It is out of scope to detail on how you should do this. +* Disable using a systemd service for syncing your SharePoint Library data. +* Do not use WPS Office to edit your documents. 
Use OpenOffice or LibreOffice as these do not exhibit the same 'delete to save' action that WPS Office has. + +Additionally, please use caution when using this client with SharePoint. + +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. + +## Process Overview +Syncing a OneDrive SharePoint library requires additional configuration for your 'onedrive' client: +1. Login to OneDrive and under 'Shared Libraries' obtain the shared library name +2. Query that shared library name using the client to obtain the required configuration details +3. Create a unique local folder which will be the SharePoint Library 'root' +4. Configure the client's config file with the required 'drive_id' +5. Test the configuration using '--dry-run' +6. Sync the SharePoint Library as required + +**Note:** The `--get-O365-drive-id` process below requires a fully configured 'onedrive' configuration so that the applicable Drive ID for the given Office 365 SharePoint Shared Library can be determined. It is highly recommended that you do not use the application 'default' configuration directory for any SharePoint Site, and configure separate items for each site you wish to use. + +## 1. Listing available OneDrive SharePoint Libraries +Login to the OneDrive web interface and determine which shared library you wish to configure the client for: +![shared_libraries](./images/SharedLibraries.jpg) + +## 2. Query OneDrive API to obtain required configuration details +Run the following command using the 'onedrive' client to query the OneDrive API to obtain the required 'drive_id' of the SharePoint Library that you wish to sync: +```text +onedrive --get-O365-drive-id '' +``` +This will return something similar to the following: +```text +Configuration file successfully loaded +Configuring Global Azure AD Endpoints +Initializing the Synchronization Engine ... +Office 365 Library Name Query: +----------------------------------------------- +Site Name: +Library Name: +drive_id: b!6H_y8B...xU5 +Library URL: +----------------------------------------------- +``` +If there are no matches to the site you are attempting to search, the following will be displayed: +```text +Configuration file successfully loaded +Configuring Global Azure AD Endpoints +Initializing the Synchronization Engine ... +Office 365 Library Name Query: blah + +ERROR: The requested SharePoint site could not be found. Please check it's name and your permissions to access the site. + +The following SharePoint site names were returned: + * + * + ... + * +``` +This list of site names can be used as a basis to search for the correct site for which you are searching + +## 3. Create a new configuration directory and sync location for this SharePoint Library +Create a new configuration directory for this SharePoint Library in the following manner: +```text +mkdir ~/.config/SharePoint_My_Library_Name +``` + +Create a new local folder to store the SharePoint Library data in: +```text +mkdir ~/SharePoint_My_Library_Name +``` + +**Note:** Do not use spaces in the directory name, use '_' as a replacement + +## 4. 
Configure SharePoint Library config file with the required 'drive_id' & 'sync_dir' options +Download a copy of the default configuration file by downloading this file from GitHub and saving this file in the directory created above: +```text +wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/SharePoint_My_Library_Name/config +``` + +Update your 'onedrive' configuration file (`~/.config/SharePoint_My_Library_Name/config`) with the local folder where you will store your data: +```text +sync_dir = "~/SharePoint_My_Library_Name" +``` + +Update your 'onedrive' configuration file(`~/.config/SharePoint_My_Library_Name/config`) with the 'drive_id' value obtained in the steps above: +```text +drive_id = "insert the drive_id value from above here" +``` +The OneDrive client will now be configured to sync this SharePoint shared library to your local system and the location you have configured. + +**Note:** After changing `drive_id`, you must perform a full re-synchronization by adding `--resync` to your existing command line. + +## 5. Validate and Test the configuration +Validate your new configuration using the `--display-config` option to validate you have configured the application correctly: +```text +onedrive --confdir="~/.config/SharePoint_My_Library_Name" --display-config +``` + +Test your new configuration using the `--dry-run` option to validate the application configuration: +```text +onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --verbose --dry-run +``` + +**Note:** As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration. + +## 6. Sync the SharePoint Library as required +Sync the SharePoint Library to your system with either `--synchronize` or `--monitor` operations: +```text +onedrive --confdir="~/.config/SharePoint_My_Library_Name" --synchronize --verbose +``` + +```text +onedrive --confdir="~/.config/SharePoint_My_Library_Name" --monitor --verbose +``` + +**Note:** As this is a *new* configuration, the application will be required to be re-authorised the first time this command is run with the new configuration. + +## 7. Enable custom systemd service for SharePoint Library +Systemd can be used to automatically run this configuration in the background, however, a unique systemd service will need to be setup for this SharePoint Library instance + +In order to automatically start syncing each SharePoint Library, you will need to create a service file for each SharePoint Library. 
From the applicable 'systemd folder' where the applicable systemd service file exists: +* RHEL / CentOS: `/usr/lib/systemd/system` +* Others: `/usr/lib/systemd/user` and `/lib/systemd/system` + +### Step1: Create a new systemd service file +#### Red Hat Enterprise Linux, CentOS Linux +Copy the required service file to a new name: +```text +sudo cp /usr/lib/systemd/system/onedrive.service /usr/lib/systemd/system/onedrive-SharePoint_My_Library_Name.service +``` +or +```text +sudo cp /usr/lib/systemd/system/onedrive@.service /usr/lib/systemd/system/onedrive-SharePoint_My_Library_Name@.service +``` + +#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora +Copy the required service file to a new name: +```text +sudo cp /usr/lib/systemd/user/onedrive.service /usr/lib/systemd/user/onedrive-SharePoint_My_Library_Name.service +``` +or +```text +sudo cp /lib/systemd/system/onedrive@.service /lib/systemd/system/onedrive-SharePoint_My_Library_Name@.service +``` + +### Step 2: Edit new systemd service file +Edit the new systemd file, updating the line beginning with `ExecStart` so that the confdir mirrors the one you used above: +```text +ExecStart=/usr/local/bin/onedrive --monitor --confdir="/full/path/to/config/dir" +``` + +Example: +```text +ExecStart=/usr/local/bin/onedrive --monitor --confdir="/home/myusername/.config/SharePoint_My_Library_Name" +``` + +**Note:** When running the client manually, `--confdir="~/.config/......` is acceptable. In a systemd configuration file, the full path must be used. The `~` must be expanded. + +### Step 3: Enable the new systemd service +Once the file is correctly editied, you can enable the new systemd service using the following commands. + +#### Red Hat Enterprise Linux, CentOS Linux +```text +systemctl enable onedrive-SharePoint_My_Library_Name +systemctl start onedrive-SharePoint_My_Library_Name +``` + +#### Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora +```text +systemctl --user enable onedrive-SharePoint_My_Library_Name +systemctl --user start onedrive-SharePoint_My_Library_Name +``` +or +```text +systemctl --user enable onedrive-SharePoint_My_Library_Name@myusername.service +systemctl --user start onedrive-SharePoint_My_Library_Name@myusername.service +``` + +### Step 4: Viewing systemd status and logs for the custom service +#### Viewing systemd service status - Red Hat Enterprise Linux, CentOS Linux +```text +systemctl status onedrive-SharePoint_My_Library_Name +``` + +#### Viewing systemd service status - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora +```text +systemctl --user status onedrive-SharePoint_My_Library_Name +``` + +#### Viewing journalctl systemd logs - Red Hat Enterprise Linux, CentOS Linux +```text +journalctl --unit=onedrive-SharePoint_My_Library_Name -f +``` + +#### Viewing journalctl systemd logs - Others such as Arch, Ubuntu, Debian, OpenSuSE, Fedora +```text +journalctl --user --unit=onedrive-SharePoint_My_Library_Name -f +``` + +### Step 5: (Optional) Run custom systemd service at boot without user login +In some cases it may be desirable for the systemd service to start without having to login as your 'user' + +All the systemd steps above that utilise the `--user` option, will run the systemd service as your particular user. As such, the systemd service will not start unless you actually login to your system. 
+ +To avoid this issue, you need to reconfigure your 'user' account so that the systemd services you have created will startup without you having to login to your system: +```text +loginctl enable-linger +``` + +Example: +```text +alex@ubuntu-headless:~$ loginctl enable-linger alex +``` + +## 8. Configuration for a SharePoint Library is complete +The 'onedrive' client configuration for this particular SharePoint Library is now complete. + +# How to configure multiple OneDrive SharePoint Shared Library sync +Create a new configuration as per the process above. Repeat these steps for each SharePoint Library that you wish to use. diff --git a/docs/terms-of-service.md b/docs/terms-of-service.md new file mode 100644 index 000000000..cdf7c4328 --- /dev/null +++ b/docs/terms-of-service.md @@ -0,0 +1,54 @@ +# OneDrive Client for Linux - Software Service Terms of Service + +## 1. Introduction + +These Terms of Service ("Terms") govern your use of the OneDrive Client for Linux ("Application") software and related Microsoft OneDrive services ("Service") provided by Microsoft. By accessing or using the Service, you agree to comply with and be bound by these Terms. If you do not agree to these Terms, please do not use the Service. + +## 2. License Compliance + +The OneDrive Client for Linux software is licensed under the GNU General Public License, version 3.0 (the "GPLv3"). Your use of the software must comply with the terms and conditions of the GPLv3. A copy of the GPLv3 can be found here: https://www.gnu.org/licenses/gpl-3.0.en.html + +## 3. Use of the Service + +### 3.1. Access and Accounts + +You may need to create an account or provide personal information to access certain features of the Service. You are responsible for maintaining the confidentiality of your account information and are solely responsible for all activities that occur under your account. + +### 3.2. Prohibited Activities + +You agree not to: + +- Use the Service in any way that violates applicable laws or regulations. +- Use the Service to engage in any unlawful, harmful, or fraudulent activity. +- Use the Service in any manner that disrupts, damages, or impairs the Service. + +## 4. Intellectual Property + +The OneDrive Client for Linux software is subject to the GPLv3, and you must respect all copyrights, trademarks, and other intellectual property rights associated with the software. Any contributions you make to the software must also comply with the GPLv3. + +## 5. Disclaimer of Warranties + +The OneDrive Client for Linux software is provided "as is" without any warranties, either expressed or implied. We do not guarantee that the use of the Application will be error-free or uninterrupted. + +Microsoft is not responsible for OneDrive Client for Linux. Any issues or problems with OneDrive Client for Linux should be raised on GitHub at https://github.com/abraunegg/onedrive or email support@mynas.com.au + +OneDrive Client for Linux is not responsible for the Microsoft OneDrive Service or the Microsoft Graph API Service that this Application utilizes. Any issue with either Microsoft OneDrive or Microsoft Graph API should be raised with Microsoft via their support channel in your country. + +## 6. 
Limitation of Liability
+
+To the fullest extent permitted by law, we shall not be liable for any direct, indirect, incidental, special, consequential, or punitive damages, or any loss of profits or revenues, whether incurred directly or indirectly, or any loss of data, use, goodwill, or other intangible losses, resulting from (a) your use or inability to use the Service, or (b) any other matter relating to the Service.
+
+This limitation of liability explicitly relates to the use of the OneDrive Client for Linux software and does not affect your rights under the GPLv3.
+
+## 7. Changes to Terms
+
+We reserve the right to update or modify these Terms at any time without prior notice. Any changes will be effective immediately upon posting on GitHub. Your continued use of the Service after the posting of changes constitutes your acceptance of such changes. Changes can be reviewed on GitHub.
+
+## 8. Governing Law
+
+These Terms shall be governed by and construed in accordance with the laws of Australia, without regard to its conflict of law principles.
+
+## 9. Contact Us
+
+If you have any questions or concerns about these Terms, please contact us at https://github.com/abraunegg/onedrive or email support@mynas.com.au
+
diff --git a/docs/ubuntu-package-install.md b/docs/ubuntu-package-install.md
new file mode 100644
index 000000000..df20db923
--- /dev/null
+++ b/docs/ubuntu-package-install.md
@@ -0,0 +1,420 @@
+# Installation of 'onedrive' package on Debian and Ubuntu
+
+This document covers the appropriate steps to install the 'onedrive' client using the provided packages for Debian and Ubuntu.
+
+#### Important information for all Ubuntu and Ubuntu-based distribution users:
+This information is specifically for the following platforms and distributions:
+
+* Lubuntu
+* Linux Mint
+* POP OS
+* Peppermint OS
+* Raspbian
+* Ubuntu
+
+Whilst there are [onedrive](https://packages.ubuntu.com/search?keywords=onedrive&searchon=names&suite=all&section=all) Universe packages available for Ubuntu, do not install 'onedrive' from these Universe packages. The default Ubuntu Universe packages are out-of-date, unsupported, and should not be used.
+
+## Determine which instructions to use
+Ubuntu and its clones are based on various different releases; you must therefore use the correct instructions below, otherwise you may run into package dependency issues and be unable to install the client.
+
+### Step 1: Remove any configured PPA and associated 'onedrive' package and systemd service files
+Many Internet 'help' pages provide inconsistent details on how to install the OneDrive Client for Linux. A number of these websites continue to point users to install the client via the yann1ck PPA repository; however, this PPA no longer exists and should not be used.
+
+To remove the PPA repository and the older client, perform the following actions:
+```text
+sudo apt remove onedrive
+sudo add-apt-repository --remove ppa:yann1ck/onedrive
+```
+
+Additionally, Ubuntu and its clones have a bad habit of creating a 'default' systemd service file when installing the 'onedrive' package, so that the client will automatically run after being authenticated. This systemd entry is erroneous and needs to be removed.
+```
+Created symlink /etc/systemd/user/default.target.wants/onedrive.service → /usr/lib/systemd/user/onedrive.service. 
+``` +To remove this symbolic link, run the following command: +``` +sudo rm /etc/systemd/user/default.target.wants/onedrive.service +``` + +### Step 2: Ensure your system is up-to-date +Use a script, similar to the following to ensure your system is updated correctly: +```text +#!/bin/bash +rm -rf /var/lib/dpkg/lock-frontend +rm -rf /var/lib/dpkg/lock +apt-get update +apt-get upgrade -y +apt-get dist-upgrade -y +apt-get autoremove -y +apt-get autoclean -y +``` + +Run this script as 'root' by using `su -` to elevate to 'root'. Example below: +```text +Welcome to Ubuntu 20.04.1 LTS (GNU/Linux 5.4.0-48-generic x86_64) + + * Documentation: https://help.ubuntu.com + * Management: https://landscape.canonical.com + * Support: https://ubuntu.com/advantage + +425 updates can be installed immediately. +208 of these updates are security updates. +To see these additional updates run: apt list --upgradable + +Your Hardware Enablement Stack (HWE) is supported until April 2025. +Last login: Thu Jan 20 14:21:48 2022 from my.ip.address +alex@ubuntu-20-LTS:~$ su - +Password: +root@ubuntu-20-LTS:~# ls -la +total 28 +drwx------ 3 root root 4096 Oct 10 2020 . +drwxr-xr-x 20 root root 4096 Oct 10 2020 .. +-rw------- 1 root root 175 Jan 20 14:23 .bash_history +-rw-r--r-- 1 root root 3106 Dec 6 2019 .bashrc +drwx------ 2 root root 4096 Apr 23 2020 .cache +-rw-r--r-- 1 root root 161 Dec 6 2019 .profile +-rwxr-xr-x 1 root root 174 Oct 10 2020 update-os.sh +root@ubuntu-20-LTS:~# cat update-os.sh +#!/bin/bash +rm -rf /var/lib/dpkg/lock-frontend +rm -rf /var/lib/dpkg/lock +apt-get update +apt-get upgrade -y +apt-get dist-upgrade -y +apt-get autoremove -y +apt-get autoclean -y +root@ubuntu-20-LTS:~# ./update-os.sh +Hit:1 http://au.archive.ubuntu.com/ubuntu focal InRelease +Hit:2 http://au.archive.ubuntu.com/ubuntu focal-updates InRelease +Hit:3 http://au.archive.ubuntu.com/ubuntu focal-backports InRelease +Hit:4 http://security.ubuntu.com/ubuntu focal-security InRelease +Reading package lists... 96% +... +Sourcing file `/etc/default/grub' +Sourcing file `/etc/default/grub.d/init-select.cfg' +Generating grub configuration file ... +Found linux image: /boot/vmlinuz-5.13.0-27-generic +Found initrd image: /boot/initrd.img-5.13.0-27-generic +Found linux image: /boot/vmlinuz-5.4.0-48-generic +Found initrd image: /boot/initrd.img-5.4.0-48-generic +Found memtest86+ image: /boot/memtest86+.elf +Found memtest86+ image: /boot/memtest86+.bin +done +Removing linux-modules-5.4.0-26-generic (5.4.0-26.30) ... +Processing triggers for libc-bin (2.31-0ubuntu9.2) ... +Reading package lists... Done +Building dependency tree +Reading state information... Done +root@ubuntu-20-LTS:~# +``` + +Reboot your system after running this process before continuing with Step 3. +```text +reboot +``` + +### Step 3: Determine what your OS is based on +Determine what your OS is based on. To do this, run the following command: +```text +lsb_release -a +``` +**Example:** +```text +alex@ubuntu-system:~$ lsb_release -a +No LSB modules are available. +Distributor ID: Ubuntu +Description: Ubuntu 22.04 LTS +Release: 22.04 +Codename: jammy +``` + +### Step 4: Pick the correct instructions to use +If required, review the table below based on your 'lsb_release' information to pick the appropriate instructions to use: + +| Release & Codename | Instructions to use | +|--------------------|---------------------| +| Linux Mint 19.x | This platform is End-of-Life (EOL) and no longer supported. 
You must upgrade to Linux Mint 21.x | +| Linux Mint 20.x | Use [Ubuntu 20.04](#distribution-ubuntu-2004) instructions below | +| Linux Mint 21.x | Use [Ubuntu 22.04](#distribution-ubuntu-2204) instructions below | +| Linux Mint Debian Edition (LMDE) 5 / Elsie | Use [Debian 11](#distribution-debian-11) instructions below | +| Linux Mint Debian Edition (LMDE) 6 / Faye | Use [Debian 12](#distribution-debian-12) instructions below | +| Debian 9 | This platform is End-of-Life (EOL) and no longer supported. You must upgrade to Debian 12 | +| Debian 10 | You must build from source or upgrade your Operating System to Debian 12 | +| Debian 11 | Use [Debian 11](#distribution-debian-11) instructions below | +| Debian 12 | Use [Debian 12](#distribution-debian-12) instructions below | +| Debian Sid | Refer to https://packages.debian.org/sid/onedrive for assistance | +| Raspbian GNU/Linux 10 | You must build from source or upgrade your Operating System to Raspbian GNU/Linux 12 | +| Raspbian GNU/Linux 11 | Use [Debian 11](#distribution-debian-11) instructions below | +| Raspbian GNU/Linux 12 | Use [Debian 12](#distribution-debian-12) instructions below | +| Ubuntu 18.04 / Bionic | This platform is End-of-Life (EOL) and no longer supported. You must upgrade to Ubuntu 22.04 | +| Ubuntu 20.04 / Focal | Use [Ubuntu 20.04](#distribution-ubuntu-2004) instructions below | +| Ubuntu 21.04 / Hirsute | Use [Ubuntu 21.04](#distribution-ubuntu-2104) instructions below | +| Ubuntu 21.10 / Impish | Use [Ubuntu 21.10](#distribution-ubuntu-2110) instructions below | +| Ubuntu 22.04 / Jammy | Use [Ubuntu 22.04](#distribution-ubuntu-2204) instructions below | +| Ubuntu 22.10 / Kinetic | Use [Ubuntu 22.10](#distribution-ubuntu-2210) instructions below | +| Ubuntu 23.04 / Lunar | Use [Ubuntu 23.04](#distribution-ubuntu-2304) instructions below | +| Ubuntu 23.10 / Mantic | Use [Ubuntu 23.10](#distribution-ubuntu-2310) instructions below | + +**Note:** If your Linux distribution and release is not in the table above, you have 2 options: + +1. Compile the application from source. Refer to install.md (Compilation & Installation) for assistance. +2. Raise a support case with your Linux Distribution to provide you with an applicable package you can use. 
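+
+If you prefer to check these details from the command line, the following is a minimal sketch (not part of the packaging instructions themselves) that prints the identifiers used by the table above. It assumes your distribution provides the standard `/etc/os-release` file; the `VERSION_CODENAME` field is optional per os-release(5) and may be absent on some platforms.
+```text
+# Print the distribution name, release and codename used to pick the correct section below
+. /etc/os-release
+echo "Distribution: ${NAME}"
+echo "Release:      ${VERSION_ID} (${VERSION_CODENAME})"
+```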
+ +## Distribution Package Install Instructions + +### Distribution: Debian 11 +The packages support the following platform architectures: +|  i686  | x86_64 | ARMHF | AARCH64 | +|:----:|:------:|:-----:|:-------:| +|✔|✔|✔|✔| + +#### Step 1: Add the OpenSuSE Build Service repository release key +Add the OpenSuSE Build Service repository release key using the following command: +```text +wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_11/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null +``` + +#### Step 2: Add the OpenSuSE Build Service repository +Add the OpenSuSE Build Service repository using the following command: +```text +echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_11/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list +``` + +#### Step 3: Update your apt package cache +Run: `sudo apt-get update` + +#### Step 4: Install 'onedrive' +Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` + +#### Step 5: Read 'Known Issues' with these packages +Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. + +### Distribution: Debian 12 +The packages support the following platform architectures: +|  i686  | x86_64 | ARMHF | AARCH64 | +|:----:|:------:|:-----:|:-------:| +|✔|✔|✔|✔| + +#### Step 1: Add the OpenSuSE Build Service repository release key +Add the OpenSuSE Build Service repository release key using the following command: +```text +wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_12/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null +``` + +#### Step 2: Add the OpenSuSE Build Service repository +Add the OpenSuSE Build Service repository using the following command: +```text +echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/Debian_12/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list +``` + +#### Step 3: Update your apt package cache +Run: `sudo apt-get update` + +#### Step 4: Install 'onedrive' +Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` + +#### Step 5: Read 'Known Issues' with these packages +Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. 
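+
+Once the package is installed (from this or any of the following sections), you may optionally wish to confirm that 'onedrive' was installed from the OpenSuSE Build Service repository rather than from the distribution's own Universe archive, and that the client reports the expected version. A quick check using standard apt tooling (output will vary by system):
+```text
+apt-cache policy onedrive
+onedrive --version
+```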
+ +### Distribution: Ubuntu 20.04 +The packages support the following platform architectures: +|  i686  | x86_64 | ARMHF | AARCH64 | +|:----:|:------:|:-----:|:-------:| +|❌|✔|✔|✔| + +#### Step 1: Add the OpenSuSE Build Service repository release key +Add the OpenSuSE Build Service repository release key using the following command: +```text +wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_20.04/Release.key | sudo apt-key add - +``` + +#### Step 2: Add the OpenSuSE Build Service repository +Add the OpenSuSE Build Service repository using the following command: +```text +echo 'deb https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_20.04/ ./' | sudo tee /etc/apt/sources.list.d/onedrive.list +``` + +#### Step 3: Update your apt package cache +Run: `sudo apt-get update` + +#### Step 4: Install 'onedrive' +Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` + +#### Step 5: Read 'Known Issues' with these packages +Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. + +### Distribution: Ubuntu 21.04 +The packages support the following platform architectures: +|  i686  | x86_64 | ARMHF | AARCH64 | +|:----:|:------:|:-----:|:-------:| +|❌|✔|✔|✔| + +#### Step 1: Add the OpenSuSE Build Service repository release key +Add the OpenSuSE Build Service repository release key using the following command: +```text +wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null +``` + +#### Step 2: Add the OpenSuSE Build Service repository +Add the OpenSuSE Build Service repository using the following command: +```text +echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list +``` + +#### Step 3: Update your apt package cache +Run: `sudo apt-get update` + +#### Step 4: Install 'onedrive' +Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` + +#### Step 5: Read 'Known Issues' with these packages +Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. 
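+
+**Note:** The Ubuntu 20.04 steps above use the legacy `apt-key add` method. The `apt-key` utility is deprecated in newer releases of apt; if you prefer, the keyring-based pattern used by the other sections in this document can be adapted for the Ubuntu 20.04 repository as well. A sketch (the repository URL is the same one shown in the Ubuntu 20.04 section above):
+```text
+wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_20.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null
+echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_20.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list
+```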
+ +### Distribution: Ubuntu 21.10 +The packages support the following platform architectures: +|  i686  | x86_64 | ARMHF | AARCH64 | +|:----:|:------:|:-----:|:-------:| +|❌|✔|✔|✔| + +#### Step 1: Add the OpenSuSE Build Service repository release key +Add the OpenSuSE Build Service repository release key using the following command: +```text +wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.10/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null +``` + +#### Step 2: Add the OpenSuSE Build Service repository +Add the OpenSuSE Build Service repository using the following command: +```text +echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_21.10/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list +``` + +#### Step 3: Update your apt package cache +Run: `sudo apt-get update` + +#### Step 4: Install 'onedrive' +Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` + +#### Step 5: Read 'Known Issues' with these packages +Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. + +### Distribution: Ubuntu 22.04 +The packages support the following platform architectures: +|  i686  | x86_64 | ARMHF | AARCH64 | +|:----:|:------:|:-----:|:-------:| +|❌|✔|✔|✔| + +#### Step 1: Add the OpenSuSE Build Service repository release key +Add the OpenSuSE Build Service repository release key using the following command: +```text +wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null +``` + +#### Step 2: Add the OpenSuSE Build Service repository +Add the OpenSuSE Build Service repository using the following command: +```text +echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list +``` + +#### Step 3: Update your apt package cache +Run: `sudo apt-get update` + +#### Step 4: Install 'onedrive' +Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` + +#### Step 5: Read 'Known Issues' with these packages +Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. 
+ +### Distribution: Ubuntu 22.10 +The packages support the following platform architectures: +|  i686  | x86_64 | ARMHF | AARCH64 | +|:----:|:------:|:-----:|:-------:| +|❌|✔|✔|✔| + +#### Step 1: Add the OpenSuSE Build Service repository release key +Add the OpenSuSE Build Service repository release key using the following command: +```text +wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.10/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null +``` + +#### Step 2: Add the OpenSuSE Build Service repository +Add the OpenSuSE Build Service repository using the following command: +```text +echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_22.10/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list +``` + +#### Step 3: Update your apt package cache +Run: `sudo apt-get update` + +#### Step 4: Install 'onedrive' +Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` + +#### Step 5: Read 'Known Issues' with these packages +Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. + +### Distribution: Ubuntu 23.04 +The packages support the following platform architectures: +|  i686  | x86_64 | ARMHF | AARCH64 | +|:----:|:------:|:-----:|:-------:| +|❌|✔|✔|✔| + +#### Step 1: Add the OpenSuSE Build Service repository release key +Add the OpenSuSE Build Service repository release key using the following command: +```text +wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_23.04/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null +``` + +#### Step 2: Add the OpenSuSE Build Service repository +Add the OpenSuSE Build Service repository using the following command: +```text +echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_23.04/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list +``` + +#### Step 3: Update your apt package cache +Run: `sudo apt-get update` + +#### Step 4: Install 'onedrive' +Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` + +#### Step 5: Read 'Known Issues' with these packages +Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. 
+ +### Distribution: Ubuntu 23.10 +The packages support the following platform architectures: +|  i686  | x86_64 | ARMHF | AARCH64 | +|:----:|:------:|:-----:|:-------:| +|❌|✔|❌|✔| + +#### Step 1: Add the OpenSuSE Build Service repository release key +Add the OpenSuSE Build Service repository release key using the following command: +```text +wget -qO - https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_23.10/Release.key | gpg --dearmor | sudo tee /usr/share/keyrings/obs-onedrive.gpg > /dev/null +``` + +#### Step 2: Add the OpenSuSE Build Service repository +Add the OpenSuSE Build Service repository using the following command: +```text +echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/obs-onedrive.gpg] https://download.opensuse.org/repositories/home:/npreining:/debian-ubuntu-onedrive/xUbuntu_23.10/ ./" | sudo tee /etc/apt/sources.list.d/onedrive.list +``` + +#### Step 3: Update your apt package cache +Run: `sudo apt-get update` + +#### Step 4: Install 'onedrive' +Run: `sudo apt install --no-install-recommends --no-install-suggests onedrive` + +#### Step 5: Read 'Known Issues' with these packages +Read and understand the [known issues](#known-issues-with-installing-from-the-above-packages) with these packages below, taking any action that is needed. + + +## Known Issues with Installing from the above packages + +### 1. The client may segfault | core-dump when exiting +When the client is run in `--monitor` mode manually, or when using the systemd service, the client may segfault on exit. + +This issue is caused by the way the 'onedrive' packages are built using the distribution LDC package & the default distribution compiler options which is the root cause for this issue. Refer to: https://bugs.launchpad.net/ubuntu/+source/ldc/+bug/1895969 + +**Additional references:** +* https://github.com/abraunegg/onedrive/issues/1053 +* https://github.com/abraunegg/onedrive/issues/1609 + +**Resolution Options:** +* Uninstall the package and build client from source diff --git a/docs/usage.md b/docs/usage.md new file mode 100644 index 000000000..880de9522 --- /dev/null +++ b/docs/usage.md @@ -0,0 +1,943 @@ +# Using the OneDrive Client for Linux +## Application Version +Before reading this document, please ensure you are running application version [![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) or greater. Use `onedrive --version` to determine what application version you are using and upgrade your client if required. 
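+
+If you have installed the client more than once over time (for example, an older distribution package alongside a newer build from source), it can also help to confirm which binary is actually being executed before troubleshooting further. A minimal sketch using standard shell tooling:
+```text
+# Confirm which 'onedrive' binary is first on your PATH and the version it reports
+which onedrive
+onedrive --version
+```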
+ +## Table of Contents + +- [Important Notes](#important-notes) + - [Upgrading from the 'skilion' Client](#upgrading-from-the-sklion-client) + - [Guidelines for Naming Local Files and Folders in the Synchronisation Directory](#guidelines-for-naming-local-files-and-folders-in-the-synchronisation-directory) + - [Compatibility with curl](#compatibility-with-curl) +- [First Steps](#first-steps) + - [Authorise the Application with Your Microsoft OneDrive Account](#authorise-the-application-with-your-microsoft-onedrive-account) + - [Display Your Applicable Runtime Configuration](#display-your-applicable-runtime-configuration) + - [Understanding OneDrive Client for Linux Operational Modes](#understanding-onedrive-client-for-linux-operational-modes) + - [Standalone Synchronisation Operational Mode (Standalone Mode)](#standalone-synchronisation-operational-mode-standalone-mode) + - [Ongoing Synchronisation Operational Mode (Monitor Mode)](#ongoing-synchronisation-operational-mode-monitor-mode) + - [Increasing application logging level](#increasing-application-logging-level) + - [Using 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive](#using-client-side-filtering-rules-to-determine-what-should-be-synced-with-microsoft-onedrive) + - [Testing your configuration](#testing-your-configuration) + - [Performing a sync with Microsoft OneDrive](#performing-a-sync-with-microsoft-onedrive) + - [Performing a single directory synchronisation with Microsoft OneDrive](#performing-a-single-directory-synchronisation-with-microsoft-onedrive) + - [Performing a 'one-way' download synchronisation with Microsoft OneDrive](#performing-a-one-way-download-synchronisation-with-microsoft-onedrive) + - [Performing a 'one-way' upload synchronisation with Microsoft OneDrive](#performing-a-one-way-upload-synchronisation-with-microsoft-onedrive) + - [Performing a selective synchronisation via 'sync_list' file](#performing-a-selective-synchronisation-via-sync_list-file) + - [Performing a --resync](#performing-a---resync) + - [Performing a --force-sync without a --resync or changing your configuration](#performing-a---force-sync-without-a---resync-or-changing-your-configuration) + - [Enabling the Client Activity Log](#enabling-the-client-activity-log) + - [Client Activity Log Example:](#client-activity-log-example) + - [Client Activity Log Differences](#client-activity-log-differences) + - [GUI Notifications](#gui-notifications) + - [Handling a Microsoft OneDrive Account Password Change](#handling-a-microsoft-onedrive-account-password-change) + - [Determining the synchronisation result](#determining-the-synchronisation-result) +- [Frequently Asked Configuration Questions](#frequently-asked-configuration-questions) + - [How to change the default configuration of the client?](#how-to-change-the-default-configuration-of-the-client) + - [How to change where my data from Microsoft OneDrive is stored?](#how-to-change-where-my-data-from-microsoft-onedrive-is-stored) + - [How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive?](#how-to-change-what-file-and-directory-permissions-are-assigned-to-data-that-is-downloaded-from-microsoft-onedrive) + - [How are uploads and downloads managed?](#how-are-uploads-and-downloads-managed) + - [How to only sync a specific directory?](#how-to-only-sync-a-specific-directory) + - [How to 'skip' files from syncing?](#how-to-skip-files-from-syncing) + - [How to 'skip' directories from 
syncing?](#how-to-skip-directories-from-syncing) + - [How to 'skip' .files and .folders from syncing?](#how-to-skip-files-and-folders-from-syncing) + - [How to 'skip' files larger than a certain size from syncing?](#how-to-skip-files-larger-than-a-certain-size-from-syncing) + - [How to 'rate limit' the application to control bandwidth consumed for upload & download operations?](#how-to-rate-limit-the-application-to-control-bandwidth-consumed-for-upload--download-operations) + - [How can I prevent my local disk from filling up?](#how-can-i-prevent-my-local-disk-from-filling-up) + - [How does the client handle symbolic links?](#how-does-the-client-handle-symbolic-links) + - [How to synchronise shared folders (OneDrive Personal)?](#how-to-synchronise-shared-folders-onedrive-personal) + - [How to synchronise shared folders (OneDrive Business or Office 365)?](#how-to-synchronise-shared-folders-onedrive-business-or-office-365) + - [How to synchronise SharePoint / Office 365 Shared Libraries?](#how-to-synchronise-sharepoint--office-365-shared-libraries) + - [How to Create a Shareable Link?](#how-to-create-a-shareable-link) + - [How to Synchronise Both Personal and Business Accounts at once?](#how-to-synchronise-both-personal-and-business-accounts-at-once) + - [How to Synchronise Multiple SharePoint Libraries simultaneously?](#how-to-synchronise-multiple-sharepoint-libraries-simultaneously) + - [How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period?](#how-to-receive-real-time-changes-from-microsoft-onedrive-service-instead-of-waiting-for-the-next-sync-period) + - [How to initiate the client as a background service?](#how-to-initiate-the-client-as-a-background-service) + - [OneDrive service running as root user via init.d](#onedrive-service-running-as-root-user-via-initd) + - [OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-root-user-via-systemd-arch-ubuntu-debian-opensuse-fedora) + - [OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux)](#onedrive-service-running-as-root-user-via-systemd-red-hat-enterprise-linux-centos-linux) + - [OneDrive service running as a non-root user via systemd (All Linux Distributions)](#onedrive-service-running-as-a-non-root-user-via-systemd-all-linux-distributions) + - [OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora)](#onedrive-service-running-as-a-non-root-user-via-systemd-with-notifications-enabled-arch-ubuntu-debian-opensuse-fedora) + - [OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void)](#onedrive-service-running-as-a-non-root-user-via-runit-antix-devuan-artix-void) + - [How to start a user systemd service at boot without user login?](#how-to-start-a-user-systemd-service-at-boot-without-user-login) + +## Important Notes +### Upgrading from the 'skilion' Client +The 'skilion' version has a significant number of issues in how it manages the local sync state. When upgrading from the 'skilion' client to this client, it's recommended to stop any service or OneDrive process that may be running. Once all OneDrive services are stopped, make sure to remove any old client binaries from your system. 
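+
+As an illustration of this clean-up, assuming the old client binary was installed to /usr/local/bin and was run as a user systemd service (adjust the paths and service names to match your own installation):
+```text
+# Stop and disable any running OneDrive service for your user
+systemctl --user stop onedrive
+systemctl --user disable onedrive
+# Check which 'onedrive' binaries are still present on your path
+command -v onedrive
+# Remove an old, manually installed binary (example path only)
+sudo rm -f /usr/local/bin/onedrive
+```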
+ +Furthermore, if you're using a 'config' file within your configuration directory (`~/.config/onedrive/`), please ensure that you update the `skip_file = ` option as shown below: + +**Invalid 'skilion' configuration:** +```text +skip_file = ".*|~*" +``` +**Minimum valid configuration:** +```text +skip_file = "~*" +``` +**Default valid configuration:** +```text +skip_file = "~*|.~*|*.tmp|*.swp|*.partial" +``` + +Avoid using a 'skip_file' entry of `.*` as it may prevent the correct detection of local changes to process. The configuration values for 'skip_file' will be checked for validity, and if there is an issue, the following error message will be displayed: +```text +ERROR: Invalid skip_file entry '.*' detected +``` + +### Guidelines for Naming Local Files and Folders in the Synchronisation Directory +When naming your files and folders in the synchronisation directory, it is important to follow the [Windows naming conventions](https://docs.microsoft.com/windows/win32/fileio/naming-a-file) for your files and folders. + +Moreover, Microsoft OneDrive does not adhere to POSIX standards. As a result, if you have two files with identical names differing only in capitalisation, the OneDrive Client for Linux will try to manage this. However, in cases of naming conflicts, the conflicting file or folder will not synchronise. This is a deliberate design choice and will not be modified. To avoid such issues, you should rename any conflicting local files or folders. + +### Compatibility with curl +If your system uses curl < 7.47.0, curl will default to HTTP/1.1 for HTTPS operations, and the client will follow suit, using HTTP/1.1. + +For systems running curl >= 7.47.0 and < 7.62.0, curl will prefer HTTP/2 for HTTPS, but it will still use HTTP/1.1 as the default for these operations. The client will employ HTTP/1.1 for HTTPS operations as well. + +However, if your system employs curl >= 7.62.0, curl will, by default, prioritise HTTP/2 over HTTP/1.1. In this case, the client will utilise HTTP/2 for most HTTPS operations and stick with HTTP/1.1 for others. Please note that this distinction is governed by the OneDrive platform, not our client. + +If you explicitly want to use HTTP/1.1, you can do so by using the `--force-http-11` flag or setting the configuration option `force_http_11 = "true"`. This will compel the application to exclusively use HTTP/1.1. Otherwise, all client operations will align with the curl default settings for your distribution. + +## First Steps +### Authorise the Application with Your Microsoft OneDrive Account +Once you've installed the application, you'll need to authorise it using your Microsoft OneDrive Account. This can be done by simply running the application without any additional command switches. + +Please be aware that some companies may require you to explicitly add this app to the [Microsoft MyApps portal](https://myapps.microsoft.com/). To add an approved app to your apps, click on the ellipsis in the top-right corner and select "Request new apps." On the next page, you can add this app. If it's not listed, you should make a request through your IT department. + +When you run the application for the first time, you'll be prompted to open a specific URL using your web browser, where you'll need to log in to your Microsoft Account and grant the application permission to access your files. After granting permission to the application, you'll be redirected to a blank page. Simply copy the URI from the blank page and paste it into the application. 
+ +**Example:** +```text +[user@hostname ~]$ onedrive +Authorise this app by visiting: + +https://login.microsoftonline.com/common/oauth2/v2.0/authorise?client_id=22c49a0d-d21c-4792-aed1-8f163c982546&scope=Files.ReadWrite%20Files.ReadWrite.all%20Sites.ReadWrite.All%20offline_access&response_type=code&redirect_uri=https://login.microsoftonline.com/common/oauth2/nativeclient + +Enter the response URI from your browser: https://login.microsoftonline.com/common/oauth2/nativeclient?code= + +The application has been successfully authorised, but no additional command switches were provided. + +Please use 'onedrive --help' for further assistance on how to run this application. +``` + +**Please Note:** Without additional input or configuration, the OneDrive Client for Linux will automatically adhere to default application settings during synchronisation processes with Microsoft OneDrive. + + +### Display Your Applicable Runtime Configuration +To verify the configuration that the application will use, use the following command: +```text +onedrive --display-config +``` +This command will display all the relevant runtime interpretations of the options and configurations you are using. An example output is as follows: +```text +Reading configuration file: /home/user/.config/onedrive/config +Configuration file successfully loaded +onedrive version = vX.Y.Z-A-bcdefghi +Config path = /home/user/.config/onedrive +Config file found in config path = true +Config option 'drive_id' = +Config option 'sync_dir' = ~/OneDrive +... +Config option 'webhook_enabled' = false +``` + +**Important Reminder:** When using multiple OneDrive accounts, it's essential to always use the `--confdir` command followed by the appropriate configuration directory. This ensures that the specific configuration you intend to view is correctly displayed. + +### Understanding OneDrive Client for Linux Operational Modes +There are two modes of operation when using the client: +1. Standalone sync mode that performs a single sync action against Microsoft OneDrive. +2. Ongoing sync mode that continuously syncs your data with Microsoft OneDrive. + +**Important Information:** The default setting for the OneDrive Client on Linux will sync all data from your Microsoft OneDrive account to your local device. To avoid this and select specific items for synchronisation, you should explore setting up 'Client Side Filtering' rules. This will help you manage and specify what exactly gets synced with your Microsoft OneDrive account. + +#### Standalone Synchronisation Operational Mode (Standalone Mode) +This method of use can be employed by issuing the following option to the client: +```text +onedrive --sync +``` +For simplicity, this can be shortened to the following: +```text +onedrive -s +``` + +#### Ongoing Synchronisation Operational Mode (Monitor Mode) +This method of use can be utilised by issuing the following option to the client: +```text +onedrive --monitor +``` +For simplicity, this can be shortened to the following: +```text +onedrive -m +``` +**Note:** This method of use is typically employed when enabling a systemd service to run the application in the background. 
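+
+When running in this mode, the client also synchronises with Microsoft OneDrive on a regular interval (reported as the 'OneDrive synchronisation interval' in verbose output). If the default interval is not suitable, it can be tuned via the 'monitor_interval' configuration option in your 'config' file; the value below is illustrative only:
+```text
+# Synchronise with Microsoft OneDrive every 600 seconds (10 minutes)
+monitor_interval = "600"
+```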
+ +Two common errors can occur when using monitor mode: +* Initialisation failure +* Unable to add a new inotify watch + +Both of these errors are local environment issues, where the following system variables need to be increased as the current system values are potentially too low: +* `fs.file-max` +* `fs.inotify.max_user_watches` + +To determine what the existing values are on your system, use the following commands: +```text +sysctl fs.file-max +sysctl fs.inotify.max_user_watches +``` +Alternatively, when running the client with increased verbosity (see below), the client will display what the current configured system maximum values are: +```text +... +All application operations will be performed in: /home/user/OneDrive +OneDrive synchronisation interval (seconds): 300 +Maximum allowed open files: 393370 <-- This is the current operating system fs.file-max value +Maximum allowed inotify watches: 29374 <-- This is the current operating system fs.inotify.max_user_watches value +Initialising filesystem inotify monitoring ... +... +``` +To determine what value to change to, you need to count all the files and folders in your configured 'sync_dir': +```text +cd /path/to/your/sync/dir +ls -laR | wc -l +``` + +To make a change to these variables using your file and folder count, use the following process: +```text +sudo sysctl fs.file-max= +sudo sysctl fs.inotify.max_user_watches= +``` +Once these values are changed, you will need to restart your client so that the new values are detected and used. + +To make these changes permanent on your system, refer to your OS reference documentation. + +### Increasing application logging level +When running a sync (`--sync`) or using monitor mode (`--monitor`), it may be desirable to see additional information regarding the progress and operation of the client. For example, for a `--sync` command, this would be: +```text +onedrive --sync --verbose +``` +Furthermore, for simplicity, this can be simplified to the following: +``` +onedrive -s -v +``` +Adding `--verbose` twice will enable debug logging output. This is generally required when raising a bug report or needing to understand a problem. + +### Using 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive +Client Side Filtering in the context of the OneDrive Client for Linux refers to user-configured rules that determine what files and directories the client should upload or download from Microsoft OneDrive. These rules are crucial for optimising synchronisation, especially when dealing with large numbers of files or specific file types. The OneDrive Client for Linux offers several configuration options to facilitate this: + +* **skip_dir:** This option allows the user to specify directories that should not be synchronised with OneDrive. It's particularly useful for omitting large or irrelevant directories from the sync process. + +* **skip_dotfiles:** Dotfiles, usually configuration files or scripts, can be excluded from the sync. This is useful for users who prefer to keep these files local. + +* **skip_file:** Specific files can be excluded from synchronisation using this option. It provides flexibility in selecting which files are essential for cloud storage. + +* **skip_symlinks:** Symlinks often point to files outside the OneDrive directory or to locations that are not relevant for cloud storage. This option prevents them from being included in the sync. 
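+
+As a rough sketch of how these options can be combined in your 'config' file (the patterns and values below are illustrative only and should be adapted to your own data):
+```text
+# Do not sync any directory matching these patterns
+skip_dir = "Virtual_Machines|Temp*"
+# Do not sync any file matching these patterns
+skip_file = "~*|.~*|*.tmp|*.swp|*.partial|*.iso"
+# Do not sync any dot file or dot folder
+skip_dotfiles = "true"
+# Do not sync symbolic links
+skip_symlinks = "true"
+```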
+ +Additionally, the OneDrive Client for Linux allows the implementation of Client Side Filtering rules through a 'sync_list' file. This file explicitly states which directories or files should be included in the synchronisation. By default, any item not listed in the 'sync_list' file is excluded. This method offers a more granular approach to synchronisation, ensuring that only the necessary data is transferred to and from Microsoft OneDrive. + +These configurable options and the 'sync_list' file provide users with the flexibility to tailor the synchronisation process to their specific needs, conserving bandwidth and storage space while ensuring that important files are always backed up and accessible. + +**Note:** After changing any Client Side Filtering rule, you must perform a full re-synchronisation. + +### Testing your configuration +You can test your configuration by utilising the `--dry-run` CLI option. No files will be downloaded, uploaded, or removed; however, the application will display what 'would' have occurred. For example: +```text +onedrive --sync --verbose --dry-run +Reading configuration file: /home/user/.config/onedrive/config +Configuration file successfully loaded +Using 'user' Config Dir: /home/user/.config/onedrive +DRY-RUN Configured. Output below shows what 'would' have occurred. +DRY-RUN: Copying items.sqlite3 to items-dryrun.sqlite3 to use for dry run operations +DRY RUN: Not creating backup config file as --dry-run has been used +DRY RUN: Not updating hash files as --dry-run has been used +Checking Application Version ... +Attempting to initialise the OneDrive API ... +Configuring Global Azure AD Endpoints +The OneDrive API was initialised successfully +Opening the item database ... +Sync Engine Initialised with new Onedrive API instance +Application version: vX.Y.Z-A-bcdefghi +Account Type: +Default Drive ID: +Default Root ID: +Remaining Free Space: 1058488129 KB +All application operations will be performed in: /home/user/OneDrive +Fetching items from the OneDrive API for Drive ID: .. +... +Performing a database consistency and integrity check on locally stored data ... +Processing DB entries for this Drive ID: +Processing ~/OneDrive +The directory has not changed +... +Scanning local filesystem '~/OneDrive' for new data to upload ... +... +Performing a final true-up scan of online data from Microsoft OneDrive +Fetching items from the OneDrive API for Drive ID: .. + +Sync with Microsoft OneDrive is complete +``` + +### Performing a sync with Microsoft OneDrive +By default, all files are downloaded in `~/OneDrive`. This download location is controlled by the 'sync_dir' config option. + +After authorising the application, a sync of your data can be performed by running: +```text +onedrive --sync +``` +This will synchronise files from your Microsoft OneDrive account to your `~/OneDrive` local directory or to your specified 'sync_dir' location. + +If you prefer to use your local files as stored in `~/OneDrive` as your 'source of truth,' use the following sync command: +```text +onedrive --sync --local-first +``` + +### Performing a single directory synchronisation with Microsoft OneDrive +In some cases, it may be desirable to synchronise a single directory under ~/OneDrive without having to change your client configuration. 
To do this, use the following command: +```text +onedrive --sync --single-directory '' +``` + +**Example:** If the full path is `~/OneDrive/mydir`, the command would be `onedrive --sync --single-directory 'mydir'` + +### Performing a 'one-way' download synchronisation with Microsoft OneDrive +In some cases, it may be desirable to 'download only' from Microsoft OneDrive. To do this, use the following command: +```text +onedrive --sync --download-only +``` +This will download all the content from Microsoft OneDrive to your `~/OneDrive` location. Any files that are deleted online remain locally and will not be removed. + +However, in some circumstances, it may be desirable to clean up local files that have been removed online. To do this, use the following command: + +```text +onedrive --sync --download-only --cleanup-local-files +``` + +### Performing a 'one-way' upload synchronisation with Microsoft OneDrive +In certain scenarios, you might need to perform an 'upload only' operation to Microsoft OneDrive. This means that you'll be uploading data to OneDrive, but not synchronising any changes or additions made elsewhere. Use this command to initiate an upload-only synchronisation: + +```text +onedrive --sync --upload-only +``` + +**Important Points:** +- The 'upload only' mode operates independently of OneDrive's online content. It doesn't check or sync with what's already stored on OneDrive. It only uploads data from the local client. +- If a local file or folder that was previously synchronised with Microsoft OneDrive is now missing locally, it will be deleted from OneDrive during this operation. + +To ensure that all data on Microsoft OneDrive remains intact (e.g., preventing deletion of items on OneDrive if they're deleted locally), use this command instead: + +```text +onedrive --sync --upload-only --no-remote-delete +``` + +**Understanding both Commands:** +- `--upload-only`: This command will only upload local changes to OneDrive. These changes can include additions, modifications, moves, and deletions of files and folders. +- `--no-remote-delete`: Adding this command prevents the deletion of any items on OneDrive, even if they're deleted locally. This creates a one-way archive on OneDrive where files are only added and never removed. + +### Performing a selective synchronisation via 'sync_list' file +Selective synchronisation allows you to sync only specific files and directories. +To enable selective synchronisation, create a file named `sync_list` in your application configuration directory (default is `~/.config/onedrive`). + +Important points to understand before using 'sync_list'. +* 'sync_list' excludes _everything_ by default on OneDrive. +* 'sync_list' follows an _"exclude overrides include"_ rule, and requires **explicit inclusion**. +* Order exclusions before inclusions, so that anything _specifically included_ is included. +* How and where you place your `/` matters for excludes and includes in subdirectories. + +Each line of the file represents a relative path from your `sync_dir`. All files and directories not matching any line of the file will be skipped during all operations. + +Additionally, the use of `/` is critically important to determine how a rule is interpreted. It is very similar to `**` wildcards, for those that are familiar with globbing patterns. 
+Here is an example of `sync_list`: +```text +# sync_list supports comments +# +# The ordering of entries is highly recommended - exclusions before inclusions +# +# Exclude temp folder(s) or file(s) under Documents folder(s), anywhere in OneDrive +!Documents/temp* +# +# Exclude secret data folder in root directory only +!/Secret_data/* +# +# Include everything else in root directory +/* +# +# Include my Backup folder(s) or file(s) anywhere on OneDrive +Backup +# +# Include my Backup folder in root +/Backup/ +# +# Include Documents folder(s) anywhere in OneDrive +Documents/ +# +# Include all PDF files in Documents folder(s), anywhere in OneDrive +Documents/*.pdf +# +# Include this single document in Documents folder(s), anywhere in OneDrive +Documents/latest_report.docx +# +# Include all Work/Project directories or files, inside 'Work' folder(s), anywhere in OneDrive +Work/Project* +# +# Include all "notes.txt" files, anywhere in OneDrive +notes.txt +# +# Include /Blender in the ~OneDrive root but not if elsewhere in OneDrive +/Blender +# +# Include these directories(or files) in 'Pictures' folder(s), that have a space in their name +Pictures/Camera Roll +Pictures/Saved Pictures +# +# Include these names if they match any file or folder +Cinema Soc +Codes +Textbooks +Year 2 +``` +The following are supported for pattern matching and exclusion rules: +* Use the `*` to wildcard select any characters to match for the item to be included +* Use either `!` or `-` characters at the start of the line to exclude an otherwise included item + +**Note:** When enabling the use of 'sync_list,' utilise the `--display-config` option to validate that your configuration will be used by the application, and test your configuration by adding `--dry-run` to ensure the client will operate as per your requirement. + +**Note:** After changing the sync_list, you must perform a full re-synchronisation by adding `--resync` to your existing command line - for example: `onedrive --sync --resync` + +**Note:** In some circumstances, it may be required to sync all the individual files within the 'sync_dir', but due to frequent name change / addition / deletion of these files, it is not desirable to constantly change the 'sync_list' file to include / exclude these files and force a resync. To assist with this, enable the following in your configuration file: +```text +sync_root_files = "true" +``` +This will tell the application to sync any file that it finds in your 'sync_dir' root by default, negating the need to constantly update your 'sync_list' file. + +### Performing a --resync +If you alter any of the subsequent configuration items, you will be required to execute a `--resync` to make sure your client is syncing your data with the updated configuration: +* drive_id +* sync_dir +* skip_file +* skip_dir +* skip_dotfiles +* skip_symlinks +* sync_business_shared_items +* Creating, Modifying or Deleting the 'sync_list' file + +Additionally, you might opt for a `--resync` if you think it's necessary to ensure your data remains in sync. If you're using this switch simply because you're unsure of the sync status, you can check the actual sync status using `--display-sync-status`. + +When you use `--resync`, you'll encounter the following warning and advice: +```text +Using --resync will delete your local 'onedrive' client state, so there won't be a record of your current 'sync status.' +This may potentially overwrite local versions of files with older versions downloaded from OneDrive, leading to local data loss. 
+If in doubt, back up your local data before using --resync. + +Are you sure you want to proceed with --resync? [Y/N] +``` + +To proceed with `--resync`, you must type 'y' or 'Y' to allow the application to continue. + +**Note:** It's highly recommended to use `--resync` only if the application prompts you to do so. Don't blindly set the application to start with `--resync` as the default option. + +**Note:** In certain automated environments (assuming you know what you're doing due to automation), to avoid the 'proceed with acknowledgement' requirement, add `--resync-auth` to automatically acknowledge the prompt. + +### Performing a --force-sync without a --resync or changing your configuration +In some cases and situations, you may have configured the application to skip certain files and folders using 'skip_file' and 'skip_dir' configuration. You then may have a requirement to actually sync one of these items, but do not wish to modify your configuration, nor perform an entire `--resync` twice. + +The `--force-sync` option allows you to sync a specific directory, ignoring your 'skip_file' and 'skip_dir' configuration and negating the requirement to perform a `--resync`. + +To use this option, you must run the application manually in the following manner: +```text +onedrive --sync --single-directory '' --force-sync +``` + +When using `--force-sync`, you'll encounter the following warning and advice: +```text +WARNING: Overriding application configuration to use application defaults for skip_dir and skip_file due to --sync --single-directory --force-sync being used + +Using --force-sync will reconfigure the application to use defaults. This may have unknown future impacts. +By proceeding with this option, you accept any impacts, including potential data loss resulting from using --force-sync. + +Are you sure you want to proceed with --force-sync [Y/N] +``` + +To proceed with `--force-sync`, you must type 'y' or 'Y' to allow the application to continue. + +### Enabling the Client Activity Log +When running onedrive, all actions can be logged to a separate log file. This can be enabled by using the `--enable-logging` flag. By default, log files will be written to `/var/log/onedrive/` and will be in the format of `%username%.onedrive.log`, where `%username%` represents the user who ran the client to allow easy sorting of user to client activity log. + +**Note:** You will need to ensure the existence of this directory and that your user has the applicable permissions to write to this directory; otherwise, the following error message will be printed: +```text +ERROR: Unable to access /var/log/onedrive +ERROR: Please manually create '/var/log/onedrive' and set appropriate permissions to allow write access +ERROR: The requested client activity log will instead be located in your user's home directory +``` + +On many systems, this can be achieved by performing the following: +```text +sudo mkdir /var/log/onedrive +sudo chown root:users /var/log/onedrive +sudo chmod 0775 /var/log/onedrive +``` + +Additionally, you need to ensure that your user account is part of the 'users' group: +``` +cat /etc/group | grep users +``` + +If your user is not part of this group, then you need to add your user to this group: +``` +sudo usermod -a -G users +``` + +If you need to make a group modification, you will need to 'logout' of all sessions / SSH sessions to log in again to have the new group access applied. 
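+
+After logging back in, you can confirm that the group membership and directory permissions are in place before starting the client (assuming the hypothetical username 'alex'):
+```text
+# Confirm the user is a member of the 'users' group
+id alex
+# Confirm the logging directory is group writable
+ls -ld /var/log/onedrive
+```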
+ +If the client is unable to write the client activity log, the following error message will be printed: +```text +ERROR: Unable to write the activity log to /var/log/onedrive/%username%.onedrive.log +ERROR: Please set appropriate permissions to allow write access to the logging directory for your user account +ERROR: The requested client activity log will instead be located in your user's home directory +``` + +If you receive this error message, you will need to diagnose why your system cannot write to the specified file location. + +#### Client Activity Log Example: +An example of a client activity log for the command `onedrive --sync --enable-logging` is below: +```text +2023-Sep-27 08:16:00.1128806 Configuring Global Azure AD Endpoints +2023-Sep-27 08:16:00.1160620 Sync Engine Initialised with new Onedrive API instance +2023-Sep-27 08:16:00.5227122 All application operations will be performed in: /home/user/OneDrive +2023-Sep-27 08:16:00.5227977 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:16:00.7780979 Processing changes and items received from Microsoft OneDrive ... +2023-Sep-27 08:16:00.7781548 Performing a database consistency and integrity check on locally stored data ... +2023-Sep-27 08:16:00.7785889 Scanning the local file system '~/OneDrive' for new data to upload ... +2023-Sep-27 08:16:00.7813710 Performing a final true-up scan of online data from Microsoft OneDrive +2023-Sep-27 08:16:00.7814668 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:16:01.0141776 Processing changes and items received from Microsoft OneDrive ... +2023-Sep-27 08:16:01.0142454 Sync with Microsoft OneDrive is complete +``` +An example of a client activity log for the command `onedrive --sync --verbose --enable-logging` is below: +```text +2023-Sep-27 08:20:05.4600464 Checking Application Version ... +2023-Sep-27 08:20:05.5235017 Attempting to initialise the OneDrive API ... +2023-Sep-27 08:20:05.5237207 Configuring Global Azure AD Endpoints +2023-Sep-27 08:20:05.5238087 The OneDrive API was initialised successfully +2023-Sep-27 08:20:05.5238536 Opening the item database ... +2023-Sep-27 08:20:05.5270612 Sync Engine Initialised with new Onedrive API instance +2023-Sep-27 08:20:05.9226535 Application version: vX.Y.Z-A-bcdefghi +2023-Sep-27 08:20:05.9227079 Account Type: +2023-Sep-27 08:20:05.9227360 Default Drive ID: +2023-Sep-27 08:20:05.9227550 Default Root ID: +2023-Sep-27 08:20:05.9227862 Remaining Free Space: +2023-Sep-27 08:20:05.9228296 All application operations will be performed in: /home/user/OneDrive +2023-Sep-27 08:20:05.9228989 Fetching items from the OneDrive API for Drive ID: +2023-Sep-27 08:20:06.2076569 Performing a database consistency and integrity check on locally stored data ... +2023-Sep-27 08:20:06.2077121 Processing DB entries for this Drive ID: +2023-Sep-27 08:20:06.2078408 Processing ~/OneDrive +2023-Sep-27 08:20:06.2078739 The directory has not changed +2023-Sep-27 08:20:06.2079783 Processing Attachments +2023-Sep-27 08:20:06.2080071 The directory has not changed +2023-Sep-27 08:20:06.2081585 Processing Attachments/file.docx +2023-Sep-27 08:20:06.2082079 The file has not changed +2023-Sep-27 08:20:06.2082760 Processing Documents +2023-Sep-27 08:20:06.2083225 The directory has not changed +2023-Sep-27 08:20:06.2084284 Processing Documents/file.log +2023-Sep-27 08:20:06.2084886 The file has not changed +2023-Sep-27 08:20:06.2085150 Scanning the local file system '~/OneDrive' for new data to upload ... 
+2023-Sep-27 08:20:06.2087133 Skipping item - excluded by sync_list config: ./random_25k_files
+2023-Sep-27 08:20:06.2116235 Performing a final true-up scan of online data from Microsoft OneDrive
+2023-Sep-27 08:20:06.2117190 Fetching items from the OneDrive API for Drive ID:
+2023-Sep-27 08:20:06.5049743 Sync with Microsoft OneDrive is complete
+```
+
+#### Client Activity Log Differences
+Despite application logging being enabled as early as possible, the following log entries will be missing from the client activity log when compared to console output:
+
+**No user configuration file:**
+```text
+No user or system config file found, using application defaults
+Using 'user' configuration path for application state data: /home/user/.config/onedrive
+Using the following path to store the runtime application log: /var/log/onedrive
+```
+**User configuration file:**
+```text
+Reading configuration file: /home/user/.config/onedrive/config
+Configuration file successfully loaded
+Using 'user' configuration path for application state data: /home/user/.config/onedrive
+Using the following path to store the runtime application log: /var/log/onedrive
+```
+
+### GUI Notifications
+If notification support has been compiled in (refer to the GUI Notification Support section in install.md), the following events will trigger a GUI notification within the display manager session:
+* Aborting a sync if .nosync file is found
+* Skipping a particular item due to an invalid name
+* Skipping a particular item due to an invalid symbolic link
+* Skipping a particular item due to an invalid UTF sequence
+* Skipping a particular item due to an invalid character encoding sequence
+* Cannot create remote directory
+* Cannot upload file changes (free space issue, breaches maximum allowed size, breaches maximum OneDrive Account path length)
+* Cannot delete remote file / folder
+* Cannot move remote file / folder
+* When a re-authentication is required
+* When a new client version is available
+* Files that fail to upload
+* Files that fail to download
+
+### Handling a Microsoft OneDrive Account Password Change
+If you change your Microsoft OneDrive Account Password, the client will no longer be authorised to sync, and will generate the following error upon the next application run:
+```text
+AADSTS50173: The provided grant has expired due to it being revoked, a fresh auth token is needed. The user might have changed or reset their password. The grant was issued on '' and the TokensValidFrom date (before which tokens are not valid) for this user is ''.
+
+ERROR: You will need to issue a --reauth and re-authorise this client to obtain a fresh auth token.
+```
+
+To re-authorise the client, follow the steps below:
+1. If running the client as a system service (init.d or systemd), stop the applicable system service
+2. Run the command `onedrive --reauth`. This will clean up the previous authorisation, and will prompt you to re-authorise the client as per initial configuration. Please note, if you are using `--confdir` as part of your application runtime configuration, you must include this when telling the client to re-authenticate.
+3. Restart the client if running as a system service or perform the standalone sync operation again
+
+The application will now sync with OneDrive with the new credentials.
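+
+As an end-to-end illustration of these steps for a client running as a systemd service for the hypothetical user 'alex' (adjust the service name to match your own setup, and include any --confdir value you normally use):
+```text
+# Stop the systemd service that runs the client
+sudo systemctl stop onedrive@alex.service
+# As the 'alex' user, remove the expired authorisation and re-authorise the client
+onedrive --reauth
+# Restart the service once re-authorisation is complete
+sudo systemctl start onedrive@alex.service
+```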
+ +### Determining the synchronisation result +When the client has finished syncing without errors, the following will be displayed: +``` +Sync with Microsoft OneDrive is complete +``` + +If any items failed to sync, the following will be displayed: +``` +Sync with Microsoft OneDrive has completed, however there are items that failed to sync. +``` +A file list of failed upload or download items will also be listed to allow you to determine your next steps. + +In order to fix the upload or download failures, you may need to: +* Review the application output to determine what happened +* Re-try your command utilising a resync to ensure your system is correctly synced with your Microsoft OneDrive Account + +## Frequently Asked Configuration Questions + +### How to change the default configuration of the client? +Configuration is determined by three layers, and applied in the following order: +* Application default values +* Values that are set in the configuration file +* Values that are passed in via the command line at application runtime. These values will override any configuration file set value. + +The default application values provide a reasonable operational default, and additional configuration is entirely optional. + +If you want to change the application defaults, you can download a copy of the config file into your application configuration directory. Valid default directories for the config file are: +* `~/.config/onedrive` +* `/etc/onedrive` + +**Example:** To download a copy of the config file, use the following: +```text +mkdir -p ~/.config/onedrive +wget https://raw.githubusercontent.com/abraunegg/onedrive/master/config -O ~/.config/onedrive/config +``` + +For full configuration options and CLI switches, please refer to application-config-options.md + +### How to change where my data from Microsoft OneDrive is stored? +By default, the location where your Microsoft OneDrive data is stored, is within your Home Directory under a directory called 'OneDrive'. This replicates as close as possible where the Microsoft Windows OneDrive client stores data. + +To change this location, the application configuration option 'sync_dir' is used to specify a new local directory where your Microsoft OneDrive data should be stored. + +**Important Note:** If your `sync_dir` is pointing to a network mount point (a network share via NFS, Windows Network Share, Samba Network Share) these types of network mount points do not support 'inotify', thus tracking real-time changes via inotify of local files is not possible when using 'Monitor Mode'. Local filesystem changes will be replicated between the local filesystem and Microsoft OneDrive based on the `monitor_interval` value. This is not something (inotify support for NFS, Samba) that this client can fix. + +### How to change what file and directory permissions are assigned to data that is downloaded from Microsoft OneDrive? +The following are the application default permissions for any new directory or file that is created locally when downloaded from Microsoft OneDrive: +* Directories: 700 - This provides the following permissions: `drwx------` +* Files: 600 - This provides the following permissions: `-rw-------` + +These default permissions align to the security principal of 'least privilege' so that only you should have access to your data that you download from Microsoft OneDrive. + +To alter these default permissions, you can adjust the values of two configuration options as follows. 
You can also use the [Unix Permissions Calculator](https://chmod-calculator.com/) to help you determine the necessary new permissions. +```text +sync_dir_permissions = "700" +sync_file_permissions = "600" +``` + +**Important:** Please note that special permission bits such as setuid, setgid, and the sticky bit are not supported. Valid permission values range from `000` to `777` only. + +### How are uploads and downloads managed? +The system manages downloads and uploads using a multi-threaded approach. Specifically, the application utilises 16 threads for these processes. This thread count is preset and cannot be modified by users. This design ensures efficient handling of data transfers but does not allow for customisation of thread allocation. + +### How to only sync a specific directory? +There are two methods to achieve this: +* Employ the '--single-directory' option to only sync this specific path +* Employ 'sync_list' as part of your 'config' file to configure what files and directories to sync, and what should be excluded + +### How to 'skip' files from syncing? +There are two methods to achieve this: +* Employ 'skip_file' as part of your 'config' file to configure what files to skip +* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded + +### How to 'skip' directories from syncing? +There are three methods available to 'skip' a directory from the sync process: +* Employ 'skip_dir' as part of your 'config' file to configure what directories to skip +* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded +* Employ 'check_nosync' as part of your 'config' file and a '.nosync' empty file within the directory to exclude to skip that directory + +### How to 'skip' .files and .folders from syncing? +There are three methods to achieve this: +* Employ 'skip_file' or 'skip_dir' to configure what files or folders to skip +* Employ 'sync_list' to configure what files and directories to sync, and what should be excluded +* Employ 'skip_dotfiles' as part of your 'config' file to skip any dot file (for example: `.Trash-1000` or `.xdg-volume-info`) from syncing to OneDrive + +### How to 'skip' files larger than a certain size from syncing? +Use `skip_size = "value"` as part of your 'config' file where files larger than this size (in MB) will be skipped. + +### How to 'rate limit' the application to control bandwidth consumed for upload & download operations? +To optimise Internet bandwidth usage during upload and download processes, include the 'rate_limit' setting in your configuration file. This setting controls the bandwidth allocated to each thread. + +By default, 'rate_limit' is set to '0', indicating that the application will utilise the maximum available bandwidth across all threads. + +To check the current 'rate_limit' value, use the `--display-config` command. + +**Note:** Since downloads and uploads are processed through multiple threads, the 'rate_limit' value applies to each thread separately. For instance, setting 'rate_limit' to 1048576 (1MB) means that during data transfers, the total bandwidth consumption might reach around 16MB, not just the 1MB configured due to the number of threads being used. + +### How can I prevent my local disk from filling up? +By default, the application will reserve 50MB of disk space to prevent your filesystem from running out of disk space. 
+
+This default value can be modified by adding the 'space_reservation' configuration option and the applicable value as part of your 'config' file.
+
+You can review the value being used when using `--display-config`.
+
+### How does the client handle symbolic links?
+Microsoft OneDrive has no concept or understanding of symbolic links, and attempting to upload a symbolic link to Microsoft OneDrive generates a platform API error. All data (files and folders) that are uploaded to OneDrive must be whole files or actual directories.
+
+As such, there are only two methods to support symbolic links with this client:
+1. Follow the Linux symbolic link and upload to Microsoft OneDrive whatever the local symbolic link is pointing to. This is the default behaviour.
+2. Skip symbolic links by configuring the application to do so. When skipping, no data, no link, no reference is uploaded to OneDrive.
+
+Use 'skip_symlinks' as part of your 'config' file to configure the skipping of all symbolic links while syncing.
+
+### How to synchronise shared folders (OneDrive Personal)?
+Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive".
+
+### How to synchronise shared folders (OneDrive Business or Office 365)?
+Folders shared with you can be synchronised by adding them to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the folder you want to synchronise, and then click on "Add to my OneDrive".
+
+Refer to [./business-shared-folders.md](business-shared-folders.md) for further details.
+
+### How to synchronise SharePoint / Office 365 Shared Libraries?
+There are two methods to achieve this:
+* SharePoint library can be directly added to your OneDrive online. To do that, open your OneDrive account online, go to the Shared files list, right-click on the SharePoint Library you want to synchronise, and then click on "Add to my OneDrive".
+* Configure a separate application instance to only synchronise that specific SharePoint Library. Refer to [./sharepoint-libraries.md](sharepoint-libraries.md) for configuration assistance.
+
+### How to Create a Shareable Link?
+In certain situations, you might want to generate a shareable file link and provide this link to other users for accessing a specific file.
+
+To accomplish this, employ the following command:
+```text
+onedrive --create-share-link 
+```
+**Note:** By default, the access permissions for the file link will be read-only.
+
+To make it a read-write link, execute the following command:
+```text
+onedrive --create-share-link --with-editing-perms
+```
+**Note:** The order of the file path and option flag is crucial.
+
+### How to Synchronise Both Personal and Business Accounts at once?
+You need to set up separate instances of the application configuration for each account.
+
+Refer to [./advanced-usage.md](advanced-usage.md) for guidance on configuration.
+
+### How to Synchronise Multiple SharePoint Libraries simultaneously?
+For each SharePoint Library, configure a separate instance of the application configuration.
+
+Refer to [./advanced-usage.md](advanced-usage.md) for configuration instructions.
+
+### How to Receive Real-time Changes from Microsoft OneDrive Service, instead of waiting for the next sync period?
+When operating in 'Monitor Mode,' it may be advantageous to receive real-time updates to online data. A 'webhook' is the method to achieve this, so that when in 'Monitor Mode,' the client subscribes to remote updates. + +Remote changes can then be promptly synchronised to your local file system, without waiting for the next synchronisation cycle. + +This is accomplished by: +* Using 'webhook_enabled' as part of your 'config' file to enable this feature +* Using 'webhook_public_url' as part of your 'config' file to configure the URL the webhook will use for subscription updates + +### How to initiate the client as a background service? +There are a few ways to employ onedrive as a service: +* via init.d +* via systemd +* via runit + +#### OneDrive service running as root user via init.d +```text +chkconfig onedrive on +service onedrive start +``` +To view the logs, execute: +```text +tail -f /var/log/onedrive/.onedrive.log +``` +To alter the 'user' under which the client operates (typically root by default), manually modify the init.d service file and adjust `daemon --user root onedrive_service.sh` to match the correct user. + +#### OneDrive service running as root user via systemd (Arch, Ubuntu, Debian, OpenSuSE, Fedora) +Initially, switch to the root user with `su - root`, then activate the systemd service: +```text +systemctl --user enable onedrive +systemctl --user start onedrive +``` +**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms - see below. + +**Note:** This will execute the 'onedrive' process with a UID/GID of '0', which means any files or folders created will be owned by 'root'. + +To monitor the service's status, use the following: +```text +systemctl --user status onedrive.service +``` + +To observe the systemd application logs, use: +```text +journalctl --user-unit=onedrive -f +``` + +**Note:** For systemd to function correctly, it requires the presence of XDG environment variables. If you encounter the following error while enabling the systemd service: +```text +Failed to connect to bus: No such file or directory +``` +The most likely cause is missing XDG environment variables. To resolve this, add the following lines to `.bashrc` or another file executed upon user login: +```text +export XDG_RUNTIME_DIR="/run/user/$UID" +export DBUS_SESSION_BUS_ADDRESS="unix:path=${XDG_RUNTIME_DIR}/bus" +``` + +To apply this change, you must log out of all user accounts where it has been made. + +**Note:** On certain systems (e.g., Raspbian / Ubuntu / Debian on Raspberry Pi), the XDG fix above may not persist after system reboots. An alternative to starting the client via systemd as root is as follows: +1. Create a symbolic link from `/home/root/.config/onedrive` to `/root/.config/onedrive/`. +2. Establish a systemd service using the '@' service file: `systemctl enable onedrive@root.service`. +3. Start the root@service: `systemctl start onedrive@root.service`. + +This ensures that the service correctly restarts upon system reboot. + +To examine the systemd application logs, run: +```text +journalctl --unit=onedrive@ -f +``` + +#### OneDrive service running as root user via systemd (Red Hat Enterprise Linux, CentOS Linux) +```text +systemctl enable onedrive +systemctl start onedrive +``` +**Note:** This will execute the 'onedrive' process with a UID/GID of '0', meaning any files or folders created will be owned by 'root'. 
+ +To view the systemd application logs, execute: +```text +journalctl --unit=onedrive -f +``` + +#### OneDrive service running as a non-root user via systemd (All Linux Distributions) +In some instances, it is preferable to run the OneDrive client as a service without the 'root' user. Follow the instructions below to configure the service for your regular user login. + +1. As the user who will run the service, launch the application in standalone mode, authorize it for use, and verify that synchronization is functioning as expected: +```text +onedrive --sync --verbose +``` +2. After validating the application for your user, switch to the 'root' user, where is your username from step 1 above. +```text +systemctl enable onedrive@.service +systemctl start onedrive@.service +``` +3. To check the service's status for the user, use the following: +```text +systemctl status onedrive@.service +``` + +To observe the systemd application logs, use: +```text +journalctl --unit=onedrive@ -f +``` + +#### OneDrive service running as a non-root user via systemd (with notifications enabled) (Arch, Ubuntu, Debian, OpenSuSE, Fedora) +In some scenarios, you may want to receive GUI notifications when using the client as a non-root user. In this case, follow these steps: + +1. Log in via the graphical UI as the user you want to enable the service for. +2. Disable any `onedrive@` service files for your username, e.g.: +```text +sudo systemctl stop onedrive@alex.service +sudo systemctl disable onedrive@alex.service +``` +3. Enable the service as follows: +```text +systemctl --user enable onedrive +systemctl --user start onedrive +``` + +To check the service's status for the user, use the following: +```text +systemctl --user status onedrive.service +``` + +To view the systemd application logs, execute: +```text +journalctl --user-unit=onedrive -f +``` + +**Note:** The `systemctl --user` command is not applicable to Red Hat Enterprise Linux (RHEL) or CentOS Linux platforms. + +#### OneDrive service running as a non-root user via runit (antiX, Devuan, Artix, Void) + +1. Create the following folder if it doesn't already exist: `/etc/sv/runsvdir-` + + - where `` is the `USER` targeted for the service + - e.g., `# mkdir /etc/sv/runsvdir-nolan` + +2. Create a file called `run` under the previously created folder with executable permissions + + - `# touch /etc/sv/runsvdir-/run` + - `# chmod 0755 /etc/sv/runsvdir-/run` + +3. Edit the `run` file with the following contents (permissions needed): + + ```sh + #!/bin/sh + export USER="" + export HOME="/home/" + + groups="$(id -Gn "${USER}" | tr ' ' ':')" + svdir="${HOME}/service" + + exec chpst -u "${USER}:${groups}" runsvdir "${svdir}" + ``` + + - Ensure you replace `` with the `USER` set in step #1. + +4. Enable the previously created folder as a service + + - `# ln -fs /etc/sv/runsvdir- /var/service/` + +5. Create a subfolder in the `USER`'s `HOME` directory to store the services (or symlinks) + + - `$ mkdir ~/service` + +6. Create a subfolder specifically for OneDrive + + - `$ mkdir ~/service/onedrive/` + +7. Create a file called `run` under the previously created folder with executable permissions + + - `$ touch ~/service/onedrive/run` + - `$ chmod 0755 ~/service/onedrive/run` + +8. Append the following contents to the `run` file + + ```sh + #!/usr/bin/env sh + exec /usr/bin/onedrive --monitor + ``` + + - In some scenarios, the path to the `onedrive` binary may vary. You can obtain it by running `$ command -v onedrive`. + +9. Reboot to apply the changes + +10. 
Check the status of user-defined services + + - `$ sv status ~/service/*` + +For additional details, you can refer to Void's documentation on [Per-User Services](https://docs.voidlinux.org/config/services/user-services.html). + +### How to start a user systemd service at boot without user login? +In some situations, it may be necessary for the systemd service to start without requiring your 'user' to log in. + +To address this issue, you need to reconfigure your 'user' account so that the systemd services you've created launch without the need for you to log in to your system: +```text +loginctl enable-linger +``` \ No newline at end of file From f93e3a465d65cdf95319e0801edb79c0a0ffbb7b Mon Sep 17 00:00:00 2001 From: abraunegg Date: Tue, 9 Jan 2024 09:27:15 +1100 Subject: [PATCH 004/305] Delete readme & changelog due to POSIX issue * Delete readme & changelog due to POSIX issue --- CHANGELOG.md | 1041 -------------------------------------------------- README.md | 85 ----- 2 files changed, 1126 deletions(-) delete mode 100644 CHANGELOG.md delete mode 100644 README.md diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 8f7f357ad..000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,1041 +0,0 @@ -# Changelog -The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) -and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). - -## 2.5.0 - TBA - - -### Changed -* Renamed various documentation files to align with document content - - -## 2.4.25 - 2023-06-21 -### Fixed -* Fixed that the application was reporting as v2.2.24 when in fact it was v2.4.24 (release tagging issue) -* Fixed that the running version obsolete flag (due to above issue) was causing a false flag as being obsolete -* Fixed that zero-byte files do not have a hash as reported by the OneDrive API thus should not generate an error message - -### Updated -* Update to Debian Docker file to resolve Docker image Operating System reported vulnerabilities -* Update to Alpine Docker file to resolve Docker image Operating System reported vulnerabilities -* Update to Fedora Docker file to resolve Docker image Operating System reported vulnerabilities -* Updated documentation (various) - -## 2.4.24 - 2023-06-20 -### Fixed -* Fix for extra encoded quotation marks surrounding Docker environment variables -* Fix webhook subscription creation for SharePoint Libraries -* Fix that a HTTP 504 - Gateway Timeout causes local files to be deleted when using --download-only & --cleanup-local-files mode -* Fix that folders are renamed despite using --dry-run -* Fix deprecation warnings with dmd 2.103.0 -* Fix error that the application is unable to perform a database vacuum: out of memory when exiting - -### Removed -* Remove sha1 from being used by the client as this is being depreciated by Microsoft in July 2023 -* Complete the removal of crc32 elements - -### Added -* Added ONEDRIVE_SINGLE_DIRECTORY configuration capability to Docker -* Added --get-file-link shell completion -* Added configuration to allow HTTP session timeout(s) tuning via config (taken from v2.5.x) - -### Updated -* Update to Debian Docker file to resolve Docker image Operating System reported vulnerabilities -* Update to Alpine Docker file to resolve Docker image Operating System reported vulnerabilities -* Update to Fedora Docker file to resolve Docker image Operating System reported vulnerabilities -* Updated cgi.d to commit 680003a - last upstream change before requiring `core.d` dependency requirement -* Updated 
documentation (various) - -## 2.4.23 - 2023-01-06 -### Fixed -* Fixed RHEL7, RHEL8 and RHEL9 Makefile and SPEC file compatibility - -### Removed -* Disable systemd 'PrivateUsers' due to issues with systemd running processes when option is enabled, causes local file deletes on RHEL based systems - -### Updated -* Update --get-O365-drive-id error handling to display a more a more appropriate error message if the API cannot be found -* Update the GitHub version check to utilise the date a release was done, to allow 1 month grace period before generating obsolete version message -* Update Alpine Dockerfile to use Alpine 3.17 and Golang 1.19 -* Update handling of --source-directory and --destination-directory if one is empty or missing and if used with --synchronize or --monitor -* Updated documentation (various) - -## 2.4.22 - 2022-12-06 -### Fixed -* Fix application crash when local file is changed to a symbolic link with non-existent target -* Fix build error with dmd-2.101.0 -* Fix build error with LDC 1.28.1 on Alpine -* Fix issue of silent exit when unable to delete local files when using --cleanup-local-files -* Fix application crash due to access permissions on configured path for sync_dir -* Fix potential application crash when exiting due to failure state and unable to cleanly shutdown the database -* Fix creation of parent empty directories when parent is excluded by sync_list - -### Added -* Added performance output details for key functions - -### Changed -* Switch Docker 'latest' to point at Debian builds rather than Fedora due to ongoing Fedora build failures -* Align application logging events to actual application defaults for --monitor operations -* Performance Improvement: Avoid duplicate costly path calculations and DB operations if not required -* Disable non-working remaining sandboxing options within systemd service files -* Performance Improvement: Only check 'sync_list' if this has been enabled and configured -* Display 'Sync with OneDrive is complete' when using --synchronize -* Change the order of processing between Microsoft OneDrive restrictions and limitations check and skip_file|skip_dir check - -### Removed -* Remove building Fedora ARMv7 builds due to ongoing build failures - -### Updated -* Update config change detection handling -* Updated documentation (various) - -## 2.4.21 - 2022-09-27 -### Fixed -* Fix that the download progress bar doesn't always reach 100% when rate_limit is set -* Fix --resync handling of database file removal -* Fix Makefile to be consistent with permissions that are being used -* Fix that logging output for skipped uploaded files is missing -* Fix to allow non-sync tasks while sync is running -* Fix where --resync is enforced for non-sync operations -* Fix to resolve segfault when running 'onedrive --display-sync-status' when run as 2nd process -* Fix DMD 2.100.2 depreciation warning - -### Added -* Add GitHub Action Test Build Workflow (replacing Travis CI) -* Add option --display-running-config to display the running configuration as used at application startup -* Add 'config' option to request readonly access in oauth authorization step -* Add option --cleanup-local-files to cleanup local files regardless of sync state when using --download-only -* Add option --with-editing-perms to create a read-write shareable link when used with --create-share-link - -### Changed -* Change the exit code of the application to 126 when a --resync is required - -### Updated -* Updated --get-O365-drive-id implementation for data access -* Update 
what application options require an argument -* Update application logging output for error messages to remove certain \n prefix when logging to a file -* Update onedrive.spec.in to fix error building RPM -* Update GUI notification handling for specific skipped scenarios -* Updated documentation (various) - -## 2.4.20 - 2022-07-20 -### Fixed -* Fix 'foreign key constraint failed' when using OneDrive Business Shared Folders due to change to using /delta query -* Fix various little spelling fixes (check with lintian during Debian packaging) -* Fix handling of a custom configuration directory when using --confdir -* Fix to ensure that any active http instance is shutdown before any application exit -* Fix to enforce that --confdir must be a directory - -### Added -* Added 'force_http_11' configuration option to allow forcing HTTP/1.1 operations - -### Changed -* Increased thread sleep for better process I/O wait handling -* Removed 'force_http_2' configuration option - -### Updated -* Update OneDrive API response handling for National Cloud Deployments -* Updated to switch to using curl defaults for HTTP/2 operations -* Updated documentation (various) - -## 2.4.19 - 2022-06-15 -### Fixed -* Update Business Shared Folders to use a /delta query -* Update when DB is updated by OneDrive API data and update when file hash is required to be generated - -### Added -* Added ONEDRIVE_UPLOADONLY flag for Docker - -### Updated -* Updated GitHub workflows -* Updated documentation (various) - -## 2.4.18 - 2022-06-02 -### Fixed -* Fixed various database related access issues steming from running multiple instances of the application at the same time using the same configuration data -* Fixed --display-config being impacted by --resync flag -* Fixed installation permissions for onedrive man-pages file -* Fixed that in some situations that users try --upload-only and --download-only together which is not possible -* Fixed application crash if unable to read required hash files - -### Added -* Added Feature Request to add an override for skip_dir|skip_file through flag to force sync -* Added a check to validate local filesystem available space before attempting file download -* Added GitHub Actions to build Docker containers and push to DockerHub - -### Updated -* Updated all Docker build files to current distributions, using updated distribution LDC version -* Updated logging output to logfiles when an actual sync process is occuring -* Updated output of --display-config to be more relevant -* Updated manpage to align with application configuration -* Updated documentation and Docker files based on minimum compiler versions to dmd-2.088.0 and ldc-1.18.0 -* Updated documentation (various) - -## 2.4.17 - 2022-04-30 -### Fixed -* Fix docker build, by add missing git package for Fedora builds -* Fix application crash when attempting to sync a broken symbolic link -* Fix Internet connect disruption retry handling and logging output -* Fix local folder creation timestamp with timestamp from OneDrive -* Fix logging output when download failed - -### Added -* Add additional logging specifically for delete event to denote in log output the source of a deletion event when running in --monitor mode - -### Changed -* Improve when the local database integrity check is performed and on what frequency the database integrity check is performed - -### Updated -* Remove application output ambiguity on how to access 'help' for the client -* Update logging output when running in --monitor --verbose mode in regards to the 
inotify events -* Updated documentation (various) - -## 2.4.16 - 2022-03-10 -### Fixed -* Update application file logging error handling -* Explicitly set libcurl options -* Fix that when a sync_list exclusion is matched, the item needs to be excluded when using --resync -* Fix so that application can be compiled correctly on Android hosts -* Fix the handling of 429 and 5xx responses when they are generated by OneDrive in a self-referencing circular pattern -* Fix applying permissions to volume directories when running in rootless podman -* Fix unhandled errors from OneDrive when initialising subscriptions fail - -### Added -* Enable GitHub Sponsors -* Implement --resync-auth to enable CLI passing in of --rsync approval -* Add function to check client version vs latest GitHub release -* Add --reauth to allow easy re-authentication of the client -* Implement --modified-by to display who last modified a file and when the modification was done -* Implement feature request to mark partially-downloaded files as .partial during download -* Add documentation for Podman support - -### Changed -* Document risk regarding using --resync and force user acceptance of usage risk to proceed -* Use YAML for Bug Reports and Feature Requests -* Update Dockerfiles to use more modern base Linux distribution - -### Updated -* Updated documentation (various) - -## 2.4.15 - 2021-12-31 -### Fixed -* Fix unable to upload to OneDrive Business Shared Folders due to OneDrive API restricting quota information -* Update fixing edge case with OneDrive Personal Shared Folders and --resync --upload-only - -### Added -* Add SystemD hardening -* Add --operation-timeout argument - -### Changed -* Updated minimum compiler versions to dmd-2.087.0 and ldc-1.17.0 - -### Updated -* Updated Dockerfile-alpine to use Apline 3.14 -* Updated documentation (various) - -## 2.4.14 - 2021-11-24 -### Fixed -* Support DMD 2.097.0 as compiler for Docker Builds -* Fix getPathDetailsByDriveId query when using --dry-run and a nested path with --single-directory -* Fix edge case when syncing OneDrive Personal Shared Folders -* Catch unhandled API response errors when querying OneDrive Business Shared Folders -* Catch unhandled API response errors when listing OneDrive Business Shared Folders -* Fix error 'Key not found: remaining' with Business Shared Folders (OneDrive API change) -* Fix overwriting local files with older versions from OneDrive when items.sqlite3 does not exist and --resync is not used - -### Added -* Added operation_timeout as a new configuration to assist in cases where operations take longer that 1h to complete -* Add Real-Time syncing of remote updates via webhooks -* Add --auth-response option and expose through entrypoint.sh for Docker -* Add --disable-download-validation - -### Changed -* Always prompt for credentials for authentication rather than re-using cached browser details -* Do not re-auth on --logout - -### Updated -* Updated documentation (various) - -## 2.4.13 - 2021-7-14 -### Fixed -* Support DMD 2.097.0 as compiler -* Fix to handle OneDrive API Bad Request response when querying if file exists -* Fix application crash and incorrect handling of --single-directory when syncing a OneDrive Business Shared Folder due to using 'Add Shortcut to My Files' -* Fix application crash due to invalid UTF-8 sequence in the pathname for the application configuration -* Fix error message when deleting a large number of files -* Fix Docker build process to source GOSU keys from updated GPG key location -* Fix application crash 
due to a conversion overflow when calculating file offset for session uploads -* Fix Docker Alpine build failing due to filesystem permissions issue due to Docker build system and Alpine Linux 3.14 incompatibility -* Fix that Business Shared Folders with parentheses are ignored - -### Updated -* Updated Lock Bot to run daily -* Updated documentation (various) - -## 2.4.12 - 2021-5-28 -### Fixed -* Fix an unhandled Error 412 when uploading modified files to OneDrive Business Accounts -* Fix 'sync_list' handling of inclusions when name is included in another folders name -* Fix that options --upload-only & --remove-source-files are ignored on an upload session restore -* Fix to add file check when adding item to database if using --upload-only --remove-source-files -* Fix application crash when SharePoint displayName is being withheld - -### Updated -* Updated Lock Bot to use GitHub Actions -* Updated documentation (various) - -## 2.4.11 - 2021-4-07 -### Fixed -* Fix support for '/*' regardless of location within sync_list file -* Fix 429 response handling correctly check for 'retry-after' response header and use set value -* Fix 'sync_list' path handling for sub item matching, so that items in parent are not implicitly matched when there is no wildcard present -* Fix --get-O365-drive-id to use 'nextLink' value if present when searching for specific SharePoint site names -* Fix OneDrive Business Shared Folder existing name conflict check -* Fix incorrect error message 'Item cannot be deleted from OneDrive because it was not found in the local database' when item is actually present -* Fix application crash when unable to rename folder structure due to unhandled file-system issue -* Fix uploading documents to Shared Business Folders when the shared folder exists on a SharePoint site due to Microsoft Sharepoint 'enrichment' of files -* Fix that a file record is kept in database when using --no-remote-delete & --remove-source-files - -### Added -* Added support in --get-O365-drive-id to provide the 'drive_id' for multiple 'document libraries' within a single Shared Library Site - -### Removed -* Removed the depreciated config option 'force_http_11' which was flagged as depreciated by PR #549 in v2.3.6 (June 2019) - -### Updated -* Updated error output of --get-O365-drive-id to provide more details why an error occurred if a SharePoint site lacks the details we need to perform the match -* Updated Docker build files for Raspberry Pi to dedicated armhf & aarch64 Dockerfiles -* Updated logging output when in --monitor mode, avoid outputting misleading logging when the new or modified item is a file, not a directory -* Updated documentation (various) - -## 2.4.10 - 2021-2-19 -### Fixed -* Catch database assertion when item path cannot be calculated -* Fix alpine Docker build so it uses the same golang alpine version -* Search all distinct drive id's rather than just default drive id for --get-file-link -* Use correct driveId value to query for changes when using --single-directory -* Improve upload handling of files for SharePoint sites and detecting when SharePoint modifies the file post upload -* Correctly handle '~' when present in 'log_dir' configuration option -* Fix logging output when handing downloaded new files -* Fix to use correct path offset for sync_list exclusion matching - -### Added -* Add upload speed metrics when files are uploaded and clarify that 'data to transfer' is what is needed to be downloaded from OneDrive -* Add new config option to rate limit connection to OneDrive -* 
Support new file maximum upload size of 250GB -* Support sync_list matching full path root wildcard with exclusions to simplify sync_list configuration - -### Updated -* Rename Office365.md --> SharePoint-Shared-Libraries.md which better describes this document -* Updated Dockerfile config for arm64 -* Updated documentation (various) - -## 2.4.9 - 2020-12-27 -### Fixed -* Fix to handle case where API provided deltaLink generates a further API error -* Fix application crash when unable to read a local file due to local file permissions -* Fix application crash when calculating the path length due to invalid UTF characters in local path -* Fix Docker build on Alpine due missing symbols due to using the edge version of ldc and ldc-runtime -* Fix application crash with --get-O365-drive-id when API response is restricted - -### Added -* Add debug log output of the configured URL's which will be used throughout the application to remove any ambiguity as to using incorrect URL's when making API calls -* Improve application startup when using --monitor when there is no network connection to the OneDrive API and only initialise application once OneDrive API is reachable -* Add Docker environment variable to allow --logout for re-authentication - -### Updated -* Remove duplicate code for error output functions and enhance error logging output -* Updated documentation - -## 2.4.8 - 2020-11-30 -### Fixed -* Fix to use config set option for 'remove_source_files' and 'skip_dir_strict_match' rather than ignore if set -* Fix download failure and crash due to incorrect local filesystem permissions when using mounted external devices -* Fix to not change permissions on pre-existing local directories -* Fix logging output when authentication authorisation fails to not say authorisation was successful -* Fix to check application_id before setting redirect URL when using specific Azure endpoints -* Fix application crash in --monitor mode due to 'Failed to stat file' when setgid is used on a directory and data cannot be read - -### Added -* Added advanced-usage.md to document advaced client usage such as multi account configurations and Windows dual-boot - -### Updated -* Updated --verbose logging output for config options when set -* Updated documentation (man page, USAGE.md, Office365.md, BusinessSharedFolders.md) - -## 2.4.7 - 2020-11-09 -### Fixed -* Fix debugging output for /delta changes available queries -* Fix logging output for modification comparison source data -* Fix Business Shared Folder handling to process only Shared Folders, not individually shared files -* Fix cleanup dryrun shm and wal files if they exist -* Fix --list-shared-folders to only show folders -* Fix to check for the presence of .nosync when processing DB entries -* Fix skip_dir matching when using --resync -* Fix uploading data to shared business folders when using --upload-only -* Fix to merge contents of SQLite WAL file into main database file on sync completion -* Fix to check if localModifiedTime is >= than item.mtime to avoid re-upload for equal modified time -* Fix to correctly set config directory permissions at first start - -### Added -* Added environment variable to allow easy HTTPS debug in docker -* Added environment variable to allow download-only mode in Docker -* Implement Feature: Allow config to specify a tenant id for non-multi-tenant applications -* Implement Feature: Adding support for authentication with single tenant custom applications -* Implement Feature: Configure specific File and Folder Permissions - 
-### Updated -* Updated documentation (readme.md, install.md, usage.md, bug_report.md) - -## 2.4.6 - 2020-10-04 -### Fixed -* Fix flagging of remaining free space when value is being restricted -* Fix --single-directory path handling when path does not exist locally -* Fix checking for 'Icon' path as no longer listed by Microsoft as an invalid file or folder name -* Fix removing child items on OneDrive when parent item responds with access denied -* Fix to handle deletion events for files when inotify events are missing -* Fix uninitialised value error as reported by valgrind -* Fix to handle deletion events for directories when inotify events are missing - -### Added -* Implement Feature: Create shareable link -* Implement Feature: Support wildcard within sync_list entries -* Implement Feature: Support negative patterns in sync_list for fine grained exclusions -* Implement Feature: Multiple skip_dir & skip_file configuration rules -* Add GUI notification to advise users when the client needs to be reauthenticated - -### Updated -* Updated documentation (readme.md, install.md, usage.md, bug_report.md) - -## 2.4.5 - 2020-08-13 -### Fixed -* Fixed fish auto completions installation destination - -## 2.4.4 - 2020-08-11 -### Fixed -* Fix 'skip_dir' & 'skip_file' pattern matching to ensure correct matching is performed -* Fix 'skip_dir' & 'skip_file' so that each directive is only used against directories or files as requried in --monitor -* Fix client hand when attempting to sync a Unix pipe file -* Fix --single-directory & 'sync_list' performance -* Fix erroneous 'return' statements which could prematurely end processing all changes returned from OneDrive -* Fix segfault when attempting to perform a comparison on an inotify event when determining if event path is directory or file -* Fix handling of Shared Folders to ensure these are checked against 'skip_dir' entries -* Fix 'Skipping uploading this new file as parent path is not in the database' when uploading to a Personal Shared Folder -* Fix how available free space is tracked when uploading files to OneDrive and Shared Folders -* Fix --single-directory handling of parent path matching if path is being seen for first time - -### Added -* Added Fish auto completions - -### Updated -* Increase maximum individual file size to 100GB due to Microsoft file limit increase -* Update Docker build files and align version of compiler across all Docker builds -* Update Docker documentation -* Update NixOS build information -* Update the 'Processing XXXX' output to display the full path -* Update logging output when a sync starts and completes when using --monitor -* Update Office 365 / SharePoint site search query and response if query return zero match - -## 2.4.3 - 2020-06-29 -### Fixed -* Check if symbolic link is relative to location path -* When using output logfile, fix inconsistent output spacing -* Perform initial sync at startup in monitor mode -* Handle a 'race' condition to process inotify events generated whilst performing DB or filesystem walk -* Fix segfault when moving folder outside the sync directory when using --monitor on Arch Linux - -### Added -* Added additional inotify event debugging -* Added support for loading system configs if there's no user config -* Added Ubuntu installation details to include installing the client from a PPA -* Added openSUSE installation details to include installing the client from a package -* Added support for comments in sync_list file -* Implement recursive deletion when Retention Policy is 
enabled on OneDrive Business Accounts -* Implement support for National cloud deployments -* Implement OneDrive Business Shared Folders Support - -### Updated -* Updated documentation files (various) -* Updated log output messaging when a full scan has been set or triggered -* Updated buildNormalizedPath complexity to simplify code -* Updated to only process OneDrive Personal Shared Folders only if account type is 'personal' - -## 2.4.2 - 2020-05-27 -### Fixed -* Fixed the catching of an unhandled exception when inotify throws an error -* Fixed an uncaught '100 Continue' response when files are being uploaded -* Fixed progress bar for uploads to be more accurate regarding percentage complete -* Fixed handling of database query enforcement if item is from a shared folder -* Fixed compiler depreciation of std.digest.digest -* Fixed checking & loading of configuration file sequence -* Fixed multiple issues reported by Valgrind -* Fixed double scan at application startup when using --monitor & --resync together -* Fixed when renaming a file locally, ensure that the target filename is valid before attempting to upload to OneDrive -* Fixed so that if a file is modified locally and --resync is used, rename the local file for data preservation to prevent local data loss - -### Added -* Implement 'bypass_data_preservation' enhancement - -### Changed -* Changed the monitor interval default to 300 seconds - -### Updated -* Updated the handling of out-of-space message when OneDrive is out of space -* Updated debug logging for retry wait times - -## 2.4.1 - 2020-05-02 -### Fixed -* Fixed the handling of renaming files to a name starting with a dot when skip_dotfiles = true -* Fixed the handling of parentheses from path or file names, when doing comparison with regex -* Fixed the handling of renaming dotfiles to another dotfile when skip_dotfile=true in monitor mode -* Fixed the handling of --dry-run and --resync together correctly as current database may be corrupt -* Fixed building on Alpine Linux under Docker -* Fixed the handling of --single-directory for --dry-run and --resync scenarios -* Fixed the handling of .nosync directive when downloading new files into existing directories that is (was) in sync -* Fixed the handling of zero-byte modified files for OneDrive Business -* Fixed skip_dotfiles handling of .folders when in monitor mode to prevent monitoring -* Fixed the handling of '.folder' -> 'folder' move when skip_dotfiles is enabled -* Fixed the handling of folders that cannot be read (permission error) if parent should be skipped -* Fixed the handling of moving folders from skipped directory to non-skipped directory via OneDrive web interface -* Fixed building on CentOS Linux under Docker -* Fixed Codacy reported issues: double quote to prevent globbing and word splitting -* Fixed an assertion when attempting to compute complex path comparison from shared folders -* Fixed the handling of .folders when being skipped via skip_dir - -### Added -* Implement Feature: Implement the ability to set --resync as a config option, default is false - -### Updated -* Update error logging to be consistent when initialising fails -* Update error logging output to handle HTML error response reasoning if present -* Update link to new Microsoft documentation -* Update logging output to differentiate between OneNote objects and other unsupported objects -* Update RHEL/CentOS spec file example -* Update known-issues.md regarding 'SSL_ERROR_SYSCALL, errno 104' -* Update progress bar to be more accurate when 
downloading large files -* Updated #658 and #865 handling of when to trigger a directory walk when changes occur on OneDrive -* Updated handling of when a full scan is requried due to utilising sync_list -* Updated handling of when OneDrive service throws a 429 or 504 response to retry original request after a delay - -## 2.4.0 - 2020-03-22 -### Fixed -* Fixed how the application handles 429 response codes from OneDrive (critical update) -* Fixed building on Alpine Linux under Docker -* Fixed how the 'username' is determined from the running process for logfile naming -* Fixed file handling when a failed download has occured due to exiting via CTRL-C -* Fixed an unhandled exception when OneDrive throws an error response on initialising -* Fixed the handling of moving files into a skipped .folder when skip_dotfiles = true -* Fixed the regex parsing of response URI to avoid potentially generating a bad request to OneDrive, leading to a 'AADSTS9002313: Invalid request. Request is malformed or invalid.' response. - -### Added -* Added a Dockerfile for building on Rasberry Pi / ARM platforms -* Implement Feature: warning on big deletes to safeguard data on OneDrive -* Implement Feature: delete local files after sync -* Implement Feature: perform skip_dir explicit match only -* Implement Feature: provide config file option for specifying the Client Identifier - -### Changed -* Updated the 'Client Identifier' to a new Application ID - -### Updated -* Updated relevant documentation (README.md, USAGE.md) to add new feature details and clarify existing information -* Update completions to include the --force-http-2 option -* Update to always log when a file is skipped due to the item being invalid -* Update application output when just authorising application to make information clearer -* Update logging output when using sync_list to be clearer as to what is actually being processed and why - -## 2.3.13 - 2019-12-31 -### Fixed -* Change the sync list override flag to false as default when not using sync_list -* Fix --dry-run output when using --upload-only & --no-remote-delete and deleting local files - -### Added -* Add a verbose log entry when a monitor sync loop with OneDrive starts & completes - -### Changed -* Remove logAndNotify for 'processing X changes' as it is excessive for each change bundle to inform the desktop of the number of changes the client is processing - -### Updated -* Updated INSTALL.md with Ubuntu 16.x i386 build instructions to reflect working configuration on legacy hardware -* Updated INSTALL.md with details of Linux packages -* Updated INSTALL.md build instructions for CentOS platforms - -## 2.3.12 - 2019-12-04 -### Fixed -* Retry session upload fragment when transient errors occur to prevent silent upload failure -* Update Microsoft restriction and limitations about windows naming files to include '~' for folder names -* Docker guide fixes, add multiple account setup instructions -* Check database for excluded sync_list items previously in scope -* Catch DNS resolution error -* Fix where an item now out of scope should be flagged for local delete -* Fix rebuilding of onedrive, but ensure version is properly updated -* Update Ubuntu i386 build instructions to use DMD using preferred method - -### Added -* Add debug message to when a message is sent to dbus or notification daemon -* Add i386 instructions for legacy low memory platforms using LDC - -## 2.3.11 - 2019-11-05 -### Fixed -* Fix typo in the documentation regarding invalid config when upgrading from 'skilion' 
codebase -* Fix handling of skip_dir, skip_file & sync_list config options -* Fix typo in the documentation regarding sync_list -* Fix log output to be consistent with sync_list exclusion -* Fix 'Processing X changes' output to be more reflective of actual activity when using sync_list -* Remove unused and unexported SED variable in Makefile.in -* Handle curl exceptions and timeouts better with backoff/retry logic -* Update skip_dir pattern matching when using wildcards -* Fix when a full rescan is performed when using sync_list -* Fix 'Key not found: name' when computing skip_dir path -* Fix call from --monitor to observe --no-remote-delete -* Fix unhandled exception when monitor initialisation failure occurs due to too many open local files -* Fix unhandled 412 error response from OneDrive API when moving files right after upload -* Fix --monitor when used with --download-only. This fixes a regression introduced in 12947d1. -* Fix if --single-directory is being used, and we are using --monitor, only set inotify watches on the single directory - -### Changed -* Move JSON logging output from error messages to debug output - -## 2.3.10 - 2019-10-01 -### Fixed -* Fix searching for 'name' when deleting a synced item, if the OneDrive API does not return the expected details in the API call -* Fix abnormal termination when no Internet connection -* Fix downloading of files from OneDrive Personal Shared Folders when the OneDrive API responds with unexpected additional path data -* Fix logging of 'initialisation' of client to actually when the attempt to initialise is performed -* Fix when using a sync_list file, using deltaLink will actually 'miss' changes (moves & deletes) on OneDrive as using sync_list discards changes -* Fix OneDrive API status code 500 handling when uploading files as error message is not correct -* Fix crash when resume_upload file is not a valid JSON -* Fix crash when a file system exception is generated when attempting to update the file date & time and this fails - -### Added -* If there is a case-insensitive match error, also return the remote name from the response -* Make user-agent string a configuration option & add to config file -* Set default User-Agent to 'OneDrive Client for Linux v{version}' - -### Changed -* Make verbose logging output optional on Docker -* Enable --resync & debug client output via environment variables on Docker - -## 2.3.9 - 2019-09-01 -### Fixed -* Catch a 403 Forbidden exception when querying Sharepoint Library Names -* Fix unhandled error exceptions that cause application to exit / crash when uploading files -* Fix JSON object validation for queries made against OneDrive where a JSON response is expected and where that response is to be used and expected to be valid -* Fix handling of 5xx responses from OneDrive when uploading via a session - -### Added -* Detect the need for --resync when config changes either via config file or cli override - -### Changed -* Change minimum required version of LDC to v1.12.0 - -### Removed -* Remove redundant logging output due to change in how errors are reported from OneDrive - -## 2.3.8 - 2019-08-04 -### Fixed -* Fix unable to download all files when OneDrive fails to return file level details used to validate file integrity -* Included the flag "-m" to create the home directory when creating the user -* Fix entrypoint.sh to work with "sudo docker run" -* Fix docker build error on stretch -* Fix hidden directories in 'root' from having prefix removed -* Fix Sharepoint Document Library handling for 
.txt & .csv files -* Fix logging for init.d service -* Fix OneDrive response missing required 'id' element when uploading images -* Fix 'Unexpected character '<'. (Line 1:1)' when OneDrive has an exception error -* Fix error when creating the sync dir fails when there is no permission to create the sync dir - -### Added -* Add explicit check for hashes to be returned in cases where OneDrive API fails to provide them despite requested to do so -* Add comparison with sha1 if OneDrive provides that rather than quickXor -* Add selinux configuration details for a sync folder outside of the home folder -* Add date tag on docker.hub -* Add back CentOS 6 install & uninstall to Makefile -* Add a check to handle moving items out of sync_list sync scope & delete locally if true -* Implement --get-file-link which will return the weburl of a file which has been synced to OneDrive - -### Changed -* Change unauthorized-api exit code to 3 -* Update LDC to v1.16.0 for Travis CI testing -* Use replace function for modified Sharepoint Document Library files rather than delete and upload as new file, preserving file history -* Update Sharepoint modified file handling for files > 4Mb in size - -### Removed -* Remove -d shorthand for --download-only to avoid confusion with other GNU applications where -d stands for 'debug' - -## 2.3.7 - 2019-07-03 -### Fixed -* Fix not all files being downloaded due to OneDrive query failure -* False DB update which potentially could had lead to false data loss on OneDrive - -## 2.3.6 - 2019-07-03 (DO NOT USE) -### Fixed -* Fix JSONValue object validation -* Fix building without git being available -* Fix some spelling/grammatical errors -* Fix OneDrive error response on creating upload session - -### Added -* Add download size & hash check to ensure downloaded files are valid and not corrupt -* Added --force-http-2 to use HTTP/2 if desired - -### Changed -* Depreciated --force-http-1.1 (enabled by default) due to OneDrive inconsistent behavior with HTTP/2 protocol - -## 2.3.5 - 2019-06-19 -### Fixed -* Handle a directory in the sync_dir when no permission to access -* Get rid of forced root necessity during installation -* Fix broken autoconf code for --enable-XXX options -* Fix so that skip_size check should only be used if configured -* Fix a OneDrive Internal Error exception occurring before attempting to download a file - -### Added -* Check for supported version of D compiler - -## 2.3.4 - 2019-06-13 -### Fixed -* Fix 'Local files not deleted' when using bad 'skip_file' entry -* Fix --dry-run logging output for faking downloading new files -* Fix install unit files to correct location on RHEL/CentOS 7 -* Fix up unit file removal on all platforms -* Fix setting times on a file by adding a check to see if the file was actually downloaded before attempting to set the times on the file -* Fix an unhandled curl exception when OneDrive throws an internal timeout error -* Check timestamp to ensure that latest timestamp is used when comparing OneDrive changes -* Fix handling responses where cTag JSON elements are missing -* Fix Docker entrypoint.sh failures when GID is defined but not UID - -### Added -* Add autoconf based build system -* Add an encoding validation check before any path length checks are performed as if the path contains any invalid UTF-8 sequences -* Implement --sync-root-files to sync all files in the OneDrive root when using a sync_list file that would normally exclude these files from being synced -* Implement skip_size feature request -* Implement feature 
request to support file based OneDrive authorization (request | response) - -### Updated -* Better handle initialisation issues when OneDrive / MS Graph is experiencing problems that generate 401 & 5xx error codes -* Enhance error message when unable to connect to Microsoft OneDrive service when the local CA SSL certificate(s) have issues -* Update Dockerfile to correctly build on Docker Hub -* Rework directory layout and re-factor MD files for readability - -## 2.3.3 - 2019-04-16 -### Fixed -* Fix --upload-only check for Sharepoint uploads -* Fix check to ensure item root we flag as 'root' actually is OneDrive account 'root' -* Handle object error response from OneDrive when uploading to OneDrive Business -* Fix handling of some OneDrive accounts not providing 'quota' details -* Fix 'resume_upload' handling in the event of bad OneDrive response - -### Added -* Add debugging for --get-O365-drive-id function -* Add shell (bash,zsh) completion support -* Add config options for command line switches to allow for better config handling in docker containers - -### Updated -* Implement more meaningful 5xx error responses -* Update onedrive.logrotate indentations and comments -* Update 'min_notif_changes' to 'min_notify_changes' - -## 2.3.2 - 2019-04-02 -### Fixed -* Reduce scanning the entire local system in monitor mode for local changes -* Resolve file creation loop when working directly in the synced folder and Microsoft Sharepoint - -### Added -* Add 'monitor_fullscan_frequency' config option to set the frequency of performing a full disk scan when in monitor mode - -### Updated -* Update default 'skip_file' to include tmp and lock files generated by LibreOffice -* Update database version due to changing defaults of 'skip_file' which will force a rebuild and use of new skip_file default regex - -## 2.3.1 - 2019-03-26 -### Fixed -* Resolve 'make install' issue where rebuild of application would occur due to 'version' being flagged as .PHONY -* Update readme build instructions to include 'make clean;' before build to ensure that 'version' is cleanly removed and can be updated correctly -* Update Debian Travis CI build URL's - -## 2.3.0 - 2019-03-25 -### Fixed -* Resolve application crash if no 'size' value is returned when uploading a new file -* Resolve application crash if a 5xx error is returned when uploading a new file -* Resolve not 'refreshing' version file when rebuilding -* Resolve unexpected application processing by preventing use of --synchronize & --monitor together -* Resolve high CPU usage when performing DB reads -* Update error logging around directory case-insensitive match -* Update Travis CI and ARM dependencies for LDC 1.14.0 -* Update Makefile due to build failure if building from release archive file -* Update logging as to why a OneDrive object was skipped - -### Added -* Implement config option 'skip_dir' - -## 2.2.6 - 2019-03-12 -### Fixed -* Resolve application crash when unable to delete remote folders when business retention policies are enabled -* Resolve deprecation warning: loop index implicitly converted from size_t to int -* Resolve warnings regarding 'bashisms' -* Resolve handling of notification failure is dbus server has not started or available -* Resolve handling of response JSON to ensure that 'id' key element is always checked for -* Resolve excessive & needless logging in monitor mode -* Resolve compiling with LDC on Alpine as musl lacks some standard interfaces -* Resolve notification issues when offline and cannot act on changes -* Resolve Docker 
entrypoint.sh to accept command line arguments -* Resolve to create a new upload session on reinit -* Resolve where on OneDrive query failure, default root and drive id is used if a response is not returned -* Resolve Key not found: nextExpectedRanges when attempting session uploads and incorrect response is returned -* Resolve application crash when re-using an authentication URI twice after previous --logout -* Resolve creating a folder on a shared personal folder appears successful but returns a JSON error -* Resolve to treat mv of new file as upload of mv target -* Update Debian i386 build dependencies -* Update handling of --get-O365-drive-id to print out all 'site names' that match the explicit search entry rather than just the last match -* Update Docker readme & documentation -* Update handling of validating local file permissions for new file uploads -### Added -* Add support for install & uninstall on RHEL / CentOS 6.x -* Add support for when notifications are enabled, display the number of OneDrive changes to process if any are found -* Add 'config' option 'min_notif_changes' for minimum number of changes to notify on, default = 5 -* Add additional Docker container builds utilising a smaller OS footprint -* Add configurable interval of logging in monitor mode -* Implement new CLI option --skip-dot-files to skip .files and .folders if option is used -* Implement new CLI option --check-for-nosync to ignore folder when special file (.nosync) present -* Implement new CLI option --dry-run - -## 2.2.5 - 2019-01-16 -### Fixed -* Update handling of HTTP 412 - Precondition Failed errors -* Update --display-config to display sync_list if configured -* Add a check for 'id' key on metadata update to prevent 'std.json.JSONException@std/json.d(494): Key not found: id' -* Update handling of 'remote' folder designation as 'root' items -* Ensure that remote deletes are handled correctly -* Handle 'Item not found' exception when unable to query OneDrive 'root' for changes -* Add handling for JSON response error when OneDrive API returns a 404 due to OneDrive API regression -* Fix items highlighted by codacy review -### Added -* Add --force-http-1.1 flag to downgrade any HTTP/2 curl operations to HTTP 1.1 protocol -* Support building with ldc2 and usage of pkg-config for lib finding - -## 2.2.4 - 2018-12-28 -### Fixed -* Resolve JSONException when supplying --get-O365-drive-id option with a string containing spaces -* Resolve 'sync_dir' not read from 'config' file when run in Docker container -* Resolve logic where potentially a 'default' ~/OneDrive sync_dir could be set despite 'config' file configured for an alternate -* Make sure sqlite checkpointing works by properly finalizing statements -* Update logic handling of --single-directory to prevent inadvertent local data loss -* Resolve signal handling and database shutdown on SIGINT and SIGTERM -* Update man page -* Implement better help output formatting -### Added -* Add debug handling for sync_dir operations -* Add debug handling for homePath calculation -* Add debug handling for configDirBase calculation -* Add debug handling if syncDir is created -* Implement Feature Request: Add status command or switch - -## 2.2.3 - 2018-12-20 -### Fixed -* Fix syncdir option is ignored - -## 2.2.2 - 2018-12-20 -### Fixed -* Handle short lived files in monitor mode -* Provide better log messages, less noise on temporary timeouts -* Deal with items that disappear during upload -* Deal with deleted move targets -* Reinitialize sync engine after three failed 
attempts -* Fix activation of dmd for docker builds -* Fix to check displayName rather than description for --get-O365-drive-id -* Fix checking of config file keys for validity -* Fix exception handling when missing parameter from usage option -### Added -* Notification support via libnotify -* Add very verbose (debug) mode by double -v -v -* Implement option --display-config - -## 2.2.1 - 2018-12-04 -### Fixed -* Gracefully handle connection errors in monitor mode -* Fix renaming of files when syncing -* Installation of doc files, addition of man page -* Adjust timeout values for libcurl -* Continue in monitor mode when sync timed out -* Fix unreachable statements -* Update Makefile to better support packaging -* Allow starting offline in monitor mode -### Added -* Implement --get-O365-drive-id to get correct SharePoint Shared Library (#248) -* Docker buildfiles for onedrive service (#262) - -## 2.2.0 - 2018-11-24 -### Fixed -* Updated client to output additional logging when debugging -* Resolve database assertion failure due to authentication -* Resolve unable to create folders on shared OneDrive Personal accounts -### Added -* Implement feature request to Sync from Microsoft SharePoint -* Implement feature request to specify a logging directory if logging is enabled -### Changed -* Change '--download' to '--download-only' to align with '--upload-only' -* Change logging so that logging to a separate file is no longer the default - -## 2.1.6 - 2018-11-15 -### Fixed -* Updated HTTP/2 transport handling when using curl 7.62.0 for session uploads -### Added -* Added PKGBUILD for makepkg for building packages under Arch Linux - -## 2.1.5 - 2018-11-11 -### Fixed -* Resolve 'Key not found: path' when syncing from some shared folders due to OneDrive API change -* Resolve to only upload changes on remote folder if the item is in the database - dont assert if false -* Resolve files will not download or upload when using curl 7.62.0 due to HTTP/2 being set as default for all curl operations -* Resolve to handle HTTP request returned status code 412 (Precondition Failed) for session uploads to OneDrive Personal Accounts -* Resolve unable to remove '~/.config/onedrive/resume_upload: No such file or directory' if there is a session upload error and the resume file does not get created -* Resolve handling of response codes when using 2 different systems when using '--upload-only' but the same OneDrive account and uploading the same filename to the same location -### Updated -* Updated Travis CI building on LDC v1.11.0 for ARMHF builds -* Updated Makefile to use 'install -D -m 644' rather than 'cp -raf' -* Updated default config to be aligned to code defaults - -## 2.1.4 - 2018-10-10 -### Fixed -* Resolve syncing of OneDrive Personal Shared Folders due to OneDrive API change -* Resolve incorrect systemd installation location(s) in Makefile - -## 2.1.3 - 2018-10-04 -### Fixed -* Resolve File download fails if the file is marked as malware in OneDrive -* Resolve high CPU usage when running in monitor mode -* Resolve how default path is set when running under systemd on headless systems -* Resolve incorrectly nested configDir in X11 systems -* Resolve Key not found: driveType -* Resolve to validate filename length before download to conform with Linux FS limits -* Resolve file handling to look for HTML ASCII codes which will cause uploads to fail -* Resolve Key not found: expirationDateTime on session resume -### Added -* Update Travis CI building to test build on ARM64 - -## 2.1.2 - 2018-08-27 -### Fixed 
-* Resolve skipping of symlinks in monitor mode -* Resolve Gateway Timeout - JSONValue is not an object -* Resolve systemd/user is not supported on CentOS / RHEL -* Resolve HTTP request returned status code 429 (Too Many Requests) -* Resolve handling of maximum path length calculation -* Resolve 'The parent item is not in the local database' -* Resolve Correctly handle file case sensitivity issues in same folder -* Update unit files documentation link - -## 2.1.1 - 2018-08-14 -### Fixed -* Fix handling no remote delete of remote directories when using --no-remote-delete -* Fix handling of no permission to access a local file / corrupt local file -* Fix application crash when unable to access login.microsoft.com upon application startup -### Added -* Build instructions for openSUSE Leap 15.0 - -## 2.1.0 - 2018-08-10 -### Fixed -* Fix handling of database exit scenarios when there is zero disk space left on drive where the items database resides -* Fix handling of incorrect database permissions -* Fix handling of different database versions to automatically re-create tables if version mis-match -* Fix handling timeout when accessing the Microsoft OneDrive Service -* Fix localFileModifiedTime to not use fraction seconds -### Added -* Implement Feature: Add a progress bar for large uploads & downloads -* Implement Feature: Make checkinterval for monitor configurable -* Implement Feature: Upload Only Option that does not perform remote delete -* Implement Feature: Add ability to skip symlinks -* Add dependency, ebuild and build instructions for Gentoo distributions -### Changed -* Build instructions for x86, x86_64 and ARM32 platforms -* Travis CI files to automate building on x32, x64 and ARM32 architectures -* Travis CI files to test built application against valid, invalid and problem files from previous issues - -## 2.0.2 - 2018-07-18 -### Fixed -* Fix systemd service install for builds with DESTDIR defined -* Fix 'HTTP 412 - Precondition Failed' error handling -* Gracefully handle OneDrive account password change -* Update logic handling of --upload-only and --local-first - -## 2.0.1 - 2018-07-11 -### Fixed -* Resolve computeQuickXorHash generates a different hash when files are > 64Kb - -## 2.0.0 - 2018-07-10 -### Fixed -* Resolve conflict resolution issue during syncing - the client does not handle conflicts very well & keeps on adding the hostname to files -* Resolve skilion #356 by adding additional check for 409 response from OneDrive -* Resolve multiple versions of file shown on website after single upload -* Resolve to gracefully fail when 'onedrive' process cannot get exclusive database lock -* Resolve 'Key not found: fileSystemInfo' when then item is a remote item (OneDrive Personal) -* Resolve skip_file config entry needs to be checked for any characters to escape -* Resolve Microsoft Naming Convention not being followed correctly -* Resolve Error when trying to upload a file with weird non printable characters present -* Resolve Crash if file is locked by online editing (status code 423) -* Resolve Resolve compilation issue with dmd-2.081.0 -* Resolve skip_file configuration doesn't handle spaces or specified directory paths -### Added -* Implement Feature: Add a flag to detect when the sync-folder is missing -* Implement Travis CI for code testing -### Changed -* Update Makefile to use DESTDIR variables -* Update OneDrive Business maximum path length from 256 to 400 -* Update OneDrive Business allowed characters for files and folders -* Update sync_dir handling to use the 
absolute path for setting parameter to something other than ~/OneDrive via config file or command line -* Update Fedora build instructions - -## 1.1.2 - 2018-05-17 -### Fixed -* Fix 4xx errors including (412 pre-condition, 409 conflict) -* Fix Key not found: lastModifiedDateTime (OneDrive API change) -* Fix configuration directory not found when run via init.d -* Fix skilion Issues #73, #121, #132, #224, #257, #294, #295, #297, #298, #300, #306, #315, #320, #329, #334, #337, #341 -### Added -* Add logging - log client activities to a file (/var/log/onedrive/%username%.onedrive.log or ~/onedrive.log) -* Add https debugging as a flag -* Add `--synchronize` to prevent from syncing when just blindly running the application -* Add individual folder sync -* Add sync from local directory first rather than download first then upload -* Add upload long path check -* Add upload only -* Add check for max upload file size before attempting upload -* Add systemd unit files for single & multi user configuration -* Add init.d file for older init.d based services -* Add Microsoft naming conventions and namespace validation for items that will be uploaded -* Add remaining free space counter at client initialisation to avoid out of space upload issue -* Add large file upload size check to align to OneDrive file size limitations -* Add upload file size validation & retry if does not match -* Add graceful handling of some fatal errors (OneDrive 5xx error handling) - -## Unreleased - 2018-02-19 -### Fixed -* Crash when the delta link is expired -### Changed -* Disabled buffering on stdout - -## 1.1.1 - 2018-01-20 -### Fixed -* Wrong regex for parsing authentication uri - -## 1.1.0 - 2018-01-19 -### Added -* Support for shared folders (OneDrive Personal only) -* `--download` option to only download changes -* `DC` variable in Makefile to chose the compiler -### Changed -* Print logs on stdout instead of stderr -* Improve log messages - -## 1.0.1 - 2017-08-01 -### Added -* `--syncdir` option -### Changed -* `--version` output simplified -* Updated README -### Fixed -* Fix crash caused by remotely deleted and recreated directories - -## 1.0.0 - 2017-07-14 -### Added -* `--version` option diff --git a/README.md b/README.md deleted file mode 100644 index 602e72321..000000000 --- a/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# OneDrive Client for Linux -[![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) -[![Release Date](https://img.shields.io/github/release-date/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) -[![Test Build](https://github.com/abraunegg/onedrive/actions/workflows/testbuild.yaml/badge.svg)](https://github.com/abraunegg/onedrive/actions/workflows/testbuild.yaml) -[![Build Docker Images](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml/badge.svg)](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml) -[![Docker Pulls](https://img.shields.io/docker/pulls/driveone/onedrive)](https://hub.docker.com/r/driveone/onedrive) - -Introducing a free Microsoft OneDrive Client that seamlessly supports OneDrive Personal, OneDrive for Business, OneDrive for Office365, and SharePoint Libraries. - -This robust and highly customisable client is compatible with all major Linux distributions and FreeBSD, and can also be deployed as a container using Docker or Podman. It offers both one-way and two-way synchronisation capabilities while ensuring a secure connection to Microsoft OneDrive services. 
- -Originally derived as a 'fork' from the [skilion](https://github.com/skilion/onedrive) client, it's worth noting that the developer of the original client has explicitly stated they have no intention of maintaining or supporting their work ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)). - -This client represents a 100% re-imagining of the original work, addressing numerous notable bugs and issues while incorporating a significant array of new features. This client has been under active development since mid-2018. - -## Features -* Supports 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive -* Sync State Caching -* Real-Time local file monitoring with inotify -* Real-Time syncing of remote updates via webhooks -* File upload / download validation to ensure data integrity -* Resumable uploads -* Support OneDrive for Business (part of Office 365) -* Shared Folder support for OneDrive Personal and OneDrive Business accounts -* SharePoint / Office365 Shared Libraries -* Desktop notifications via libnotify -* Dry-run capability to test configuration changes -* Prevent major OneDrive accidental data deletion after configuration change -* Support for National cloud deployments (Microsoft Cloud for US Government, Microsoft Cloud Germany, Azure and Office 365 operated by 21Vianet in China) -* Supports single & multi-tenanted applications -* Supports rate limiting of traffic -* Supports multi-threaded uploads and downloads - -## What's missing -* Ability to encrypt/decrypt files on-the-fly when uploading/downloading files from OneDrive -* Support for Windows 'On-Demand' functionality so file is only downloaded when accessed locally - -## External Enhancements -* A GUI for configuration management: [OneDrive Client for Linux GUI](https://github.com/bpozdena/OneDriveGUI) -* Colorful log output terminal modification: [OneDrive Client for Linux Colorful log Output](https://github.com/zzzdeb/dotfiles/blob/master/scripts/tools/onedrive_log) -* System Tray Icon: [OneDrive Client for Linux System Tray Icon](https://github.com/DanielBorgesOliveira/onedrive_tray) - -## Frequently Asked Questions -Refer to [Frequently Asked Questions](https://github.com/abraunegg/onedrive/wiki/Frequently-Asked-Questions) - -## Have a question -If you have a question or need something clarified, please raise a new disscussion post [here](https://github.com/abraunegg/onedrive/discussions) - -## Reporting an Issue or Bug -If you encounter any bugs you can report them here on Github. Before filing an issue be sure to: - -1. Check the version of the application you are using `onedrive --version` and ensure that you are running either the latest [release](https://github.com/abraunegg/onedrive/releases) or built from master. -2. Fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md) -3. Generate a debug log for support using the following [process](https://github.com/abraunegg/onedrive/wiki/Generate-debug-log-for-support) - * If you are in *any* way concerned regarding the sensitivity of the data contained with in the verbose debug log file, create a new OneDrive account, configure the client to use that, use *dummy* data to simulate your environment and then replicate your original issue - * If you are still concerned, provide an NDA or confidentiality document to sign -4. 
Upload the debug log to [pastebin](https://pastebin.com/) or archive and email to support@mynas.com.au - * If you are concerned regarding the sensitivity of your debug data, encrypt + password protect the archive file and provide the decryption password via an out-of-band (OOB) mechanism. Email support@mynas.com.au for an OOB method for the password to be sent. - * If you are still concerned, provide an NDA or confidentiality document to sign - -## Known issues -Refer to [docs/known-issues.md](https://github.com/abraunegg/onedrive/blob/master/docs/known-issues.md) - -## Documentation and Configuration Assistance -### Installing from Distribution Packages or Building the OneDrive Client for Linux from source -Refer to [docs/install.md](https://github.com/abraunegg/onedrive/blob/master/docs/install.md) - -### Configuration and Usage -Refer to [docs/usage.md](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md) - -### Configure OneDrive Business Shared Folders -Refer to [docs/business-shared-folders.md](https://github.com/abraunegg/onedrive/blob/master/docs/business-shared-folders.md) - -### Configure SharePoint / Office 365 Shared Libraries (Business or Education) -Refer to [docs/sharepoint-libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/sharepoint-libraries.md) - -### Configure National Cloud support -Refer to [docs/national-cloud-deployments.md](https://github.com/abraunegg/onedrive/blob/master/docs/national-cloud-deployments.md) - -### Docker support -Refer to [docs/docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/docker.md) - -### Podman support -Refer to [docs/podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/podman.md) - From 220943b6ab2c1c71b6f50157aa1f399433ddf959 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Tue, 9 Jan 2024 09:27:57 +1100 Subject: [PATCH 005/305] Re-add documents Re-add documents --- changelog.md | 1041 ++++++++++++++++++++++++++++++++++++++++++++++++++ readme.md | 85 +++++ 2 files changed, 1126 insertions(+) create mode 100644 changelog.md create mode 100644 readme.md diff --git a/changelog.md b/changelog.md new file mode 100644 index 000000000..8f7f357ad --- /dev/null +++ b/changelog.md @@ -0,0 +1,1041 @@ +# Changelog +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). 
+ +## 2.5.0 - TBA + + +### Changed +* Renamed various documentation files to align with document content + + +## 2.4.25 - 2023-06-21 +### Fixed +* Fixed that the application was reporting as v2.2.24 when in fact it was v2.4.24 (release tagging issue) +* Fixed that the running version obsolete flag (due to above issue) was causing a false flag as being obsolete +* Fixed that zero-byte files do not have a hash as reported by the OneDrive API thus should not generate an error message + +### Updated +* Update to Debian Docker file to resolve Docker image Operating System reported vulnerabilities +* Update to Alpine Docker file to resolve Docker image Operating System reported vulnerabilities +* Update to Fedora Docker file to resolve Docker image Operating System reported vulnerabilities +* Updated documentation (various) + +## 2.4.24 - 2023-06-20 +### Fixed +* Fix for extra encoded quotation marks surrounding Docker environment variables +* Fix webhook subscription creation for SharePoint Libraries +* Fix that a HTTP 504 - Gateway Timeout causes local files to be deleted when using --download-only & --cleanup-local-files mode +* Fix that folders are renamed despite using --dry-run +* Fix deprecation warnings with dmd 2.103.0 +* Fix error that the application is unable to perform a database vacuum: out of memory when exiting + +### Removed +* Remove sha1 from being used by the client as this is being depreciated by Microsoft in July 2023 +* Complete the removal of crc32 elements + +### Added +* Added ONEDRIVE_SINGLE_DIRECTORY configuration capability to Docker +* Added --get-file-link shell completion +* Added configuration to allow HTTP session timeout(s) tuning via config (taken from v2.5.x) + +### Updated +* Update to Debian Docker file to resolve Docker image Operating System reported vulnerabilities +* Update to Alpine Docker file to resolve Docker image Operating System reported vulnerabilities +* Update to Fedora Docker file to resolve Docker image Operating System reported vulnerabilities +* Updated cgi.d to commit 680003a - last upstream change before requiring `core.d` dependency requirement +* Updated documentation (various) + +## 2.4.23 - 2023-01-06 +### Fixed +* Fixed RHEL7, RHEL8 and RHEL9 Makefile and SPEC file compatibility + +### Removed +* Disable systemd 'PrivateUsers' due to issues with systemd running processes when option is enabled, causes local file deletes on RHEL based systems + +### Updated +* Update --get-O365-drive-id error handling to display a more a more appropriate error message if the API cannot be found +* Update the GitHub version check to utilise the date a release was done, to allow 1 month grace period before generating obsolete version message +* Update Alpine Dockerfile to use Alpine 3.17 and Golang 1.19 +* Update handling of --source-directory and --destination-directory if one is empty or missing and if used with --synchronize or --monitor +* Updated documentation (various) + +## 2.4.22 - 2022-12-06 +### Fixed +* Fix application crash when local file is changed to a symbolic link with non-existent target +* Fix build error with dmd-2.101.0 +* Fix build error with LDC 1.28.1 on Alpine +* Fix issue of silent exit when unable to delete local files when using --cleanup-local-files +* Fix application crash due to access permissions on configured path for sync_dir +* Fix potential application crash when exiting due to failure state and unable to cleanly shutdown the database +* Fix creation of parent empty directories when parent is excluded by sync_list + 
+### Added +* Added performance output details for key functions + +### Changed +* Switch Docker 'latest' to point at Debian builds rather than Fedora due to ongoing Fedora build failures +* Align application logging events to actual application defaults for --monitor operations +* Performance Improvement: Avoid duplicate costly path calculations and DB operations if not required +* Disable non-working remaining sandboxing options within systemd service files +* Performance Improvement: Only check 'sync_list' if this has been enabled and configured +* Display 'Sync with OneDrive is complete' when using --synchronize +* Change the order of processing between Microsoft OneDrive restrictions and limitations check and skip_file|skip_dir check + +### Removed +* Remove building Fedora ARMv7 builds due to ongoing build failures + +### Updated +* Update config change detection handling +* Updated documentation (various) + +## 2.4.21 - 2022-09-27 +### Fixed +* Fix that the download progress bar doesn't always reach 100% when rate_limit is set +* Fix --resync handling of database file removal +* Fix Makefile to be consistent with permissions that are being used +* Fix that logging output for skipped uploaded files is missing +* Fix to allow non-sync tasks while sync is running +* Fix where --resync is enforced for non-sync operations +* Fix to resolve segfault when running 'onedrive --display-sync-status' when run as 2nd process +* Fix DMD 2.100.2 depreciation warning + +### Added +* Add GitHub Action Test Build Workflow (replacing Travis CI) +* Add option --display-running-config to display the running configuration as used at application startup +* Add 'config' option to request readonly access in oauth authorization step +* Add option --cleanup-local-files to cleanup local files regardless of sync state when using --download-only +* Add option --with-editing-perms to create a read-write shareable link when used with --create-share-link + +### Changed +* Change the exit code of the application to 126 when a --resync is required + +### Updated +* Updated --get-O365-drive-id implementation for data access +* Update what application options require an argument +* Update application logging output for error messages to remove certain \n prefix when logging to a file +* Update onedrive.spec.in to fix error building RPM +* Update GUI notification handling for specific skipped scenarios +* Updated documentation (various) + +## 2.4.20 - 2022-07-20 +### Fixed +* Fix 'foreign key constraint failed' when using OneDrive Business Shared Folders due to change to using /delta query +* Fix various little spelling fixes (check with lintian during Debian packaging) +* Fix handling of a custom configuration directory when using --confdir +* Fix to ensure that any active http instance is shutdown before any application exit +* Fix to enforce that --confdir must be a directory + +### Added +* Added 'force_http_11' configuration option to allow forcing HTTP/1.1 operations + +### Changed +* Increased thread sleep for better process I/O wait handling +* Removed 'force_http_2' configuration option + +### Updated +* Update OneDrive API response handling for National Cloud Deployments +* Updated to switch to using curl defaults for HTTP/2 operations +* Updated documentation (various) + +## 2.4.19 - 2022-06-15 +### Fixed +* Update Business Shared Folders to use a /delta query +* Update when DB is updated by OneDrive API data and update when file hash is required to be generated + +### Added +* Added ONEDRIVE_UPLOADONLY flag 
for Docker
+
+### Updated
+* Updated GitHub workflows
+* Updated documentation (various)
+
+## 2.4.18 - 2022-06-02
+### Fixed
+* Fixed various database related access issues stemming from running multiple instances of the application at the same time using the same configuration data
+* Fixed --display-config being impacted by --resync flag
+* Fixed installation permissions for onedrive man-pages file
+* Fixed that in some situations users try --upload-only and --download-only together, which is not possible
+* Fixed application crash if unable to read required hash files
+
+### Added
+* Added Feature Request to add an override for skip_dir|skip_file through flag to force sync
+* Added a check to validate local filesystem available space before attempting file download
+* Added GitHub Actions to build Docker containers and push to DockerHub
+
+### Updated
+* Updated all Docker build files to current distributions, using updated distribution LDC version
+* Updated logging output to logfiles when an actual sync process is occurring
+* Updated output of --display-config to be more relevant
+* Updated manpage to align with application configuration
+* Updated documentation and Docker files based on minimum compiler versions to dmd-2.088.0 and ldc-1.18.0
+* Updated documentation (various)
+
+## 2.4.17 - 2022-04-30
+### Fixed
+* Fix docker build by adding the missing git package for Fedora builds
+* Fix application crash when attempting to sync a broken symbolic link
+* Fix Internet connection disruption retry handling and logging output
+* Fix local folder creation timestamp with timestamp from OneDrive
+* Fix logging output when download failed
+
+### Added
+* Add additional logging specifically for delete event to denote in log output the source of a deletion event when running in --monitor mode
+
+### Changed
+* Improve when the local database integrity check is performed and how frequently it is performed
+
+### Updated
+* Remove application output ambiguity on how to access 'help' for the client
+* Update logging output when running in --monitor --verbose mode in regards to the inotify events
+* Updated documentation (various)
+
+## 2.4.16 - 2022-03-10
+### Fixed
+* Update application file logging error handling
+* Explicitly set libcurl options
+* Fix that when a sync_list exclusion is matched, the item needs to be excluded when using --resync
+* Fix so that application can be compiled correctly on Android hosts
+* Fix the handling of 429 and 5xx responses when they are generated by OneDrive in a self-referencing circular pattern
+* Fix applying permissions to volume directories when running in rootless podman
+* Fix unhandled errors from OneDrive when initialising subscriptions fail
+
+### Added
+* Enable GitHub Sponsors
+* Implement --resync-auth to enable CLI passing in of --resync approval
+* Add function to check client version vs latest GitHub release
+* Add --reauth to allow easy re-authentication of the client
+* Implement --modified-by to display who last modified a file and when the modification was done
+* Implement feature request to mark partially-downloaded files as .partial during download
+* Add documentation for Podman support
+
+### Changed
+* Document risk regarding using --resync and force user acceptance of usage risk to proceed
+* Use YAML for Bug Reports and Feature Requests
+* Update Dockerfiles to use more modern base Linux distribution
+
+### Updated
+* Updated documentation (various)
+
+## 2.4.15 - 2021-12-31
+### Fixed
+* Fix unable to
upload to OneDrive Business Shared Folders due to OneDrive API restricting quota information
+* Update fixing edge case with OneDrive Personal Shared Folders and --resync --upload-only
+
+### Added
+* Add SystemD hardening
+* Add --operation-timeout argument
+
+### Changed
+* Updated minimum compiler versions to dmd-2.087.0 and ldc-1.17.0
+
+### Updated
+* Updated Dockerfile-alpine to use Alpine 3.14
+* Updated documentation (various)
+
+## 2.4.14 - 2021-11-24
+### Fixed
+* Support DMD 2.097.0 as compiler for Docker Builds
+* Fix getPathDetailsByDriveId query when using --dry-run and a nested path with --single-directory
+* Fix edge case when syncing OneDrive Personal Shared Folders
+* Catch unhandled API response errors when querying OneDrive Business Shared Folders
+* Catch unhandled API response errors when listing OneDrive Business Shared Folders
+* Fix error 'Key not found: remaining' with Business Shared Folders (OneDrive API change)
+* Fix overwriting local files with older versions from OneDrive when items.sqlite3 does not exist and --resync is not used
+
+### Added
+* Added operation_timeout as a new configuration to assist in cases where operations take longer than 1h to complete
+* Add Real-Time syncing of remote updates via webhooks
+* Add --auth-response option and expose through entrypoint.sh for Docker
+* Add --disable-download-validation
+
+### Changed
+* Always prompt for credentials for authentication rather than re-using cached browser details
+* Do not re-auth on --logout
+
+### Updated
+* Updated documentation (various)
+
+## 2.4.13 - 2021-7-14
+### Fixed
+* Support DMD 2.097.0 as compiler
+* Fix to handle OneDrive API Bad Request response when querying if file exists
+* Fix application crash and incorrect handling of --single-directory when syncing a OneDrive Business Shared Folder due to using 'Add Shortcut to My Files'
+* Fix application crash due to invalid UTF-8 sequence in the pathname for the application configuration
+* Fix error message when deleting a large number of files
+* Fix Docker build process to source GOSU keys from updated GPG key location
+* Fix application crash due to a conversion overflow when calculating file offset for session uploads
+* Fix Docker Alpine build failing due to filesystem permissions issue due to Docker build system and Alpine Linux 3.14 incompatibility
+* Fix that Business Shared Folders with parentheses are ignored
+
+### Updated
+* Updated Lock Bot to run daily
+* Updated documentation (various)
+
+## 2.4.12 - 2021-5-28
+### Fixed
+* Fix an unhandled Error 412 when uploading modified files to OneDrive Business Accounts
+* Fix 'sync_list' handling of inclusions when name is included in another folder's name
+* Fix that options --upload-only & --remove-source-files are ignored on an upload session restore
+* Fix to add file check when adding item to database if using --upload-only --remove-source-files
+* Fix application crash when SharePoint displayName is being withheld
+
+### Updated
+* Updated Lock Bot to use GitHub Actions
+* Updated documentation (various)
+
+## 2.4.11 - 2021-4-07
+### Fixed
+* Fix support for '/*' regardless of location within sync_list file
+* Fix 429 response handling to correctly check for 'retry-after' response header and use the set value
+* Fix 'sync_list' path handling for sub item matching, so that items in parent are not implicitly matched when there is no wildcard present
+* Fix --get-O365-drive-id to use 'nextLink' value if present when searching for specific SharePoint site names
+* Fix OneDrive
Business Shared Folder existing name conflict check
+* Fix incorrect error message 'Item cannot be deleted from OneDrive because it was not found in the local database' when item is actually present
+* Fix application crash when unable to rename folder structure due to unhandled file-system issue
+* Fix uploading documents to Shared Business Folders when the shared folder exists on a SharePoint site due to Microsoft Sharepoint 'enrichment' of files
+* Fix that a file record is kept in database when using --no-remote-delete & --remove-source-files
+
+### Added
+* Added support in --get-O365-drive-id to provide the 'drive_id' for multiple 'document libraries' within a single Shared Library Site
+
+### Removed
+* Removed the deprecated config option 'force_http_11' which was flagged as deprecated by PR #549 in v2.3.6 (June 2019)
+
+### Updated
+* Updated error output of --get-O365-drive-id to provide more details on why an error occurred if a SharePoint site lacks the details we need to perform the match
+* Updated Docker build files for Raspberry Pi to dedicated armhf & aarch64 Dockerfiles
+* Updated logging output when in --monitor mode, avoid outputting misleading logging when the new or modified item is a file, not a directory
+* Updated documentation (various)
+
+## 2.4.10 - 2021-2-19
+### Fixed
+* Catch database assertion when item path cannot be calculated
+* Fix alpine Docker build so it uses the same golang alpine version
+* Search all distinct drive id's rather than just default drive id for --get-file-link
+* Use correct driveId value to query for changes when using --single-directory
+* Improve upload handling of files for SharePoint sites and detecting when SharePoint modifies the file post upload
+* Correctly handle '~' when present in 'log_dir' configuration option
+* Fix logging output when handling downloaded new files
+* Fix to use correct path offset for sync_list exclusion matching
+
+### Added
+* Add upload speed metrics when files are uploaded and clarify that 'data to transfer' is what is needed to be downloaded from OneDrive
+* Add new config option to rate limit connection to OneDrive
+* Support new file maximum upload size of 250GB
+* Support sync_list matching full path root wildcard with exclusions to simplify sync_list configuration
+
+### Updated
+* Rename Office365.md --> SharePoint-Shared-Libraries.md which better describes this document
+* Updated Dockerfile config for arm64
+* Updated documentation (various)
+
+## 2.4.9 - 2020-12-27
+### Fixed
+* Fix to handle case where API provided deltaLink generates a further API error
+* Fix application crash when unable to read a local file due to local file permissions
+* Fix application crash when calculating the path length due to invalid UTF characters in local path
+* Fix Docker build on Alpine due to missing symbols when using the edge version of ldc and ldc-runtime
+* Fix application crash with --get-O365-drive-id when API response is restricted
+
+### Added
+* Add debug log output of the configured URLs which will be used throughout the application to remove any ambiguity as to using incorrect URLs when making API calls
+* Improve application startup when using --monitor when there is no network connection to the OneDrive API and only initialise application once OneDrive API is reachable
+* Add Docker environment variable to allow --logout for re-authentication
+
+### Updated
+* Remove duplicate code for error output functions and enhance error logging output
+* Updated documentation
+
+## 2.4.8 - 2020-11-30
+### 
Fixed +* Fix to use config set option for 'remove_source_files' and 'skip_dir_strict_match' rather than ignore if set +* Fix download failure and crash due to incorrect local filesystem permissions when using mounted external devices +* Fix to not change permissions on pre-existing local directories +* Fix logging output when authentication authorisation fails to not say authorisation was successful +* Fix to check application_id before setting redirect URL when using specific Azure endpoints +* Fix application crash in --monitor mode due to 'Failed to stat file' when setgid is used on a directory and data cannot be read + +### Added +* Added advanced-usage.md to document advaced client usage such as multi account configurations and Windows dual-boot + +### Updated +* Updated --verbose logging output for config options when set +* Updated documentation (man page, USAGE.md, Office365.md, BusinessSharedFolders.md) + +## 2.4.7 - 2020-11-09 +### Fixed +* Fix debugging output for /delta changes available queries +* Fix logging output for modification comparison source data +* Fix Business Shared Folder handling to process only Shared Folders, not individually shared files +* Fix cleanup dryrun shm and wal files if they exist +* Fix --list-shared-folders to only show folders +* Fix to check for the presence of .nosync when processing DB entries +* Fix skip_dir matching when using --resync +* Fix uploading data to shared business folders when using --upload-only +* Fix to merge contents of SQLite WAL file into main database file on sync completion +* Fix to check if localModifiedTime is >= than item.mtime to avoid re-upload for equal modified time +* Fix to correctly set config directory permissions at first start + +### Added +* Added environment variable to allow easy HTTPS debug in docker +* Added environment variable to allow download-only mode in Docker +* Implement Feature: Allow config to specify a tenant id for non-multi-tenant applications +* Implement Feature: Adding support for authentication with single tenant custom applications +* Implement Feature: Configure specific File and Folder Permissions + +### Updated +* Updated documentation (readme.md, install.md, usage.md, bug_report.md) + +## 2.4.6 - 2020-10-04 +### Fixed +* Fix flagging of remaining free space when value is being restricted +* Fix --single-directory path handling when path does not exist locally +* Fix checking for 'Icon' path as no longer listed by Microsoft as an invalid file or folder name +* Fix removing child items on OneDrive when parent item responds with access denied +* Fix to handle deletion events for files when inotify events are missing +* Fix uninitialised value error as reported by valgrind +* Fix to handle deletion events for directories when inotify events are missing + +### Added +* Implement Feature: Create shareable link +* Implement Feature: Support wildcard within sync_list entries +* Implement Feature: Support negative patterns in sync_list for fine grained exclusions +* Implement Feature: Multiple skip_dir & skip_file configuration rules +* Add GUI notification to advise users when the client needs to be reauthenticated + +### Updated +* Updated documentation (readme.md, install.md, usage.md, bug_report.md) + +## 2.4.5 - 2020-08-13 +### Fixed +* Fixed fish auto completions installation destination + +## 2.4.4 - 2020-08-11 +### Fixed +* Fix 'skip_dir' & 'skip_file' pattern matching to ensure correct matching is performed +* Fix 'skip_dir' & 'skip_file' so that each directive is only used against 
directories or files as required in --monitor
+* Fix client hang when attempting to sync a Unix pipe file
+* Fix --single-directory & 'sync_list' performance
+* Fix erroneous 'return' statements which could prematurely end processing all changes returned from OneDrive
+* Fix segfault when attempting to perform a comparison on an inotify event when determining if event path is directory or file
+* Fix handling of Shared Folders to ensure these are checked against 'skip_dir' entries
+* Fix 'Skipping uploading this new file as parent path is not in the database' when uploading to a Personal Shared Folder
+* Fix how available free space is tracked when uploading files to OneDrive and Shared Folders
+* Fix --single-directory handling of parent path matching if path is being seen for first time
+
+### Added
+* Added Fish auto completions
+
+### Updated
+* Increase maximum individual file size to 100GB due to Microsoft file limit increase
+* Update Docker build files and align version of compiler across all Docker builds
+* Update Docker documentation
+* Update NixOS build information
+* Update the 'Processing XXXX' output to display the full path
+* Update logging output when a sync starts and completes when using --monitor
+* Update Office 365 / SharePoint site search query and response if query returns zero matches
+
+## 2.4.3 - 2020-06-29
+### Fixed
+* Check if symbolic link is relative to location path
+* When using output logfile, fix inconsistent output spacing
+* Perform initial sync at startup in monitor mode
+* Handle a 'race' condition to process inotify events generated whilst performing DB or filesystem walk
+* Fix segfault when moving folder outside the sync directory when using --monitor on Arch Linux
+
+### Added
+* Added additional inotify event debugging
+* Added support for loading system configs if there's no user config
+* Added Ubuntu installation details to include installing the client from a PPA
+* Added openSUSE installation details to include installing the client from a package
+* Added support for comments in sync_list file
+* Implement recursive deletion when Retention Policy is enabled on OneDrive Business Accounts
+* Implement support for National cloud deployments
+* Implement OneDrive Business Shared Folders Support
+
+### Updated
+* Updated documentation files (various)
+* Updated log output messaging when a full scan has been set or triggered
+* Updated buildNormalizedPath complexity to simplify code
+* Updated to only process OneDrive Personal Shared Folders if account type is 'personal'
+
+## 2.4.2 - 2020-05-27
+### Fixed
+* Fixed the catching of an unhandled exception when inotify throws an error
+* Fixed an uncaught '100 Continue' response when files are being uploaded
+* Fixed progress bar for uploads to be more accurate regarding percentage complete
+* Fixed handling of database query enforcement if item is from a shared folder
+* Fixed compiler deprecation of std.digest.digest
+* Fixed checking & loading of configuration file sequence
+* Fixed multiple issues reported by Valgrind
+* Fixed double scan at application startup when using --monitor & --resync together
+* Fixed when renaming a file locally, ensure that the target filename is valid before attempting to upload to OneDrive
+* Fixed so that if a file is modified locally and --resync is used, rename the local file for data preservation to prevent local data loss
+
+### Added
+* Implement 'bypass_data_preservation' enhancement
+
+### Changed
+* Changed the monitor interval default to 300 seconds
+
+### Updated
+* Updated the handling of out-of-space message when OneDrive is out of space
+* Updated debug logging for retry wait times
+
+## 2.4.1 - 2020-05-02
+### Fixed
+* Fixed the handling of renaming files to a name starting with a dot when skip_dotfiles = true
+* Fixed the handling of parentheses from path or file names, when doing comparison with regex
+* Fixed the handling of renaming dotfiles to another dotfile when skip_dotfile=true in monitor mode
+* Fixed the handling of --dry-run and --resync together correctly as current database may be corrupt
+* Fixed building on Alpine Linux under Docker
+* Fixed the handling of --single-directory for --dry-run and --resync scenarios
+* Fixed the handling of .nosync directive when downloading new files into existing directories that is (was) in sync
+* Fixed the handling of zero-byte modified files for OneDrive Business
+* Fixed skip_dotfiles handling of .folders when in monitor mode to prevent monitoring
+* Fixed the handling of '.folder' -> 'folder' move when skip_dotfiles is enabled
+* Fixed the handling of folders that cannot be read (permission error) if parent should be skipped
+* Fixed the handling of moving folders from skipped directory to non-skipped directory via OneDrive web interface
+* Fixed building on CentOS Linux under Docker
+* Fixed Codacy reported issues: double quote to prevent globbing and word splitting
+* Fixed an assertion when attempting to compute complex path comparison from shared folders
+* Fixed the handling of .folders when being skipped via skip_dir
+
+### Added
+* Implement Feature: Implement the ability to set --resync as a config option, default is false
+
+### Updated
+* Update error logging to be consistent when initialising fails
+* Update error logging output to handle HTML error response reasoning if present
+* Update link to new Microsoft documentation
+* Update logging output to differentiate between OneNote objects and other unsupported objects
+* Update RHEL/CentOS spec file example
+* Update known-issues.md regarding 'SSL_ERROR_SYSCALL, errno 104'
+* Update progress bar to be more accurate when downloading large files
+* Updated #658 and #865 handling of when to trigger a directory walk when changes occur on OneDrive
+* Updated handling of when a full scan is required due to utilising sync_list
+* Updated handling of when OneDrive service throws a 429 or 504 response to retry original request after a delay
+
+## 2.4.0 - 2020-03-22
+### Fixed
+* Fixed how the application handles 429 response codes from OneDrive (critical update)
+* Fixed building on Alpine Linux under Docker
+* Fixed how the 'username' is determined from the running process for logfile naming
+* Fixed file handling when a failed download has occurred due to exiting via CTRL-C
+* Fixed an unhandled exception when OneDrive throws an error response on initialising
+* Fixed the handling of moving files into a skipped .folder when skip_dotfiles = true
+* Fixed the regex parsing of response URI to avoid potentially generating a bad request to OneDrive, leading to an 'AADSTS9002313: Invalid request. Request is malformed or invalid.' response.
+ +### Added +* Added a Dockerfile for building on Rasberry Pi / ARM platforms +* Implement Feature: warning on big deletes to safeguard data on OneDrive +* Implement Feature: delete local files after sync +* Implement Feature: perform skip_dir explicit match only +* Implement Feature: provide config file option for specifying the Client Identifier + +### Changed +* Updated the 'Client Identifier' to a new Application ID + +### Updated +* Updated relevant documentation (README.md, USAGE.md) to add new feature details and clarify existing information +* Update completions to include the --force-http-2 option +* Update to always log when a file is skipped due to the item being invalid +* Update application output when just authorising application to make information clearer +* Update logging output when using sync_list to be clearer as to what is actually being processed and why + +## 2.3.13 - 2019-12-31 +### Fixed +* Change the sync list override flag to false as default when not using sync_list +* Fix --dry-run output when using --upload-only & --no-remote-delete and deleting local files + +### Added +* Add a verbose log entry when a monitor sync loop with OneDrive starts & completes + +### Changed +* Remove logAndNotify for 'processing X changes' as it is excessive for each change bundle to inform the desktop of the number of changes the client is processing + +### Updated +* Updated INSTALL.md with Ubuntu 16.x i386 build instructions to reflect working configuration on legacy hardware +* Updated INSTALL.md with details of Linux packages +* Updated INSTALL.md build instructions for CentOS platforms + +## 2.3.12 - 2019-12-04 +### Fixed +* Retry session upload fragment when transient errors occur to prevent silent upload failure +* Update Microsoft restriction and limitations about windows naming files to include '~' for folder names +* Docker guide fixes, add multiple account setup instructions +* Check database for excluded sync_list items previously in scope +* Catch DNS resolution error +* Fix where an item now out of scope should be flagged for local delete +* Fix rebuilding of onedrive, but ensure version is properly updated +* Update Ubuntu i386 build instructions to use DMD using preferred method + +### Added +* Add debug message to when a message is sent to dbus or notification daemon +* Add i386 instructions for legacy low memory platforms using LDC + +## 2.3.11 - 2019-11-05 +### Fixed +* Fix typo in the documentation regarding invalid config when upgrading from 'skilion' codebase +* Fix handling of skip_dir, skip_file & sync_list config options +* Fix typo in the documentation regarding sync_list +* Fix log output to be consistent with sync_list exclusion +* Fix 'Processing X changes' output to be more reflective of actual activity when using sync_list +* Remove unused and unexported SED variable in Makefile.in +* Handle curl exceptions and timeouts better with backoff/retry logic +* Update skip_dir pattern matching when using wildcards +* Fix when a full rescan is performed when using sync_list +* Fix 'Key not found: name' when computing skip_dir path +* Fix call from --monitor to observe --no-remote-delete +* Fix unhandled exception when monitor initialisation failure occurs due to too many open local files +* Fix unhandled 412 error response from OneDrive API when moving files right after upload +* Fix --monitor when used with --download-only. This fixes a regression introduced in 12947d1. 
+* Fix if --single-directory is being used, and we are using --monitor, only set inotify watches on the single directory + +### Changed +* Move JSON logging output from error messages to debug output + +## 2.3.10 - 2019-10-01 +### Fixed +* Fix searching for 'name' when deleting a synced item, if the OneDrive API does not return the expected details in the API call +* Fix abnormal termination when no Internet connection +* Fix downloading of files from OneDrive Personal Shared Folders when the OneDrive API responds with unexpected additional path data +* Fix logging of 'initialisation' of client to actually when the attempt to initialise is performed +* Fix when using a sync_list file, using deltaLink will actually 'miss' changes (moves & deletes) on OneDrive as using sync_list discards changes +* Fix OneDrive API status code 500 handling when uploading files as error message is not correct +* Fix crash when resume_upload file is not a valid JSON +* Fix crash when a file system exception is generated when attempting to update the file date & time and this fails + +### Added +* If there is a case-insensitive match error, also return the remote name from the response +* Make user-agent string a configuration option & add to config file +* Set default User-Agent to 'OneDrive Client for Linux v{version}' + +### Changed +* Make verbose logging output optional on Docker +* Enable --resync & debug client output via environment variables on Docker + +## 2.3.9 - 2019-09-01 +### Fixed +* Catch a 403 Forbidden exception when querying Sharepoint Library Names +* Fix unhandled error exceptions that cause application to exit / crash when uploading files +* Fix JSON object validation for queries made against OneDrive where a JSON response is expected and where that response is to be used and expected to be valid +* Fix handling of 5xx responses from OneDrive when uploading via a session + +### Added +* Detect the need for --resync when config changes either via config file or cli override + +### Changed +* Change minimum required version of LDC to v1.12.0 + +### Removed +* Remove redundant logging output due to change in how errors are reported from OneDrive + +## 2.3.8 - 2019-08-04 +### Fixed +* Fix unable to download all files when OneDrive fails to return file level details used to validate file integrity +* Included the flag "-m" to create the home directory when creating the user +* Fix entrypoint.sh to work with "sudo docker run" +* Fix docker build error on stretch +* Fix hidden directories in 'root' from having prefix removed +* Fix Sharepoint Document Library handling for .txt & .csv files +* Fix logging for init.d service +* Fix OneDrive response missing required 'id' element when uploading images +* Fix 'Unexpected character '<'. 
(Line 1:1)' when OneDrive has an exception error
+* Fix error when creating the sync dir fails when there is no permission to create the sync dir
+
+### Added
+* Add explicit check for hashes to be returned in cases where OneDrive API fails to provide them despite requested to do so
+* Add comparison with sha1 if OneDrive provides that rather than quickXor
+* Add selinux configuration details for a sync folder outside of the home folder
+* Add date tag on docker.hub
+* Add back CentOS 6 install & uninstall to Makefile
+* Add a check to handle moving items out of sync_list sync scope & delete locally if true
+* Implement --get-file-link which will return the weburl of a file which has been synced to OneDrive
+
+### Changed
+* Change unauthorized-api exit code to 3
+* Update LDC to v1.16.0 for Travis CI testing
+* Use replace function for modified Sharepoint Document Library files rather than delete and upload as new file, preserving file history
+* Update Sharepoint modified file handling for files > 4Mb in size
+
+### Removed
+* Remove -d shorthand for --download-only to avoid confusion with other GNU applications where -d stands for 'debug'
+
+## 2.3.7 - 2019-07-03
+### Fixed
+* Fix not all files being downloaded due to OneDrive query failure
+* Fix false DB update which potentially could have led to false data loss on OneDrive
+
+## 2.3.6 - 2019-07-03 (DO NOT USE)
+### Fixed
+* Fix JSONValue object validation
+* Fix building without git being available
+* Fix some spelling/grammatical errors
+* Fix OneDrive error response on creating upload session
+
+### Added
+* Add download size & hash check to ensure downloaded files are valid and not corrupt
+* Added --force-http-2 to use HTTP/2 if desired
+
+### Changed
+* Deprecated --force-http-1.1 (enabled by default) due to OneDrive inconsistent behavior with HTTP/2 protocol
+
+## 2.3.5 - 2019-06-19
+### Fixed
+* Handle a directory in the sync_dir when no permission to access
+* Get rid of forced root necessity during installation
+* Fix broken autoconf code for --enable-XXX options
+* Fix so that skip_size check should only be used if configured
+* Fix a OneDrive Internal Error exception occurring before attempting to download a file
+
+### Added
+* Check for supported version of D compiler
+
+## 2.3.4 - 2019-06-13
+### Fixed
+* Fix 'Local files not deleted' when using bad 'skip_file' entry
+* Fix --dry-run logging output for faking downloading new files
+* Fix install unit files to correct location on RHEL/CentOS 7
+* Fix up unit file removal on all platforms
+* Fix setting times on a file by adding a check to see if the file was actually downloaded before attempting to set the times on the file
+* Fix an unhandled curl exception when OneDrive throws an internal timeout error
+* Check timestamp to ensure that latest timestamp is used when comparing OneDrive changes
+* Fix handling responses where cTag JSON elements are missing
+* Fix Docker entrypoint.sh failures when GID is defined but not UID
+
+### Added
+* Add autoconf based build system
+* Add an encoding validation check before any path length checks are performed as if the path contains any invalid UTF-8 sequences
+* Implement --sync-root-files to sync all files in the OneDrive root when using a sync_list file that would normally exclude these files from being synced
+* Implement skip_size feature request
+* Implement feature request to support file based OneDrive authorization (request | response)
+
+### Updated
+* Better handle initialisation issues when OneDrive / MS Graph is
experiencing problems that generate 401 & 5xx error codes +* Enhance error message when unable to connect to Microsoft OneDrive service when the local CA SSL certificate(s) have issues +* Update Dockerfile to correctly build on Docker Hub +* Rework directory layout and re-factor MD files for readability + +## 2.3.3 - 2019-04-16 +### Fixed +* Fix --upload-only check for Sharepoint uploads +* Fix check to ensure item root we flag as 'root' actually is OneDrive account 'root' +* Handle object error response from OneDrive when uploading to OneDrive Business +* Fix handling of some OneDrive accounts not providing 'quota' details +* Fix 'resume_upload' handling in the event of bad OneDrive response + +### Added +* Add debugging for --get-O365-drive-id function +* Add shell (bash,zsh) completion support +* Add config options for command line switches to allow for better config handling in docker containers + +### Updated +* Implement more meaningful 5xx error responses +* Update onedrive.logrotate indentations and comments +* Update 'min_notif_changes' to 'min_notify_changes' + +## 2.3.2 - 2019-04-02 +### Fixed +* Reduce scanning the entire local system in monitor mode for local changes +* Resolve file creation loop when working directly in the synced folder and Microsoft Sharepoint + +### Added +* Add 'monitor_fullscan_frequency' config option to set the frequency of performing a full disk scan when in monitor mode + +### Updated +* Update default 'skip_file' to include tmp and lock files generated by LibreOffice +* Update database version due to changing defaults of 'skip_file' which will force a rebuild and use of new skip_file default regex + +## 2.3.1 - 2019-03-26 +### Fixed +* Resolve 'make install' issue where rebuild of application would occur due to 'version' being flagged as .PHONY +* Update readme build instructions to include 'make clean;' before build to ensure that 'version' is cleanly removed and can be updated correctly +* Update Debian Travis CI build URL's + +## 2.3.0 - 2019-03-25 +### Fixed +* Resolve application crash if no 'size' value is returned when uploading a new file +* Resolve application crash if a 5xx error is returned when uploading a new file +* Resolve not 'refreshing' version file when rebuilding +* Resolve unexpected application processing by preventing use of --synchronize & --monitor together +* Resolve high CPU usage when performing DB reads +* Update error logging around directory case-insensitive match +* Update Travis CI and ARM dependencies for LDC 1.14.0 +* Update Makefile due to build failure if building from release archive file +* Update logging as to why a OneDrive object was skipped + +### Added +* Implement config option 'skip_dir' + +## 2.2.6 - 2019-03-12 +### Fixed +* Resolve application crash when unable to delete remote folders when business retention policies are enabled +* Resolve deprecation warning: loop index implicitly converted from size_t to int +* Resolve warnings regarding 'bashisms' +* Resolve handling of notification failure is dbus server has not started or available +* Resolve handling of response JSON to ensure that 'id' key element is always checked for +* Resolve excessive & needless logging in monitor mode +* Resolve compiling with LDC on Alpine as musl lacks some standard interfaces +* Resolve notification issues when offline and cannot act on changes +* Resolve Docker entrypoint.sh to accept command line arguments +* Resolve to create a new upload session on reinit +* Resolve where on OneDrive query failure, default root and 
drive id is used if a response is not returned +* Resolve Key not found: nextExpectedRanges when attempting session uploads and incorrect response is returned +* Resolve application crash when re-using an authentication URI twice after previous --logout +* Resolve creating a folder on a shared personal folder appears successful but returns a JSON error +* Resolve to treat mv of new file as upload of mv target +* Update Debian i386 build dependencies +* Update handling of --get-O365-drive-id to print out all 'site names' that match the explicit search entry rather than just the last match +* Update Docker readme & documentation +* Update handling of validating local file permissions for new file uploads +### Added +* Add support for install & uninstall on RHEL / CentOS 6.x +* Add support for when notifications are enabled, display the number of OneDrive changes to process if any are found +* Add 'config' option 'min_notif_changes' for minimum number of changes to notify on, default = 5 +* Add additional Docker container builds utilising a smaller OS footprint +* Add configurable interval of logging in monitor mode +* Implement new CLI option --skip-dot-files to skip .files and .folders if option is used +* Implement new CLI option --check-for-nosync to ignore folder when special file (.nosync) present +* Implement new CLI option --dry-run + +## 2.2.5 - 2019-01-16 +### Fixed +* Update handling of HTTP 412 - Precondition Failed errors +* Update --display-config to display sync_list if configured +* Add a check for 'id' key on metadata update to prevent 'std.json.JSONException@std/json.d(494): Key not found: id' +* Update handling of 'remote' folder designation as 'root' items +* Ensure that remote deletes are handled correctly +* Handle 'Item not found' exception when unable to query OneDrive 'root' for changes +* Add handling for JSON response error when OneDrive API returns a 404 due to OneDrive API regression +* Fix items highlighted by codacy review +### Added +* Add --force-http-1.1 flag to downgrade any HTTP/2 curl operations to HTTP 1.1 protocol +* Support building with ldc2 and usage of pkg-config for lib finding + +## 2.2.4 - 2018-12-28 +### Fixed +* Resolve JSONException when supplying --get-O365-drive-id option with a string containing spaces +* Resolve 'sync_dir' not read from 'config' file when run in Docker container +* Resolve logic where potentially a 'default' ~/OneDrive sync_dir could be set despite 'config' file configured for an alternate +* Make sure sqlite checkpointing works by properly finalizing statements +* Update logic handling of --single-directory to prevent inadvertent local data loss +* Resolve signal handling and database shutdown on SIGINT and SIGTERM +* Update man page +* Implement better help output formatting +### Added +* Add debug handling for sync_dir operations +* Add debug handling for homePath calculation +* Add debug handling for configDirBase calculation +* Add debug handling if syncDir is created +* Implement Feature Request: Add status command or switch + +## 2.2.3 - 2018-12-20 +### Fixed +* Fix syncdir option is ignored + +## 2.2.2 - 2018-12-20 +### Fixed +* Handle short lived files in monitor mode +* Provide better log messages, less noise on temporary timeouts +* Deal with items that disappear during upload +* Deal with deleted move targets +* Reinitialize sync engine after three failed attempts +* Fix activation of dmd for docker builds +* Fix to check displayName rather than description for --get-O365-drive-id +* Fix checking of config file 
keys for validity +* Fix exception handling when missing parameter from usage option +### Added +* Notification support via libnotify +* Add very verbose (debug) mode by double -v -v +* Implement option --display-config + +## 2.2.1 - 2018-12-04 +### Fixed +* Gracefully handle connection errors in monitor mode +* Fix renaming of files when syncing +* Installation of doc files, addition of man page +* Adjust timeout values for libcurl +* Continue in monitor mode when sync timed out +* Fix unreachable statements +* Update Makefile to better support packaging +* Allow starting offline in monitor mode +### Added +* Implement --get-O365-drive-id to get correct SharePoint Shared Library (#248) +* Docker buildfiles for onedrive service (#262) + +## 2.2.0 - 2018-11-24 +### Fixed +* Updated client to output additional logging when debugging +* Resolve database assertion failure due to authentication +* Resolve unable to create folders on shared OneDrive Personal accounts +### Added +* Implement feature request to Sync from Microsoft SharePoint +* Implement feature request to specify a logging directory if logging is enabled +### Changed +* Change '--download' to '--download-only' to align with '--upload-only' +* Change logging so that logging to a separate file is no longer the default + +## 2.1.6 - 2018-11-15 +### Fixed +* Updated HTTP/2 transport handling when using curl 7.62.0 for session uploads +### Added +* Added PKGBUILD for makepkg for building packages under Arch Linux + +## 2.1.5 - 2018-11-11 +### Fixed +* Resolve 'Key not found: path' when syncing from some shared folders due to OneDrive API change +* Resolve to only upload changes on remote folder if the item is in the database - dont assert if false +* Resolve files will not download or upload when using curl 7.62.0 due to HTTP/2 being set as default for all curl operations +* Resolve to handle HTTP request returned status code 412 (Precondition Failed) for session uploads to OneDrive Personal Accounts +* Resolve unable to remove '~/.config/onedrive/resume_upload: No such file or directory' if there is a session upload error and the resume file does not get created +* Resolve handling of response codes when using 2 different systems when using '--upload-only' but the same OneDrive account and uploading the same filename to the same location +### Updated +* Updated Travis CI building on LDC v1.11.0 for ARMHF builds +* Updated Makefile to use 'install -D -m 644' rather than 'cp -raf' +* Updated default config to be aligned to code defaults + +## 2.1.4 - 2018-10-10 +### Fixed +* Resolve syncing of OneDrive Personal Shared Folders due to OneDrive API change +* Resolve incorrect systemd installation location(s) in Makefile + +## 2.1.3 - 2018-10-04 +### Fixed +* Resolve File download fails if the file is marked as malware in OneDrive +* Resolve high CPU usage when running in monitor mode +* Resolve how default path is set when running under systemd on headless systems +* Resolve incorrectly nested configDir in X11 systems +* Resolve Key not found: driveType +* Resolve to validate filename length before download to conform with Linux FS limits +* Resolve file handling to look for HTML ASCII codes which will cause uploads to fail +* Resolve Key not found: expirationDateTime on session resume +### Added +* Update Travis CI building to test build on ARM64 + +## 2.1.2 - 2018-08-27 +### Fixed +* Resolve skipping of symlinks in monitor mode +* Resolve Gateway Timeout - JSONValue is not an object +* Resolve systemd/user is not supported on CentOS / 
RHEL +* Resolve HTTP request returned status code 429 (Too Many Requests) +* Resolve handling of maximum path length calculation +* Resolve 'The parent item is not in the local database' +* Resolve Correctly handle file case sensitivity issues in same folder +* Update unit files documentation link + +## 2.1.1 - 2018-08-14 +### Fixed +* Fix handling no remote delete of remote directories when using --no-remote-delete +* Fix handling of no permission to access a local file / corrupt local file +* Fix application crash when unable to access login.microsoft.com upon application startup +### Added +* Build instructions for openSUSE Leap 15.0 + +## 2.1.0 - 2018-08-10 +### Fixed +* Fix handling of database exit scenarios when there is zero disk space left on drive where the items database resides +* Fix handling of incorrect database permissions +* Fix handling of different database versions to automatically re-create tables if version mis-match +* Fix handling timeout when accessing the Microsoft OneDrive Service +* Fix localFileModifiedTime to not use fraction seconds +### Added +* Implement Feature: Add a progress bar for large uploads & downloads +* Implement Feature: Make checkinterval for monitor configurable +* Implement Feature: Upload Only Option that does not perform remote delete +* Implement Feature: Add ability to skip symlinks +* Add dependency, ebuild and build instructions for Gentoo distributions +### Changed +* Build instructions for x86, x86_64 and ARM32 platforms +* Travis CI files to automate building on x32, x64 and ARM32 architectures +* Travis CI files to test built application against valid, invalid and problem files from previous issues + +## 2.0.2 - 2018-07-18 +### Fixed +* Fix systemd service install for builds with DESTDIR defined +* Fix 'HTTP 412 - Precondition Failed' error handling +* Gracefully handle OneDrive account password change +* Update logic handling of --upload-only and --local-first + +## 2.0.1 - 2018-07-11 +### Fixed +* Resolve computeQuickXorHash generates a different hash when files are > 64Kb + +## 2.0.0 - 2018-07-10 +### Fixed +* Resolve conflict resolution issue during syncing - the client does not handle conflicts very well & keeps on adding the hostname to files +* Resolve skilion #356 by adding additional check for 409 response from OneDrive +* Resolve multiple versions of file shown on website after single upload +* Resolve to gracefully fail when 'onedrive' process cannot get exclusive database lock +* Resolve 'Key not found: fileSystemInfo' when then item is a remote item (OneDrive Personal) +* Resolve skip_file config entry needs to be checked for any characters to escape +* Resolve Microsoft Naming Convention not being followed correctly +* Resolve Error when trying to upload a file with weird non printable characters present +* Resolve Crash if file is locked by online editing (status code 423) +* Resolve Resolve compilation issue with dmd-2.081.0 +* Resolve skip_file configuration doesn't handle spaces or specified directory paths +### Added +* Implement Feature: Add a flag to detect when the sync-folder is missing +* Implement Travis CI for code testing +### Changed +* Update Makefile to use DESTDIR variables +* Update OneDrive Business maximum path length from 256 to 400 +* Update OneDrive Business allowed characters for files and folders +* Update sync_dir handling to use the absolute path for setting parameter to something other than ~/OneDrive via config file or command line +* Update Fedora build instructions + +## 1.1.2 - 
2018-05-17 +### Fixed +* Fix 4xx errors including (412 pre-condition, 409 conflict) +* Fix Key not found: lastModifiedDateTime (OneDrive API change) +* Fix configuration directory not found when run via init.d +* Fix skilion Issues #73, #121, #132, #224, #257, #294, #295, #297, #298, #300, #306, #315, #320, #329, #334, #337, #341 +### Added +* Add logging - log client activities to a file (/var/log/onedrive/%username%.onedrive.log or ~/onedrive.log) +* Add https debugging as a flag +* Add `--synchronize` to prevent from syncing when just blindly running the application +* Add individual folder sync +* Add sync from local directory first rather than download first then upload +* Add upload long path check +* Add upload only +* Add check for max upload file size before attempting upload +* Add systemd unit files for single & multi user configuration +* Add init.d file for older init.d based services +* Add Microsoft naming conventions and namespace validation for items that will be uploaded +* Add remaining free space counter at client initialisation to avoid out of space upload issue +* Add large file upload size check to align to OneDrive file size limitations +* Add upload file size validation & retry if does not match +* Add graceful handling of some fatal errors (OneDrive 5xx error handling) + +## Unreleased - 2018-02-19 +### Fixed +* Crash when the delta link is expired +### Changed +* Disabled buffering on stdout + +## 1.1.1 - 2018-01-20 +### Fixed +* Wrong regex for parsing authentication uri + +## 1.1.0 - 2018-01-19 +### Added +* Support for shared folders (OneDrive Personal only) +* `--download` option to only download changes +* `DC` variable in Makefile to chose the compiler +### Changed +* Print logs on stdout instead of stderr +* Improve log messages + +## 1.0.1 - 2017-08-01 +### Added +* `--syncdir` option +### Changed +* `--version` output simplified +* Updated README +### Fixed +* Fix crash caused by remotely deleted and recreated directories + +## 1.0.0 - 2017-07-14 +### Added +* `--version` option diff --git a/readme.md b/readme.md new file mode 100644 index 000000000..602e72321 --- /dev/null +++ b/readme.md @@ -0,0 +1,85 @@ +# OneDrive Client for Linux +[![Version](https://img.shields.io/github/v/release/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) +[![Release Date](https://img.shields.io/github/release-date/abraunegg/onedrive)](https://github.com/abraunegg/onedrive/releases) +[![Test Build](https://github.com/abraunegg/onedrive/actions/workflows/testbuild.yaml/badge.svg)](https://github.com/abraunegg/onedrive/actions/workflows/testbuild.yaml) +[![Build Docker Images](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml/badge.svg)](https://github.com/abraunegg/onedrive/actions/workflows/docker.yaml) +[![Docker Pulls](https://img.shields.io/docker/pulls/driveone/onedrive)](https://hub.docker.com/r/driveone/onedrive) + +Introducing a free Microsoft OneDrive Client that seamlessly supports OneDrive Personal, OneDrive for Business, OneDrive for Office365, and SharePoint Libraries. + +This robust and highly customisable client is compatible with all major Linux distributions and FreeBSD, and can also be deployed as a container using Docker or Podman. It offers both one-way and two-way synchronisation capabilities while ensuring a secure connection to Microsoft OneDrive services. 
+
+Originally derived as a 'fork' from the [skilion](https://github.com/skilion/onedrive) client, it's worth noting that the developer of the original client has explicitly stated they have no intention of maintaining or supporting their work ([reference](https://github.com/skilion/onedrive/issues/518#issuecomment-717604726)).
+
+This client represents a 100% re-imagining of the original work, addressing numerous notable bugs and issues while incorporating a significant array of new features. This client has been under active development since mid-2018.
+
+## Features
+* Supports 'Client Side Filtering' rules to determine what should be synced with Microsoft OneDrive
+* Sync State Caching
+* Real-Time local file monitoring with inotify
+* Real-Time syncing of remote updates via webhooks
+* File upload / download validation to ensure data integrity
+* Resumable uploads
+* Support OneDrive for Business (part of Office 365)
+* Shared Folder support for OneDrive Personal and OneDrive Business accounts
+* SharePoint / Office365 Shared Libraries
+* Desktop notifications via libnotify
+* Dry-run capability to test configuration changes
+* Prevent major OneDrive accidental data deletion after configuration change
+* Support for National cloud deployments (Microsoft Cloud for US Government, Microsoft Cloud Germany, Azure and Office 365 operated by 21Vianet in China)
+* Supports single & multi-tenanted applications
+* Supports rate limiting of traffic
+* Supports multi-threaded uploads and downloads
+
+## What's missing
+* Ability to encrypt/decrypt files on-the-fly when uploading/downloading files from OneDrive
+* Support for Windows 'On-Demand' functionality so file is only downloaded when accessed locally
+
+## External Enhancements
+* A GUI for configuration management: [OneDrive Client for Linux GUI](https://github.com/bpozdena/OneDriveGUI)
+* Colorful log output terminal modification: [OneDrive Client for Linux Colorful log Output](https://github.com/zzzdeb/dotfiles/blob/master/scripts/tools/onedrive_log)
+* System Tray Icon: [OneDrive Client for Linux System Tray Icon](https://github.com/DanielBorgesOliveira/onedrive_tray)
+
+## Frequently Asked Questions
+Refer to [Frequently Asked Questions](https://github.com/abraunegg/onedrive/wiki/Frequently-Asked-Questions)
+
+## Have a question
+If you have a question or need something clarified, please raise a new discussion post [here](https://github.com/abraunegg/onedrive/discussions)
+
+## Reporting an Issue or Bug
+If you encounter any bugs you can report them here on GitHub. Before filing an issue be sure to:
+
+1. Check the version of the application you are using with `onedrive --version` and ensure that you are running either the latest [release](https://github.com/abraunegg/onedrive/releases) or built from master.
+2. Fill in a new bug report using the [issue template](https://github.com/abraunegg/onedrive/issues/new?template=bug_report.md)
+3. Generate a debug log for support using the following [process](https://github.com/abraunegg/onedrive/wiki/Generate-debug-log-for-support)
+   * If you are in *any* way concerned regarding the sensitivity of the data contained within the verbose debug log file, create a new OneDrive account, configure the client to use that, use *dummy* data to simulate your environment and then replicate your original issue
+   * If you are still concerned, provide an NDA or confidentiality document to sign
+4.
Upload the debug log to [pastebin](https://pastebin.com/) or archive and email to support@mynas.com.au + * If you are concerned regarding the sensitivity of your debug data, encrypt + password protect the archive file and provide the decryption password via an out-of-band (OOB) mechanism. Email support@mynas.com.au for an OOB method for the password to be sent. + * If you are still concerned, provide an NDA or confidentiality document to sign + +## Known issues +Refer to [docs/known-issues.md](https://github.com/abraunegg/onedrive/blob/master/docs/known-issues.md) + +## Documentation and Configuration Assistance +### Installing from Distribution Packages or Building the OneDrive Client for Linux from source +Refer to [docs/install.md](https://github.com/abraunegg/onedrive/blob/master/docs/install.md) + +### Configuration and Usage +Refer to [docs/usage.md](https://github.com/abraunegg/onedrive/blob/master/docs/usage.md) + +### Configure OneDrive Business Shared Folders +Refer to [docs/business-shared-folders.md](https://github.com/abraunegg/onedrive/blob/master/docs/business-shared-folders.md) + +### Configure SharePoint / Office 365 Shared Libraries (Business or Education) +Refer to [docs/sharepoint-libraries.md](https://github.com/abraunegg/onedrive/blob/master/docs/sharepoint-libraries.md) + +### Configure National Cloud support +Refer to [docs/national-cloud-deployments.md](https://github.com/abraunegg/onedrive/blob/master/docs/national-cloud-deployments.md) + +### Docker support +Refer to [docs/docker.md](https://github.com/abraunegg/onedrive/blob/master/docs/docker.md) + +### Podman support +Refer to [docs/podman.md](https://github.com/abraunegg/onedrive/blob/master/docs/podman.md) + From 859cd5d267fbe76c1bf2e6532fa3a32b5246b942 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Wed, 10 Jan 2024 19:13:28 +1100 Subject: [PATCH 006/305] Update application-config-options.md * Update docs --- docs/application-config-options.md | 7 ------- 1 file changed, 7 deletions(-) diff --git a/docs/application-config-options.md b/docs/application-config-options.md index 31b50614b..ba1509ff1 100644 --- a/docs/application-config-options.md +++ b/docs/application-config-options.md @@ -613,14 +613,7 @@ Files can be skipped in the following fashion: * Explicitly specify the filename only and skip every instance of this filename, eg: 'filename.ext' ```text -# When changing a config option below, remove the '#' from the start of the line -# For explanations of all config options below see docs/USAGE.md or the man page. -# -# sync_dir = "~/OneDrive" skip_file = "~*|/Documents/OneNote*|/Documents/config.xlaunch|myfile.ext|/Documents/keepass.kdbx" -# monitor_interval = "300" -# skip_dir = "" -# log_dir = "/var/log/onedrive/" ``` The 'skip_file' option can be specified multiple times within your config file, for example: ```text From ca8c2d1bdc5248b6618b52151cc28091aa6b3702 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Thu, 11 Jan 2024 10:51:18 +1100 Subject: [PATCH 007/305] Fix zero byte file handling * Fix zero byte file handling --- src/sync.d | 11 ++++++++--- src/util.d | 5 ++++- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/src/sync.d b/src/sync.d index 05c9a5a8b..4e4af4a4e 100644 --- a/src/sync.d +++ b/src/sync.d @@ -4643,13 +4643,18 @@ class SyncEngine { parentItem.driveId = appConfig.defaultDriveId; } + // Get the new file size + // Even if the permissions on the file are: -rw-------. 
1 root root 8 Jan 11 09:42 + // We can obtain the file size + thisFileSize = getSize(fileToUpload); + // Can we read the file - as a permissions issue or actual file corruption will cause a failure // Resolves: https://github.com/abraunegg/onedrive/issues/113 - if (readLocalFile(fileToUpload)) { + // readLocalFile cannot 'read' 1 byte of data from a zero byte file size .. + if (readLocalFile(fileToUpload) || (thisFileSize == 0)) { if (parentPathFoundInDB) { // The local file can be read - so we can read it to attemtp to upload it in this thread - // Get the file size - thisFileSize = getSize(fileToUpload); + // Does this file exceed the maximum filesize for OneDrive // Resolves: https://github.com/skilion/onedrive/issues/121 , https://github.com/skilion/onedrive/issues/294 , https://github.com/skilion/onedrive/issues/329 if (thisFileSize <= maxUploadFileSize) { diff --git a/src/util.d b/src/util.d index 684981128..04fa25c26 100644 --- a/src/util.d +++ b/src/util.d @@ -268,7 +268,10 @@ bool readLocalFile(string path) { // Check if the read operation was successful if (data.length != 1) { - addLogEntry("Failed to read the required amount from the file: " ~ path); + // What is the file size? + if (getSize(path) != 0) { + addLogEntry("Failed to read the required amount from the file: " ~ path); + } return false; } } catch (std.file.FileException e) { From 34407b451f83e57deb22637d9e74a994ab6a0e6b Mon Sep 17 00:00:00 2001 From: abraunegg Date: Thu, 11 Jan 2024 12:26:38 +1100 Subject: [PATCH 008/305] Update sync.d * Just sync the triggered file, no scan of parent directory --- src/sync.d | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/sync.d b/src/sync.d index 4e4af4a4e..30d4fb67d 100644 --- a/src/sync.d +++ b/src/sync.d @@ -4196,8 +4196,7 @@ class SyncEngine { // This is a new file as it is not in the database // Log that the file has been added locally addLogEntry("[M] New local file added: " ~ localFilePath, ["verbose"]); - // Scan the parent path for any new data, not just this this item - scanLocalFilesystemPathForNewData(dirName(localFilePath)); + scanLocalFilesystemPathForNewData(localFilePath); } else { // This is a potentially modified file, needs to be handled as such. Is the item truly modified? if (!testFileHash(localFilePath, databaseItem)) { From e4a6d620727cc74e137505b5063c8ae6c3a34730 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Thu, 11 Jan 2024 20:52:41 +1100 Subject: [PATCH 009/305] Update sync.d * Cleanup array memory before we start adding files --- src/sync.d | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/sync.d b/src/sync.d index 30d4fb67d..4a9487b08 100644 --- a/src/sync.d +++ b/src/sync.d @@ -3875,6 +3875,9 @@ class SyncEngine { // Perform a filesystem walk to uncover new data to upload to OneDrive void scanLocalFilesystemPathForNewData(string path) { + + // Cleanup array memory before we start adding files + newLocalFilesToUploadToOneDrive = []; // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? 
string logPath; @@ -3954,7 +3957,7 @@ class SyncEngine { // Perform the upload uploadNewLocalFileItems(); - // Cleanup array memory + // Cleanup array memory after uploading all files newLocalFilesToUploadToOneDrive = []; } } From 9396478caa87416e11a7831068ac5d4cb24db17a Mon Sep 17 00:00:00 2001 From: abraunegg Date: Fri, 12 Jan 2024 06:01:42 +1100 Subject: [PATCH 010/305] Update monitor.d * Update inotify event handling where files might be temporarily moved by applications. This scenario is common with certain text editors (like Vim with specific configurations), which can lead to misleading file deletion detections. --- src/monitor.d | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/monitor.d b/src/monitor.d index d5481dc30..d046829bd 100644 --- a/src/monitor.d +++ b/src/monitor.d @@ -175,6 +175,9 @@ final class Monitor { void delegate(string path) onDelete; void delegate(string from, string to) onMove; + // List of paths that were moved, not deleted + bool[string] movedNotDeleted; + // Configure the class varaible to consume the application configuration including selective sync this(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) { this.appConfig = appConfig; @@ -459,6 +462,7 @@ final class Monitor { if (event.mask & IN_MOVED_FROM) { addLogEntry("event IN_MOVED_FROM: " ~ path, ["debug"]); cookieToPath[event.cookie] = path; + movedNotDeleted[path] = true; // Mark as moved, not deleted } else if (event.mask & IN_MOVED_TO) { addLogEntry("event IN_MOVED_TO: " ~ path, ["debug"]); if (event.mask & IN_ISDIR) addRecursive(path); @@ -466,8 +470,9 @@ final class Monitor { if (from) { cookieToPath.remove(event.cookie); if (useCallbacks) onMove(*from, path); + movedNotDeleted.remove(*from); // Clear moved status } else { - // item moved from the outside + // Handle file moved in from outside if (event.mask & IN_ISDIR) { if (useCallbacks) onDirCreated(path); } else { @@ -481,8 +486,12 @@ final class Monitor { if (useCallbacks) onDirCreated(path); } } else if (event.mask & IN_DELETE) { - addLogEntry("event IN_DELETE: " ~ path, ["debug"]); - if (useCallbacks) onDelete(path); + if (path in movedNotDeleted) { + movedNotDeleted.remove(path); // Ignore delete for moved files + } else { + addLogEntry("event IN_DELETE: " ~ path, ["debug"]); + if (useCallbacks) onDelete(path); + } } else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) { addLogEntry("event IN_CLOSE_WRITE and not IN_ISDIR: " ~ path, ["debug"]); if (useCallbacks) onFileChanged(path); From 3e86fe4f2a44bdaa180b1de1e07ac4d4a4bb8945 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Fri, 12 Jan 2024 06:05:28 +1100 Subject: [PATCH 011/305] Update config.d * Add logging output for what IP protocol is going to be used to access Microsoft OneDrive --- src/config.d | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/config.d b/src/config.d index 2b7927903..e3b93630b 100644 --- a/src/config.d +++ b/src/config.d @@ -547,6 +547,11 @@ class ApplicationConfig { } } + // What IP protocol is going to be used to access Microsoft OneDrive + if (getValueLong("ip_protocol_version") == 0) addLogEntry("Using IPv4 and IPv6 (if configured) to access Microsoft OneDrive"); + if (getValueLong("ip_protocol_version") == 1) addLogEntry("Forcing client to use IPv4 connections only"); + if (getValueLong("ip_protocol_version") == 2) addLogEntry("Forcing client to use IPv6 connections only"); + // return if the configuration was initialised return configurationInitialised; } @@ -1633,7 
+1638,7 @@ class ApplicationConfig { addLogEntry("Using this configBackupFile: " ~ configBackupFile, ["debug"]); if (exists(configBackupFile)) { - // check backup config what has changed for these configuration options if anything + // Check backup config what has changed for these configuration options if anything // # drive_id = "" // # sync_dir = "~/OneDrive" // # skip_file = "~*|.~*|*.tmp|*.swp|*.partial" From a089b85bffeae3684c6aafafbe98f1f13b777743 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Fri, 12 Jan 2024 11:21:45 +1100 Subject: [PATCH 012/305] Update config.d Implement refactored and optimised applicationChangeWhereResyncRequired() function --- src/config.d | 316 +++++++++++++++------------------------------------ 1 file changed, 90 insertions(+), 226 deletions(-) diff --git a/src/config.d b/src/config.d index e3b93630b..9cd595e7c 100644 --- a/src/config.d +++ b/src/config.d @@ -961,7 +961,7 @@ class ApplicationConfig { addLogEntry("Please review the revised documentation on how to configure this application feature. You must update your client configuration and make any necessary online adjustments accordingly."); addLogEntry(); } - // Return false + // Return ignore_depreciation return ignore_depreciation; } } @@ -1598,54 +1598,40 @@ class ApplicationConfig { bool applicationChangeWhereResyncRequired() { // Default is that no resync is required bool resyncRequired = false; + + // Consolidate the flags for different configuration changes + bool[9] configOptionsDifferent; + + // Handle multiple entries of skip_file + string backupConfigFileSkipFile; - // Configuration File Flags - bool configFileOptionsDifferent = false; - bool syncListFileDifferent = false; - bool syncDirDifferent = false; - bool skipFileDifferent = false; - bool skipDirDifferent = false; - bool skipDotFilesDifferent = false; - bool skipSymbolicLinksDifferent = false; - bool driveIdDifferent = false; - bool syncBusinessSharedItemsDifferent = false; - bool businessSharedItemsFileDifferent = false; - - // Create the required initial hash files - createRequiredInitialConfigurationHashFiles(); + // Handle multiple entries of skip_dir + string backupConfigFileSkipDir; + // Create and read the required initial hash files + createRequiredInitialConfigurationHashFiles(); // Read in the existing hash file values readExistingConfigurationHashFiles(); - - // Was the 'sync_list' file updated? - if (currentSyncListHash != previousSyncListHash) { - // Debugging output to assist what changed - addLogEntry("sync_list file has been updated, --resync needed", ["debug"]); - syncListFileDifferent = true; - } - - // Was the 'business_shared_items' file updated? - if (currentBusinessSharedItemsHash != previousBusinessSharedItemsHash) { - // Debugging output to assist what changed - addLogEntry("business_shared_folders file has been updated, --resync needed", ["debug"]); - businessSharedItemsFileDifferent = true; - } - - // Was the 'config' file updated between last execution and this execution? 
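	// For reference, the index positions used with 'configOptionsDifferent' in this refactored function correspond to:
	//   0 = sync_list file changed, 1 = business_shared_items file changed, 2 = drive_id,
	//   3 = sync_dir, 4 = skip_file, 5 = skip_dir, 6 = skip_dotfiles, 7 = skip_symlinks,
	//   8 = sync_business_shared_items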
+ + // Helper lambda for logging and setting the difference flag + auto logAndSetDifference = (string message, size_t index) { + addLogEntry(message, ["debug"]); + configOptionsDifferent[index] = true; + }; + + // Check for changes in the sync_list and business_shared_items files + if (currentSyncListHash != previousSyncListHash) + logAndSetDifference("sync_list file has been updated, --resync needed", 0); + + if (currentBusinessSharedItemsHash != previousBusinessSharedItemsHash) + logAndSetDifference("business_shared_folders file has been updated, --resync needed", 1); + + // Check for updates in the config file if (currentConfigHash != previousConfigHash) { - // config file was updated, however we only want to trigger a --resync requirement if sync_dir, skip_dir, skip_file or drive_id was modified addLogEntry("Application configuration file has been updated, checking if --resync needed"); addLogEntry("Using this configBackupFile: " ~ configBackupFile, ["debug"]); - + if (exists(configBackupFile)) { - // Check backup config what has changed for these configuration options if anything - // # drive_id = "" - // # sync_dir = "~/OneDrive" - // # skip_file = "~*|.~*|*.tmp|*.swp|*.partial" - // # skip_dir = "" - // # skip_dotfiles = "" - // # skip_symlinks = "" - // # sync_business_shared_items = "" string[string] backupConfigStringValues; backupConfigStringValues["drive_id"] = ""; backupConfigStringValues["sync_dir"] = ""; @@ -1654,10 +1640,7 @@ class ApplicationConfig { backupConfigStringValues["skip_dotfiles"] = ""; backupConfigStringValues["skip_symlinks"] = ""; backupConfigStringValues["sync_business_shared_items"] = ""; - - // bool flags to trigger if the entries that trigger a --resync were found in the backup config file - // if these were not in the backup file, they may have been added ... 
thus new, thus we need to double check the existing - // config file to see if this was a newly added config option + bool drive_id_present = false; bool sync_dir_present = false; bool skip_file_present = false; @@ -1665,19 +1648,19 @@ class ApplicationConfig { bool skip_dotfiles_present = false; bool skip_symlinks_present = false; bool sync_business_shared_items_present = false; - - // Common debug message if an element is different + string configOptionModifiedMessage = " was modified since the last time the application was successfully run, --resync required"; auto configBackupFileHandle = File(configBackupFile, "r"); + scope(exit) { + if (configBackupFileHandle.isOpen()) { + configBackupFileHandle.close(); + } + } + string lineBuffer; - - // read configBackupFile line by line auto range = configBackupFileHandle.byLine(); - // for each line foreach (line; range) { - addLogEntry("Backup Config Line: " ~ lineBuffer, ["debug"]); - lineBuffer = stripLeft(line).to!string; if (lineBuffer.length == 0 || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue; auto c = lineBuffer.matchFirst(configRegex); @@ -1685,228 +1668,109 @@ class ApplicationConfig { c.popFront(); // skip the whole match string key = c.front.dup; addLogEntry("Backup Config Key: " ~ key, ["debug"]); - + auto p = key in backupConfigStringValues; if (p) { c.popFront(); - // compare this key + string value = c.front.dup; + // Compare each key value with current config if (key == "drive_id") { drive_id_present = true; - if (c.front.dup != getValueString("drive_id")) { - addLogEntry(key ~ configOptionModifiedMessage, ["debug"]); - configFileOptionsDifferent = true; + if (value != getValueString("drive_id")) { + logAndSetDifference(key ~ configOptionModifiedMessage, 2); } } - if (key == "sync_dir") { sync_dir_present = true; - if (c.front.dup != getValueString("sync_dir")) { - addLogEntry(key ~ configOptionModifiedMessage, ["debug"]); - configFileOptionsDifferent = true; + if (value != getValueString("sync_dir")) { + logAndSetDifference(key ~ configOptionModifiedMessage, 3); } } + // skip_file handling if (key == "skip_file") { skip_file_present = true; - string computedBackupSkipFile = defaultSkipFile ~ "|" ~ to!string(c.front.dup); - if (computedBackupSkipFile != getValueString("skip_file")) { - addLogEntry(key ~ configOptionModifiedMessage, ["debug"]); - configFileOptionsDifferent = true; + // Handle multiple entries of skip_file + if (backupConfigFileSkipFile.empty) { + // currently no entry exists + backupConfigFileSkipFile = c.front.dup; + } else { + // add to existing backupConfigFileSkipFile entry + backupConfigFileSkipFile = backupConfigFileSkipFile ~ "|" ~ to!string(c.front.dup); } } + // skip_dir handling if (key == "skip_dir") { skip_dir_present = true; - if (c.front.dup != getValueString("skip_dir")) { - addLogEntry(key ~ configOptionModifiedMessage, ["debug"]); - configFileOptionsDifferent = true; + // Handle multiple entries of skip_dir + if (backupConfigFileSkipDir.empty) { + // currently no entry exists + backupConfigFileSkipDir = c.front.dup; + } else { + // add to existing backupConfigFileSkipDir entry + backupConfigFileSkipDir = backupConfigFileSkipDir ~ "|" ~ to!string(c.front.dup); } } if (key == "skip_dotfiles") { skip_dotfiles_present = true; - if (c.front.dup != to!string(getValueBool("skip_dotfiles"))) { - addLogEntry(key ~ configOptionModifiedMessage, ["debug"]); - configFileOptionsDifferent = true; + if (value != to!string(getValueBool("skip_dotfiles"))) { + logAndSetDifference(key ~ 
configOptionModifiedMessage, 6); } } - if (key == "skip_symlinks") { skip_symlinks_present = true; - if (c.front.dup != to!string(getValueBool("skip_symlinks"))) { - addLogEntry(key ~ configOptionModifiedMessage, ["debug"]); - configFileOptionsDifferent = true; + if (value != to!string(getValueBool("skip_symlinks"))) { + logAndSetDifference(key ~ configOptionModifiedMessage, 7); } } - if (key == "sync_business_shared_items") { sync_business_shared_items_present = true; - if (c.front.dup != to!string(getValueBool("sync_business_shared_items"))) { - addLogEntry(key ~ configOptionModifiedMessage, ["debug"]); - configFileOptionsDifferent = true; + if (value != to!string(getValueBool("sync_business_shared_items"))) { + logAndSetDifference(key ~ configOptionModifiedMessage, 8); } } } } } - // close file if open - if (configBackupFileHandle.isOpen()) { - // close open file - configBackupFileHandle.close(); - } - - // Were any of the items that trigger a --resync not in the existing backup 'config' file .. thus newly added? - if ((!drive_id_present) || (!sync_dir_present) || (! skip_file_present) || (!skip_dir_present) || (!skip_dotfiles_present) || (!skip_symlinks_present)) { - addLogEntry("drive_id present in config backup: " ~ drive_id_present, ["debug"]); - addLogEntry("sync_dir present in config backup: " ~ sync_dir_present, ["debug"]); - addLogEntry("skip_file present in config backup: " ~ skip_file_present, ["debug"]); - addLogEntry("skip_dir present in config backup: " ~ skip_dir_present, ["debug"]); - addLogEntry("skip_dotfiles present in config backup: " ~ skip_dotfiles_present, ["debug"]); - addLogEntry("skip_symlinks present in config backup: " ~ skip_symlinks_present, ["debug"]); - addLogEntry("sync_business_shared_items present in config backup: " ~ sync_business_shared_items_present, ["debug"]); - - if ((!drive_id_present) && (configFileDriveId != "")) { - addLogEntry("drive_id newly added ... --resync needed"); - configFileOptionsDifferent = true; - driveIdDifferent = true; - } - - if ((!sync_dir_present) && (configFileSyncDir != defaultSyncDir)) { - addLogEntry("sync_dir newly added ... --resync needed"); - configFileOptionsDifferent = true; - syncDirDifferent = true; - } - - if ((!skip_file_present) && (configFileSkipFile != defaultSkipFile)) { - addLogEntry("skip_file newly added ... --resync needed"); - configFileOptionsDifferent = true; - skipFileDifferent = true; - } - - if ((!skip_dir_present) && (configFileSkipDir != "")) { - addLogEntry("skip_dir newly added ... --resync needed"); - configFileOptionsDifferent = true; - skipFileDifferent = true; - } - - if ((!skip_dotfiles_present) && (configFileSkipDotfiles)) { - addLogEntry("skip_dotfiles newly added ... --resync needed"); - configFileOptionsDifferent = true; - skipDotFilesDifferent = true; - } - - if ((!skip_symlinks_present) && (configFileSkipSymbolicLinks)) { - addLogEntry("skip_symlinks newly added ... --resync needed"); - configFileOptionsDifferent = true; - skipSymbolicLinksDifferent = true; - } - - if ((!sync_business_shared_items_present) && (configFileSyncBusinessSharedItems)) { - addLogEntry("sync_business_shared_items newly added ... 
--resync needed"); - configFileOptionsDifferent = true; - syncBusinessSharedItemsDifferent = true; - } - } + // skip_file can be specified multiple times + if (skip_file_present && backupConfigFileSkipFile != configFileSkipFile) logAndSetDifference("skip_file" ~ configOptionModifiedMessage, 4); - object.destroy(configBackupFileHandle); - object.destroy(range); - object.destroy(lineBuffer); + // skip_dir can be specified multiple times + if (skip_dir_present && backupConfigFileSkipDir != configFileSkipDir) logAndSetDifference("skip_dir" ~ configOptionModifiedMessage, 5); + // Check for newly added configuration options + if (!drive_id_present && configFileDriveId != "") logAndSetDifference("drive_id newly added ... --resync needed", 2); + if (!sync_dir_present && configFileSyncDir != defaultSyncDir) logAndSetDifference("sync_dir newly added ... --resync needed", 3); + if (!skip_file_present && configFileSkipFile != defaultSkipFile) logAndSetDifference("skip_file newly added ... --resync needed", 4); + if (!skip_dir_present && configFileSkipDir != "") logAndSetDifference("skip_dir newly added ... --resync needed", 5); + if (!skip_dotfiles_present && configFileSkipDotfiles) logAndSetDifference("skip_dotfiles newly added ... --resync needed", 6); + if (!skip_symlinks_present && configFileSkipSymbolicLinks) logAndSetDifference("skip_symlinks newly added ... --resync needed", 7); + if (!sync_business_shared_items_present && configFileSyncBusinessSharedItems) logAndSetDifference("sync_business_shared_items newly added ... --resync needed", 8); } else { - // no backup to check addLogEntry("WARNING: no backup config file was found, unable to validate if any changes made"); } } - - // config file set options can be changed via CLI input, specifically these will impact sync and a --resync will be needed: - // --syncdir ARG - // --skip-file ARG - // --skip-dir ARG - // --skip-dot-files - // --skip-symlinks - + + // Check CLI options if (exists(applicableConfigFilePath)) { - // config file exists - // was the sync_dir updated by CLI? - if (configFileSyncDir != "") { - // sync_dir was set in config file - if (configFileSyncDir != getValueString("sync_dir")) { - // config file was set and CLI input changed this - - // Is this potentially running as a Docker container? - if (entrypointExists) { - // entrypoint.sh exists - addLogEntry("sync_dir: CLI override of config file option, however entrypoint.sh exists, thus most likely first run of Docker container", ["debug"]); - } else { - // entrypoint.sh does not exist - addLogEntry("sync_dir: CLI override of config file option, --resync needed", ["debug"]); - syncDirDifferent = true; - } - } - } - - // was the skip_file updated by CLI? 
- if (configFileSkipFile != "") { - // skip_file was set in config file - if (configFileSkipFile != getValueString("skip_file")) { - // config file was set and CLI input changed this - addLogEntry("skip_file: CLI override of config file option, --resync needed", ["debug"]); - skipFileDifferent = true; - } - } + if (configFileSyncDir != "" && configFileSyncDir != getValueString("sync_dir")) logAndSetDifference("sync_dir: CLI override of config file option, --resync needed", 3); + if (configFileSkipFile != "" && configFileSkipFile != getValueString("skip_file")) logAndSetDifference("skip_file: CLI override of config file option, --resync needed", 4); + if (configFileSkipDir != "" && configFileSkipDir != getValueString("skip_dir")) logAndSetDifference("skip_dir: CLI override of config file option, --resync needed", 5); + if (!configFileSkipDotfiles && getValueBool("skip_dotfiles")) logAndSetDifference("skip_dotfiles: CLI override of config file option, --resync needed", 6); + if (!configFileSkipSymbolicLinks && getValueBool("skip_symlinks")) logAndSetDifference("skip_symlinks: CLI override of config file option, --resync needed", 7); + } - // was the skip_dir updated by CLI? - if (configFileSkipDir != "") { - // skip_dir was set in config file - if (configFileSkipDir != getValueString("skip_dir")) { - // config file was set and CLI input changed this - addLogEntry("skip_dir: CLI override of config file option, --resync needed", ["debug"]); - skipDirDifferent = true; - } + // Aggregate the result to determine if a resync is required + foreach (optionDifferent; configOptionsDifferent) { + if (optionDifferent) { + resyncRequired = true; + break; } - - // was skip_dotfiles updated by --skip-dot-files ? - if (!configFileSkipDotfiles) { - // was not set in config file - if (getValueBool("skip_dotfiles")) { - // --skip-dot-files passed in - addLogEntry("skip_dotfiles: CLI override of config file option, --resync needed", ["debug"]); - skipDotFilesDifferent = true; - } - } - - // was skip_symlinks updated by --skip-symlinks ? - if (!configFileSkipSymbolicLinks) { - // was not set in config file - if (getValueBool("skip_symlinks")) { - // --skip-symlinks passed in - addLogEntry("skip_symlinks: CLI override of config file option, --resync needed", ["debug"]); - skipSymbolicLinksDifferent = true; - } - } - } - - // Did any of the config files or CLI options trigger a --resync requirement? 
- addLogEntry("configFileOptionsDifferent: " ~ to!string(configFileOptionsDifferent), ["debug"]); - - // Options - addLogEntry("driveIdDifferent: " ~ to!string(driveIdDifferent), ["debug"]); - addLogEntry("syncDirDifferent: " ~ to!string(syncDirDifferent), ["debug"]); - addLogEntry("skipFileDifferent: " ~ to!string(skipFileDifferent), ["debug"]); - addLogEntry("skipDirDifferent: " ~ to!string(skipDirDifferent), ["debug"]); - addLogEntry("skipDotFilesDifferent: " ~ to!string(skipDotFilesDifferent), ["debug"]); - addLogEntry("skipSymbolicLinksDifferent: " ~ to!string(skipSymbolicLinksDifferent), ["debug"]); - addLogEntry("syncBusinessSharedItemsDifferent: " ~ to!string(syncBusinessSharedItemsDifferent), ["debug"]); - - // Files with change - addLogEntry("syncListFileDifferent: " ~ to!string(syncListFileDifferent), ["debug"]); - addLogEntry("businessSharedItemsFileDifferent: " ~ to!string(businessSharedItemsFileDifferent), ["debug"]); - - if ((configFileOptionsDifferent) || (syncListFileDifferent) || (businessSharedItemsFileDifferent) || (syncDirDifferent) || (skipFileDifferent) || (skipDirDifferent) || (driveIdDifferent) || (skipDotFilesDifferent) || (skipSymbolicLinksDifferent) || (syncBusinessSharedItemsDifferent) ) { - // set the flag - resyncRequired = true; } + return resyncRequired; } From b5988f7affec6cb3d863f903aff9ab22cf21dad3 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Fri, 12 Jan 2024 11:44:57 +1100 Subject: [PATCH 013/305] Update curlEngine.d * Re-introduce CURLOPT_FORBID_REUSE setting from 'alpha-2' --- src/curlEngine.d | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/curlEngine.d b/src/curlEngine.d index 1a0db204e..3fdf07a99 100644 --- a/src/curlEngine.d +++ b/src/curlEngine.d @@ -84,6 +84,14 @@ class CurlEngine { // Ensure that TCP_NODELAY is set to 0 to ensure that TCP NAGLE is enabled http.handle.set(CurlOption.tcp_nodelay,0); + // https://curl.se/libcurl/c/CURLOPT_FORBID_REUSE.html + // CURLOPT_FORBID_REUSE - make connection get closed at once after use + // Ensure that we ARE NOT reusing TCP sockets connections - setting to 0 ensures that we ARE reusing connections (we did this in v2.4.xx) to ensure connections remained open and usable + // Setting this to 1 ensures that when we close the curl instance, any open sockets are closed - which we need to do when running + // multiple threads and API instances at the same time otherwise we run out of local files | sockets pretty quickly + // The libcurl default is 1 - ensure we are configuring not to reuse connections and leave unused sockets open + http.handle.set(CurlOption.forbid_reuse,1); + if (httpsDebug) { // Output what options we are using so that in the debug log this can be tracked addLogEntry("http.dnsTimeout = " ~ to!string(dnsTimeout), ["debug"]); From d20b62da5701285d7573dd7441cd225416b4278e Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sat, 13 Jan 2024 07:46:47 +1100 Subject: [PATCH 014/305] Fix 'config' file parsing and option handling Fix that 'config' does not load when a deprecated option is at the beginning of the config file --- src/config.d | 453 +++++++++++++++++++-------------------------------- src/main.d | 3 + src/sync.d | 7 - src/util.d | 7 + 4 files changed, 176 insertions(+), 294 deletions(-) diff --git a/src/config.d b/src/config.d index 9cd595e7c..be0462591 100644 --- a/src/config.d +++ b/src/config.d @@ -547,11 +547,6 @@ class ApplicationConfig { } } - // What IP protocol is going to be used to access Microsoft OneDrive - if (getValueLong("ip_protocol_version") == 0) 
addLogEntry("Using IPv4 and IPv6 (if configured) to access Microsoft OneDrive"); - if (getValueLong("ip_protocol_version") == 1) addLogEntry("Forcing client to use IPv4 connections only"); - if (getValueLong("ip_protocol_version") == 2) addLogEntry("Forcing client to use IPv6 connections only"); - // return if the configuration was initialised return configurationInitialised; } @@ -675,313 +670,190 @@ class ApplicationConfig { // Load a configuration file from the provided filename private bool loadConfigFile(string filename) { - // configure function variables try { addLogEntry("Reading configuration file: " ~ filename); readText(filename); } catch (std.file.FileException e) { - // Unable to access required file addLogEntry("ERROR: Unable to access " ~ e.msg); - // Use exit scopes to shutdown API return false; } - // We were able to readText the config file - so, we should be able to open and read it auto file = File(filename, "r"); - string lineBuffer; - - // configure scopes - // - failure - scope(failure) { - // close file if open - if (file.isOpen()){ - // close open file - file.close(); + scope(exit) file.close(); + scope(failure) file.close(); + + foreach (line; file.byLine()) { + string lineBuffer = stripLeft(line).to!string; + if (lineBuffer.empty || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue; + auto c = lineBuffer.matchFirst(configRegex); + if (c.empty) { + addLogEntry("Malformed config line: " ~ lineBuffer); + continue; } - } - // - exit - scope(exit) { - // close file if open - if (file.isOpen()){ - // close open file - file.close(); + + c.popFront(); // skip the whole match + string key = c.front.dup; + c.popFront(); + + // Handle deprecated keys + switch (key) { + case "min_notify_changes": + case "force_http_2": + addLogEntry("The option '" ~ key ~ "' has been depreciated and will be ignored. Please read the updated documentation and update your client configuration to remove this option."); + continue; + case "sync_business_shared_folders": + addLogEntry(); + addLogEntry("The option 'sync_business_shared_folders' has been depreciated and the process for synchronising Microsoft OneDrive Business Shared Folders has changed."); + addLogEntry("Please review the revised documentation on how to correctly configure this application feature."); + addLogEntry("You must update your client configuration and make changes to your local filesystem and online data to use this capability."); + return false; + default: + break; } - } - // read file line by line - auto range = file.byLine(); - foreach (line; range) { - lineBuffer = stripLeft(line).to!string; - if (lineBuffer.length == 0 || lineBuffer[0] == ';' || lineBuffer[0] == '#') continue; - auto c = lineBuffer.matchFirst(configRegex); - if (!c.empty) { - c.popFront(); // skip the whole match - string key = c.front.dup; - auto p = key in boolValues; - if (p) { - c.popFront(); - // only accept "true" as true value. TODO Should we support other formats? - setValueBool(key, c.front.dup == "true" ? true : false); - - // skip_dotfiles tracking for change - if (key == "skip_dotfiles") { - configFileSkipDotfiles = true; + // Process other keys + if (key in boolValues) { + // Only accept "true" as true value. + setValueBool(key, c.front.dup == "true" ? 
true : false); + if (key == "skip_dotfiles") configFileSkipDotfiles = true; + if (key == "skip_symlinks") configFileSkipSymbolicLinks = true; + if (key == "sync_business_shared_items") configFileSyncBusinessSharedItems = true; + } else if (key in stringValues) { + string value = c.front.dup; + setValueString(key, value); + if (key == "sync_dir") { + if (!strip(value).empty) { + configFileSyncDir = value; + } else { + addLogEntry(); + addLogEntry("Invalid value for key in config file: " ~ key); + addLogEntry("ERROR: sync_dir in config file cannot be empty - this is a fatal error and must be corrected"); + addLogEntry(); + forceExit(); } - - // skip_symlinks tracking for change - if (key == "skip_symlinks") { - configFileSkipSymbolicLinks = true; + } else if (key == "skip_file") { + // Handle multiple 'config' file entries of skip_file + if (configFileSkipFile.empty) { + // currently no entry exists + configFileSkipFile = c.front.dup; + } else { + // add to existing entry + configFileSkipFile = configFileSkipFile ~ "|" ~ to!string(c.front.dup); + setValueString("skip_file", configFileSkipFile); } - - // sync_business_shared_items tracking for change - if (key == "sync_business_shared_items") { - configFileSyncBusinessSharedItems = true; + } else if (key == "skip_dir") { + // Handle multiple entries of skip_dir + if (configFileSkipDir.empty) { + // currently no entry exists + configFileSkipDir = c.front.dup; + } else { + // add to existing entry + configFileSkipDir = configFileSkipDir ~ "|" ~ to!string(c.front.dup); + setValueString("skip_dir", configFileSkipDir); } - - } else { - auto pp = key in stringValues; - if (pp) { - c.popFront(); - setValueString(key, c.front.dup); - // detect need for --resync for these: - // --syncdir ARG - // --skip-file ARG - // --skip-dir ARG - - // sync_dir - if (key == "sync_dir") { - // configure a temp variable - string tempSyncDirValue = c.front.dup; - // is this empty ? 
- if (!strip(tempSyncDirValue).empty) { - configFileSyncDir = tempSyncDirValue; - } else { - // sync_dir cannot be empty - addLogEntry("Invalid value for key in config file: " ~ key); - addLogEntry("ERROR: sync_dir in config file cannot be empty - this is a fatal error and must be corrected"); - exit(EXIT_FAILURE); - } - } - - // skip_file - if (key == "skip_file") { - // Handle multiple entries of skip_file - if (configFileSkipFile.empty) { - // currently no entry exists - configFileSkipFile = c.front.dup; - } else { - // add to existing entry - configFileSkipFile = configFileSkipFile ~ "|" ~ to!string(c.front.dup); - setValueString("skip_file", configFileSkipFile); - } - } - - // skip_dir - if (key == "skip_dir") { - // Handle multiple entries of skip_dir - if (configFileSkipDir.empty) { - // currently no entry exists - configFileSkipDir = c.front.dup; - } else { - // add to existing entry - configFileSkipDir = configFileSkipDir ~ "|" ~ to!string(c.front.dup); - setValueString("skip_dir", configFileSkipDir); - } - } - - // --single-directory Strip quotation marks from path - // This is an issue when using ONEDRIVE_SINGLE_DIRECTORY with Docker - if (key == "single_directory") { - // Strip quotation marks from provided path - string configSingleDirectory = strip(to!string(c.front.dup), "\""); - setValueString("single_directory", configSingleDirectory); - } - - // Azure AD Configuration - if (key == "azure_ad_endpoint") { - string azureConfigValue = strip(c.front.dup); - switch(azureConfigValue) { - case "": - addLogEntry("Using detault config option for Global Azure AD Endpoints"); - break; - case "USL4": - addLogEntry("Using config option for Azure AD for US Government Endpoints"); - break; - case "USL5": - addLogEntry("Using config option for Azure AD for US Government Endpoints (DOD)"); - break; - case "DE": - addLogEntry("Using config option for Azure AD Germany"); - break; - case "CN": - addLogEntry("Using config option for Azure AD China operated by 21Vianet"); - break; - // Default - all other entries - default: - addLogEntry("Unknown Azure AD Endpoint - using Global Azure AD Endpoints"); - } - } - - // Application ID - if (key == "application_id") { - // This key cannot be empty - string tempApplicationId = strip(c.front.dup); - if (tempApplicationId.empty) { - addLogEntry("Invalid value for key in config file - using default value: " ~ key); - addLogEntry("application_id in config file cannot be empty - using default application_id", ["debug"]); - setValueString("application_id", defaultApplicationId); - } else { - setValueString("application_id", tempApplicationId); - } - } - - // Drive ID - if (key == "drive_id") { - // This key cannot be empty - string tempApplicationId = strip(c.front.dup); - if (tempApplicationId.empty) { - addLogEntry("Invalid value for key in config file: " ~ key); - addLogEntry("drive_id in config file cannot be empty - this is a fatal error and must be corrected by removing this entry from your config file", ["debug"]); - exit(EXIT_FAILURE); - } else { - setValueString("drive_id", tempApplicationId); - configFileDriveId = tempApplicationId; - } - } - - // Log Directory - if (key == "log_dir") { - // This key cannot be empty - string tempLogDir = strip(c.front.dup); - if (tempLogDir.empty) { - addLogEntry("Invalid value for key in config file - using default value: " ~ key); - addLogEntry("log_dir in config file cannot be empty - using default log_dir", ["debug"]); - setValueString("log_dir", defaultLogFileDir); - } else { - setValueString("log_dir", 
tempLogDir); - } - } - + } else if (key == "single_directory") { + string configFileSingleDirectory = strip(value, "\""); + setValueString("single_directory", configFileSingleDirectory); + } else if (key == "azure_ad_endpoint") { + switch (value) { + case "": + addLogEntry("Using default config option for Global Azure AD Endpoints"); + break; + case "USL4": + addLogEntry("Using config option for Azure AD for US Government Endpoints"); + break; + case "USL5": + addLogEntry("Using config option for Azure AD for US Government Endpoints (DOD)"); + break; + case "DE": + addLogEntry("Using config option for Azure AD Germany"); + break; + case "CN": + addLogEntry("Using config option for Azure AD China operated by 21Vianet"); + break; + default: + addLogEntry("Unknown Azure AD Endpoint - using Global Azure AD Endpoints"); + } + } else if (key == "application_id") { + string tempApplicationId = strip(value); + if (tempApplicationId.empty) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + addLogEntry("application_id in config file cannot be empty - using default application_id", ["debug"]); + setValueString("application_id", defaultApplicationId); + } + } else if (key == "drive_id") { + string tempDriveId = strip(value); + if (tempDriveId.empty) { + addLogEntry(); + addLogEntry("Invalid value for key in config file: " ~ key); + addLogEntry("drive_id in config file cannot be empty - this is a fatal error and must be corrected by removing this entry from your config file.", ["debug"]); + addLogEntry(); + forceExit(); } else { - auto ppp = key in longValues; - if (ppp) { - c.popFront(); - ulong thisConfigValue; - - // Can this value actually be converted to an integer? - try { - thisConfigValue = to!long(c.front.dup); - } catch (std.conv.ConvException) { - addLogEntry("Invalid value for key in config file: " ~ key); - return false; - } - - setValueLong(key, thisConfigValue); - - // if key is 'monitor_interval' the value must be 300 or greater - if (key == "monitor_interval") { - // temp value - ulong tempValue = thisConfigValue; - // the temp value needs to be greater than 300 - if (tempValue < 300) { - addLogEntry("Invalid value for key in config file - using default value: " ~ key); - tempValue = 300; - } - setValueLong("monitor_interval", to!long(tempValue)); - } - - // if key is 'monitor_fullscan_frequency' the value must be 12 or greater - if (key == "monitor_fullscan_frequency") { - // temp value - ulong tempValue = thisConfigValue; - // the temp value needs to be greater than 12 - if (tempValue < 12) { - // If this is not set to zero (0) then we are not disabling 'monitor_fullscan_frequency' - if (tempValue != 0) { - // invalid value - addLogEntry("Invalid value for key in config file - using default value: " ~ key); - tempValue = 12; - } - } - setValueLong("monitor_fullscan_frequency", to!long(tempValue)); - } - - // if key is 'space_reservation' we have to calculate MB -> bytes - if (key == "space_reservation") { - // temp value - ulong tempValue = thisConfigValue; - // a value of 0 needs to be made at least 1MB .. - if (tempValue == 0) { - addLogEntry("Invalid value for key in config file - using 1MB: " ~ key); - tempValue = 1; - } - setValueLong("space_reservation", to!long(tempValue * 2^^20)); - } - - // if key is 'ip_protocol_version' this has to be a value of 0 or 1 or 2 .. 
nothing else - if (key == "ip_protocol_version") { - // temp value - ulong tempValue = thisConfigValue; - // If greater than 2, set to default - if (tempValue > 2) { - addLogEntry("Invalid value for key in config file - using default value: " ~ key); - // Set to default of 0 - tempValue = 0; - } - setValueLong("ip_protocol_version", to!long(tempValue)); - } - - } else { - // unknown key - addLogEntry("Unknown key in config file: " ~ key); - - // handle depreciation - bool ignore_depreciation = false; - - // min_notify_changes has been depreciated - if (key == "min_notify_changes") { - addLogEntry(); - addLogEntry("The option 'min_notify_changes' has been depreciated and will be ignored. Please read the updated documentation and update your client configuration."); - addLogEntry(); - ignore_depreciation = true; - } - - // force_http_2 has been depreciated - if (key == "force_http_2") { - addLogEntry(); - addLogEntry("The option 'force_http_2' has been depreciated and will be ignored. Please read the updated documentation and update your client configuration."); - addLogEntry(); - ignore_depreciation = true; - } - - // Application configuration update required for Business Shared Folders - if (key == "sync_business_shared_folders") { - addLogEntry(); - addLogEntry("The process for synchronising Microsoft OneDrive Business Shared Folders has changed."); - addLogEntry("Please review the revised documentation on how to configure this application feature. You must update your client configuration and make any necessary online adjustments accordingly."); - addLogEntry(); - } - // Return ignore_depreciation - return ignore_depreciation; + configFileDriveId = tempDriveId; + } + } else if (key == "log_dir") { + string tempLogDir = strip(value); + if (tempLogDir.empty) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + addLogEntry("log_dir in config file cannot be empty - using default log_dir", ["debug"]); + setValueString("log_dir", defaultLogFileDir); + } + } + } else if (key in longValues) { + ulong thisConfigValue; + try { + thisConfigValue = to!ulong(c.front.dup); + } catch (std.conv.ConvException) { + addLogEntry("Invalid value for key in config file: " ~ key); + return false; + } + setValueLong(key, thisConfigValue); + if (key == "monitor_interval") { // if key is 'monitor_interval' the value must be 300 or greater + ulong tempValue = thisConfigValue; + // the temp value needs to be 300 or greater + if (tempValue < 300) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + tempValue = 300; + } + setValueLong("monitor_interval", tempValue); + } else if (key == "monitor_fullscan_frequency") { // if key is 'monitor_fullscan_frequency' the value must be 12 or greater + ulong tempValue = thisConfigValue; + // the temp value needs to be 12 or greater + if (tempValue < 12) { + // If this is not set to zero (0) then we are not disabling 'monitor_fullscan_frequency' + if (tempValue != 0) { + // invalid value + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + tempValue = 12; } } + setValueLong("monitor_fullscan_frequency", tempValue); + } else if (key == "space_reservation") { // if key is 'space_reservation' we have to calculate MB -> bytes + ulong tempValue = thisConfigValue; + // a value of 0 needs to be made at least 1MB .. 
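	// Note: 'space_reservation' is configured in MB but stored internally in bytes; the
	// multiplication by 2^^20 below performs that conversion (for example, a configured
	// value of 50 becomes 50 * 1048576 = 52428800 bytes).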
+ if (tempValue == 0) { + addLogEntry("Invalid value for key in config file - using 1MB: " ~ key); + tempValue = 1; + } + setValueLong("space_reservation", tempValue * 2^^20); + } else if (key == "ip_protocol_version") { + ulong tempValue = thisConfigValue; + if (tempValue > 2) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + tempValue = 0; + } + setValueLong("ip_protocol_version", tempValue); } } else { - // malformed config line - addLogEntry("Malformed config line: " ~ lineBuffer); + addLogEntry("Unknown key in config file: " ~ key); return false; } } - - // Close the file access - file.close(); - // Free object and memory - object.destroy(file); - object.destroy(range); - object.destroy(lineBuffer); + // Return that we were able to read in the config file and parse the options without issue return true; } - + // Update the application configuration based on CLI passed in parameters void updateFromArgs(string[] cliArgs) { // Add additional options that are NOT configurable via config file @@ -2223,6 +2095,13 @@ class ApplicationConfig { debugLogging = debugLoggingInput; verbosityCount = verbosityCountInput; } + + // What IP protocol is going to be used to access Microsoft OneDrive + void displayIPProtocol() { + if (getValueLong("ip_protocol_version") == 0) addLogEntry("Using IPv4 and IPv6 (if configured) for all network operations"); + if (getValueLong("ip_protocol_version") == 1) addLogEntry("Forcing client to use IPv4 connections only"); + if (getValueLong("ip_protocol_version") == 2) addLogEntry("Forcing client to use IPv6 connections only"); + } } // Output the full application help when --help is passed in diff --git a/src/main.d b/src/main.d index 120aaccfa..4cdecc8ad 100644 --- a/src/main.d +++ b/src/main.d @@ -373,6 +373,9 @@ int main(string[] cliArgs) { } } + // What IP Protocol are we going to use to access the network with + appConfig.displayIPProtocol(); + // Test if OneDrive service can be reached, exit if it cant be reached addLogEntry("Testing network to ensure network connectivity to Microsoft OneDrive Service", ["debug"]); online = testInternetReachability(appConfig); diff --git a/src/sync.d b/src/sync.d index 4a9487b08..a7d2e1621 100644 --- a/src/sync.d +++ b/src/sync.d @@ -61,13 +61,6 @@ class SyncException: Exception { } } -void forceExit() { - // Allow logging to flush and complete - Thread.sleep(dur!("msecs")(500)); - // Force Exit - exit(EXIT_FAILURE); -} - class SyncEngine { // Class Variables ApplicationConfig appConfig; diff --git a/src/util.d b/src/util.d index 04fa25c26..518b608ac 100644 --- a/src/util.d +++ b/src/util.d @@ -1050,4 +1050,11 @@ int calc_eta(size_t counter, size_t iterations, ulong start_time) { // Return the average time per iteration for the last iteration return cast(int) ceil(avg_time_per_iteration); } +} + +void forceExit() { + // Allow logging to flush and complete + Thread.sleep(dur!("msecs")(500)); + // Force Exit + exit(EXIT_FAILURE); } \ No newline at end of file From 0ce088f0e26c04cc4db617f3cdb5a7d93d6a83e6 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sat, 13 Jan 2024 16:03:43 +1100 Subject: [PATCH 015/305] Update 'root:' removal for 'skip_dir' path checking * Update 'root:' removal for 'skip_dir' path checking --- src/sync.d | 35 ++++++++++++++++++++++------------- src/util.d | 2 +- 2 files changed, 23 insertions(+), 14 deletions(-) diff --git a/src/sync.d b/src/sync.d index a7d2e1621..f431a2206 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1379,16 +1379,15 @@ class SyncEngine { 
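	// Note: item paths returned by the OneDrive API can carry a '/drive/root:' style prefix (for example 'root:/folder');
	// the ':' checks below detect this form so the prefix can be stripped via processPathToRemoveRootReference()
	// before any 'skip_dir' matching is performed.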
simplePathToCheck = onedriveJSONItem["name"].str; } - // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder + // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder // then isDirNameExcluded matching will not work - // Clean up 'root:' if present - if (startsWith(simplePathToCheck, "root:")){ + if (simplePathToCheck.canFind(":")) { addLogEntry("Updating simplePathToCheck to remove 'root:'", ["debug"]); - simplePathToCheck = strip(simplePathToCheck, "root:"); + simplePathToCheck = processPathToRemoveRootReference(simplePathToCheck); } - if (startsWith(complexPathToCheck, "root:")){ + if (complexPathToCheck.canFind(":")) { addLogEntry("Updating complexPathToCheck to remove 'root:'", ["debug"]); - complexPathToCheck = strip(complexPathToCheck, "root:"); + complexPathToCheck = processPathToRemoveRootReference(complexPathToCheck); } // OK .. what checks are we doing? @@ -3233,16 +3232,15 @@ class SyncEngine { simplePathToCheck = onedriveJSONItem["name"].str; } - // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder + // If 'simplePathToCheck' or 'complexPathToCheck' is of the following format: root:/folder // then isDirNameExcluded matching will not work - // Clean up 'root:' if present - if (startsWith(simplePathToCheck, "root:")){ + if (simplePathToCheck.canFind(":")) { addLogEntry("Updating simplePathToCheck to remove 'root:'", ["debug"]); - simplePathToCheck = strip(simplePathToCheck, "root:"); + simplePathToCheck = processPathToRemoveRootReference(simplePathToCheck); } - if (startsWith(complexPathToCheck, "root:")){ + if (complexPathToCheck.canFind(":")) { addLogEntry("Updating complexPathToCheck to remove 'root:'", ["debug"]); - complexPathToCheck = strip(complexPathToCheck, "root:"); + complexPathToCheck = processPathToRemoveRootReference(complexPathToCheck); } // OK .. what checks are we doing? @@ -3266,7 +3264,7 @@ class SyncEngine { } } // End Result - addLogEntry("skip_dir exclude result (directory based): " ~ clientSideRuleExcludesPath, ["debug"]); + addLogEntry("skip_dir exclude result (directory based): " ~ to!string(clientSideRuleExcludesPath), ["debug"]); if (clientSideRuleExcludesPath) { // This path should be skipped addLogEntry("Skipping item - excluded by skip_dir config: " ~ matchDisplay, ["verbose"]); @@ -7541,4 +7539,15 @@ class SyncEngine { object.destroy(uploadFileOneDriveApiInstance); } } + + // Function to process the path by removing prefix up to ':' - remove '/drive/root:' from a path string + string processPathToRemoveRootReference(ref string pathToCheck) { + long colonIndex = pathToCheck.indexOf(":"); + if (colonIndex != -1) { + addLogEntry("Updating " ~ pathToCheck ~ " to remove prefix up to ':'", ["debug"]); + pathToCheck = pathToCheck[colonIndex + 1 .. $]; + addLogEntry("Updated path for 'skip_dir' check: " ~ pathToCheck, ["debug"]); + } + return pathToCheck; + } } \ No newline at end of file diff --git a/src/util.d b/src/util.d index 518b608ac..68ae41829 100644 --- a/src/util.d +++ b/src/util.d @@ -167,7 +167,7 @@ Regex!char wild2regex(const(char)[] pattern) { str ~= "\\+"; break; case ' ': - str ~= "\\s"; // Changed to match exactly one whitespace. str ~= "\\s+"; + str ~= "\\s"; // Changed to match exactly one whitespace. 
Was: str ~= "\\s+"; break; case '/': str ~= "\\/"; From aab6c87d692ddc015d5402141f986614efabc348 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sat, 13 Jan 2024 18:08:55 +1100 Subject: [PATCH 016/305] Malformed config line handling Malformed config line should cause the application to exit. This then prevents malformed lines which might be valid configuration items, from not being read in, thus, causing incorrect application function based on expectations. --- src/config.d | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/config.d b/src/config.d index be0462591..e4aa3adb9 100644 --- a/src/config.d +++ b/src/config.d @@ -688,7 +688,9 @@ class ApplicationConfig { auto c = lineBuffer.matchFirst(configRegex); if (c.empty) { addLogEntry("Malformed config line: " ~ lineBuffer); - continue; + addLogEntry(); + addLogEntry("Please review the documentation on how to correctly configure this application."); + forceExit(); } c.popFront(); // skip the whole match From 5dee97700ec035be8664ea90200163a630d25d1a Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 14 Jan 2024 06:54:15 +1100 Subject: [PATCH 017/305] If requesting --help, dont attempt to load any 'config' file * If requesting --help, dont attempt to load any 'config' file --- src/config.d | 85 +++++++++++++++++++++++++++------------------------- src/main.d | 5 +++- 2 files changed, 49 insertions(+), 41 deletions(-) diff --git a/src/config.d b/src/config.d index e4aa3adb9..5b5f4dd8b 100644 --- a/src/config.d +++ b/src/config.d @@ -199,7 +199,7 @@ class ApplicationConfig { bool shellEnvironmentSet = false; // Initialise the application configuration - bool initialise(string confdirOption) { + bool initialise(string confdirOption, bool helpRequested) { // Default runtime configuration - entries in config file ~/.config/onedrive/config or derived from variables above // An entry here means it can be set via the config file if there is a coresponding entry, read from config and set via update_from_args() @@ -494,56 +494,61 @@ class ApplicationConfig { auto convertedValue = parse!long(valueToConvert, 8); convertedPermissionValue = to!int(convertedValue); - // Initialise the application using the configuration file if it exists - if (!exists(userConfigFilePath)) { - // 'user' configuration file does not exist - // Is there a system configuration file? - if (!exists(systemConfigFilePath)) { - // 'system' configuration file does not exist - addLogEntry("No user or system config file found, using application defaults", ["verbose"]); - applicableConfigFilePath = userConfigFilePath; - configurationInitialised = true; + // Do not try and load any user configuration file if --help was used + if (helpRequested) { + return true; + } else { + // Initialise the application using the configuration file if it exists + if (!exists(userConfigFilePath)) { + // 'user' configuration file does not exist + // Is there a system configuration file? + if (!exists(systemConfigFilePath)) { + // 'system' configuration file does not exist + addLogEntry("No user or system config file found, using application defaults", ["verbose"]); + applicableConfigFilePath = userConfigFilePath; + configurationInitialised = true; + } else { + // 'system' configuration file exists + // can we load the configuration file without error? 
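	// Note: a 'user' config file always takes precedence; this 'system' config file path
	// is only evaluated when no 'user' config file exists.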
+ if (loadConfigFile(systemConfigFilePath)) { + // configuration file loaded without error + addLogEntry("System configuration file successfully loaded"); + + // Set 'applicableConfigFilePath' to equal the 'config' we loaded + applicableConfigFilePath = systemConfigFilePath; + // Update the configHashFile path value to ensure we are using the system 'config' file for the hash + configHashFile = buildNormalizedPath(buildPath(systemConfigDirName, ".config.hash")); + configurationInitialised = true; + } else { + // there was a problem loading the configuration file + addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering + addLogEntry("System configuration file has errors - please check your configuration"); + } + } } else { - // 'system' configuration file exists + // 'user' configuration file exists // can we load the configuration file without error? - if (loadConfigFile(systemConfigFilePath)) { + if (loadConfigFile(userConfigFilePath)) { // configuration file loaded without error - addLogEntry("System configuration file successfully loaded"); + addLogEntry("Configuration file successfully loaded"); // Set 'applicableConfigFilePath' to equal the 'config' we loaded - applicableConfigFilePath = systemConfigFilePath; - // Update the configHashFile path value to ensure we are using the system 'config' file for the hash - configHashFile = buildNormalizedPath(buildPath(systemConfigDirName, ".config.hash")); + applicableConfigFilePath = userConfigFilePath; configurationInitialised = true; } else { // there was a problem loading the configuration file addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering - addLogEntry("System configuration file has errors - please check your configuration"); + addLogEntry("Configuration file has errors - please check your configuration"); } - } - } else { - // 'user' configuration file exists - // can we load the configuration file without error? 
- if (loadConfigFile(userConfigFilePath)) { - // configuration file loaded without error - addLogEntry("Configuration file successfully loaded"); - - // Set 'applicableConfigFilePath' to equal the 'config' we loaded - applicableConfigFilePath = userConfigFilePath; - configurationInitialised = true; - } else { - // there was a problem loading the configuration file - addLogEntry(); // used instead of an empty 'writeln();' to ensure the line break is correct in the buffered console output ordering - addLogEntry("Configuration file has errors - please check your configuration"); } - } - - // Advise the user path that we will use for the application state data - if (canFind(applicableConfigFilePath, configDirName)) { - addLogEntry("Using 'user' configuration path for application state data: " ~ configDirName, ["verbose"]); - } else { - if (canFind(applicableConfigFilePath, systemConfigDirName)) { - addLogEntry("Using 'system' configuration path for application state data: " ~ systemConfigDirName, ["verbose"]); + + // Advise the user path that we will use for the application state data + if (canFind(applicableConfigFilePath, configDirName)) { + addLogEntry("Using 'user' configuration path for application state data: " ~ configDirName, ["verbose"]); + } else { + if (canFind(applicableConfigFilePath, systemConfigDirName)) { + addLogEntry("Using 'system' configuration path for application state data: " ~ systemConfigDirName, ["verbose"]); + } } } diff --git a/src/main.d b/src/main.d index 4cdecc8ad..5776045a8 100644 --- a/src/main.d +++ b/src/main.d @@ -71,6 +71,8 @@ int main(string[] cliArgs) { bool debugLogging = false; // Monitor loop failures bool monitorFailures = false; + // Help requested + bool helpRequested = false; // DEVELOPER OPTIONS OUTPUT VARIABLES bool displayMemoryUsage = false; @@ -111,6 +113,7 @@ int main(string[] cliArgs) { // Print help and exit if (cliOptions.helpWanted) { cliArgs ~= "--help"; + helpRequested = true; } // Print the version and exit if (printVersion) { @@ -176,7 +179,7 @@ int main(string[] cliArgs) { // Initialise the application configuration, utilising --confdir if it was passed in // Otherwise application defaults will be used to configure the application - if (!appConfig.initialise(confdirOption)) { + if (!appConfig.initialise(confdirOption, helpRequested)) { // There was an error loading the user specified application configuration // Error message already printed return EXIT_FAILURE; From 635102e8afd017553d44ebf9a39fea206d88f71d Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 14 Jan 2024 07:17:25 +1100 Subject: [PATCH 018/305] Perform DB scan earlier and reuse result * Remove potentially redundant applicability check of a path if this is already in the database. 
This is a manual merge of #2525 --- src/sync.d | 71 +++++++++++++++++++++++++++--------------------------- 1 file changed, 36 insertions(+), 35 deletions(-) diff --git a/src/sync.d b/src/sync.d index f431a2206..c8c943934 100644 --- a/src/sync.d +++ b/src/sync.d @@ -4011,45 +4011,48 @@ class SyncEngine { if (canFind(pathFakeDeletedArray, path)) return; } - // This not a Client Side Filtering check, nor a Microsoft Check, but is a sanity check that the path provided is UTF encoded correctly - // Check the std.encoding of the path against: Unicode 5.0, ASCII, ISO-8859-1, ISO-8859-2, WINDOWS-1250, WINDOWS-1251, WINDOWS-1252 - if (!unwanted) { - if(!isValid(path)) { - // Path is not valid according to https://dlang.org/phobos/std_encoding.html - addLogEntry("Skipping item - invalid character encoding sequence: " ~ path, ["info", "notify"]); - unwanted = true; + // Check if item if found in database + bool itemFoundInDB = pathFoundInDatabase(path); + + // If the item is already found in the database, it is redundant to perform these checks + if (!itemFoundInDB) { + // This not a Client Side Filtering check, nor a Microsoft Check, but is a sanity check that the path provided is UTF encoded correctly + // Check the std.encoding of the path against: Unicode 5.0, ASCII, ISO-8859-1, ISO-8859-2, WINDOWS-1250, WINDOWS-1251, WINDOWS-1252 + if (!unwanted) { + if(!isValid(path)) { + // Path is not valid according to https://dlang.org/phobos/std_encoding.html + addLogEntry("Skipping item - invalid character encoding sequence: " ~ path, ["info", "notify"]); + unwanted = true; + } + } + + // Check this path against the Client Side Filtering Rules + // - check_nosync + // - skip_dotfiles + // - skip_symlinks + // - skip_file + // - skip_dir + // - sync_list + // - skip_size + if (!unwanted) { + unwanted = checkPathAgainstClientSideFiltering(path); + } + + // Check this path against the Microsoft Naming Conventions & Restristions + // - Check path against Microsoft OneDrive restriction and limitations about Windows naming for files and folders + // - Check path for bad whitespace items + // - Check path for HTML ASCII Codes + // - Check path for ASCII Control Codes + if (!unwanted) { + unwanted = checkPathAgainstMicrosoftNamingRestrictions(path); } - } - - // Check this path against the Client Side Filtering Rules - // - check_nosync - // - skip_dotfiles - // - skip_symlinks - // - skip_file - // - skip_dir - // - sync_list - // - skip_size - if (!unwanted) { - unwanted = checkPathAgainstClientSideFiltering(path); - } - - // Check this path against the Microsoft Naming Conventions & Restristions - // - Check path against Microsoft OneDrive restriction and limitations about Windows naming for files and folders - // - Check path for bad whitespace items - // - Check path for HTML ASCII Codes - // - Check path for ASCII Control Codes - if (!unwanted) { - unwanted = checkPathAgainstMicrosoftNamingRestrictions(path); } if (!unwanted) { // At this point, this path, we want to scan for new data as it is not excluded if (isDir(path)) { - // Check if this path in the database - bool directoryFoundInDB = pathFoundInDatabase(path); - // Was the path found in the database? - if (!directoryFoundInDB) { + if (!itemFoundInDB) { // Path not found in database when searching all drive id's if (!cleanupLocalFiles) { // --download-only --cleanup-local-files not used @@ -4136,10 +4139,8 @@ class SyncEngine { // pipes - whilst technically valid files, are not valid for this client // prw-rw-r--. 
1 user user 0 Jul 7 05:55 my_pipe if (isFile(path)) { - // Path is a valid file, not a pipe - bool fileFoundInDB = pathFoundInDatabase(path); // Was the file found in the database? - if (!fileFoundInDB) { + if (!itemFoundInDB) { // File not found in database when searching all drive id's // Do we upload the file or clean up the file? if (!cleanupLocalFiles) { From 23f2e2f8350fe7df621edefc5983e88ca6b364ca Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 14 Jan 2024 07:39:51 +1100 Subject: [PATCH 019/305] Implement Docker capability to disable validations * Implement Docker options to utilise --disable-download-validation and --disable-upload-validation --- contrib/docker/entrypoint.sh | 14 ++++++++++++++ docs/docker.md | 2 ++ docs/podman.md | 2 ++ 3 files changed, 18 insertions(+) diff --git a/contrib/docker/entrypoint.sh b/contrib/docker/entrypoint.sh index f75f937d3..30e922c97 100755 --- a/contrib/docker/entrypoint.sh +++ b/contrib/docker/entrypoint.sh @@ -125,6 +125,20 @@ if [ "${ONEDRIVE_DRYRUN:=0}" == "1" ]; then ARGS=(--dry-run ${ARGS[@]}) fi +# Tell client to disable download validation +if [ "${ONEDRIVE_DISABLE_DOWNLOAD_VALIDATION:=0}" == "1" ]; then + echo "# We are disabling the download integrity checks performed by this client" + echo "# Adding --disable-download-validation" + ARGS=(--disable-download-validation ${ARGS[@]}) +fi + +# Tell client to disable upload validation +if [ "${ONEDRIVE_DISABLE_UPLOAD_VALIDATION:=0}" == "1" ]; then + echo "# We are disabling the upload integrity checks performed by this client" + echo "# Adding --disable-upload-validation" + ARGS=(--disable-upload-validation ${ARGS[@]}) +fi + if [ ${#} -gt 0 ]; then ARGS=("${@}") fi diff --git a/docs/docker.md b/docs/docker.md index 1bf6251ff..7c904b092 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -275,6 +275,8 @@ docker run $firstRun --restart unless-stopped --name onedrive -v onedrive_conf:/ | ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | | ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. Default = "" | "mydir" | | ONEDRIVE_DRYRUN | Controls "--dry-run" option. Default is 0 | 1 | +| ONEDRIVE_DISABLE_DOWNLOAD_VALIDATION | Controls "--disable-download-validation" option. Default is 0 | 1 | +| ONEDRIVE_DISABLE_UPLOAD_VALIDATION | Controls "--disable-upload-validation" option. Default is 0 | 1 | ### Environment Variables Usage Examples **Verbose Output:** diff --git a/docs/podman.md b/docs/podman.md index 4f3474f34..57d2d9f01 100644 --- a/docs/podman.md +++ b/docs/podman.md @@ -295,6 +295,8 @@ podman run -it --name onedrive_work --user "${ONEDRIVE_UID}:${ONEDRIVE_GID}" \ | ONEDRIVE_DISPLAY_CONFIG | Controls "--display-running-config" switch on onedrive sync. Default is 0 | 1 | | ONEDRIVE_SINGLE_DIRECTORY | Controls "--single-directory" option. Default = "" | "mydir" | | ONEDRIVE_DRYRUN | Controls "--dry-run" option. Default is 0 | 1 | +| ONEDRIVE_DISABLE_DOWNLOAD_VALIDATION | Controls "--disable-download-validation" option. Default is 0 | 1 | +| ONEDRIVE_DISABLE_UPLOAD_VALIDATION | Controls "--disable-upload-validation" option. 
Default is 0 | 1 | ### Environment Variables Usage Examples **Verbose Output:** From 67a2b4bed4d67ece787892115932cbfcb9614119 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 14 Jan 2024 08:05:26 +1100 Subject: [PATCH 020/305] Update config.d * Ensure that we load the 'skip_file' defaults when attempting to compare any configuration file entry changes --- src/config.d | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/config.d b/src/config.d index 5b5f4dd8b..ae3916e33 100644 --- a/src/config.d +++ b/src/config.d @@ -1571,8 +1571,8 @@ class ApplicationConfig { skip_file_present = true; // Handle multiple entries of skip_file if (backupConfigFileSkipFile.empty) { - // currently no entry exists - backupConfigFileSkipFile = c.front.dup; + // currently no entry exists, include 'defaultSkipFile' entries + backupConfigFileSkipFile = defaultSkipFile ~ "|" ~ to!string(c.front.dup); } else { // add to existing backupConfigFileSkipFile entry backupConfigFileSkipFile = backupConfigFileSkipFile ~ "|" ~ to!string(c.front.dup); From a5be48dea914f521ebeb32fff5d8443c8bbbb564 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 14 Jan 2024 12:00:18 +1100 Subject: [PATCH 021/305] Update curlEngine.d Update comment for libcurl forbid_reuse --- src/curlEngine.d | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/curlEngine.d b/src/curlEngine.d index 3fdf07a99..528081b5f 100644 --- a/src/curlEngine.d +++ b/src/curlEngine.d @@ -89,7 +89,7 @@ class CurlEngine { // Ensure that we ARE NOT reusing TCP sockets connections - setting to 0 ensures that we ARE reusing connections (we did this in v2.4.xx) to ensure connections remained open and usable // Setting this to 1 ensures that when we close the curl instance, any open sockets are closed - which we need to do when running // multiple threads and API instances at the same time otherwise we run out of local files | sockets pretty quickly - // The libcurl default is 1 - ensure we are configuring not to reuse connections and leave unused sockets open + // The libcurl default is 0 as per the documentation (to REUSE connections) - ensure we are configuring NOT to reuse connections and leave unused sockets needlessly open which will lead to local socket exhaustion http.handle.set(CurlOption.forbid_reuse,1); if (httpsDebug) { From c5f2ec4622833d58f0843ffb20e1976b3ee27808 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Tue, 16 Jan 2024 05:19:25 +1100 Subject: [PATCH 022/305] Update sync.d Fix reported bug where the online date is being changed in download-only mode --- src/sync.d | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/src/sync.d b/src/sync.d index c8c943934..d30f7735a 100644 --- a/src/sync.d +++ b/src/sync.d @@ -2259,19 +2259,30 @@ class SyncEngine { addLogEntry("Local item time discrepancy detected: " ~ path, ["verbose"]); addLogEntry("This local item has a different modified time " ~ to!string(localModifiedTime) ~ " when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime), ["verbose"]); - // The file has been modified ... is the hash the same? + // The file has a different timestamp ... is the hash the same meaning no file modification? // Test the file hash as the date / time stamp is different // Generating a hash is computationally expensive - we only generate the hash if timestamp was different if (testFileHash(path, item)) { // The hash is the same .. 
so we need to fix-up the timestamp depending on where it is wrong - addLogEntry("Local item has the same hash value as the item online - correcting timestamp", ["verbose"]); + addLogEntry("Local item has the same hash value as the item online - correcting the applicable file timestamp", ["verbose"]); // Test if the local timestamp is newer if (localModifiedTime > itemModifiedTime) { - // The source of the out-of-date timestamp was OneDrive and this needs to be corrected to avoid always generating a hash test if timestamp is different - addLogEntry("The source of the incorrect timestamp was OneDrive online - correcting timestamp online", ["verbose"]); - if (!dryRun) { - // Attempt to update the online date time stamp - uploadLastModifiedTime(item.driveId, item.id, localModifiedTime.toUTC(), item.eTag); + // Local file is newer .. are we in a --download-only situation? + if (!appConfig.getValueBool("download_only")) { + // --download-only not being used + // The source of the out-of-date timestamp was OneDrive and this needs to be corrected to avoid always generating a hash test if timestamp is different + addLogEntry("The source of the incorrect timestamp was OneDrive online - correcting timestamp online", ["verbose"]); + if (!dryRun) { + // Attempt to update the online date time stamp + uploadLastModifiedTime(item.driveId, item.id, localModifiedTime.toUTC(), item.eTag); + } + } else { + // --download-only is being used ... local file needs to be corrected ... but why is it newer - indexing application potentially changing the timestamp ? + addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally due to --download-only", ["verbose"]); + if (!dryRun) { + addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); + setTimes(path, item.mtime, item.mtime); + } } } else { // The source of the out-of-date timestamp was the local file and this needs to be corrected to avoid always generating a hash test if timestamp is different From cda8317ef3c8e043a738159963a2ad542443acf6 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Tue, 16 Jan 2024 06:11:16 +1100 Subject: [PATCH 023/305] Fix that --dry-run prevents authorization Fix that --dry-run prevents authorization --- src/onedrive.d | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/src/onedrive.d b/src/onedrive.d index 545afc01e..1e68e05e1 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -564,11 +564,23 @@ class OneDriveApi { return false; } } else { - addLogEntry("Authorise this application by visiting:\n", ["consoleOnly"]); - addLogEntry(url ~ "\n\n", ["consoleOnly"]); - addLogEntry("Enter the response uri from your browser: ", ["consoleOnlyNoNewLine"]); - readln(response); - appConfig.applicationAuthorizeResponseUri = true; + // Are we in a --dry-run scenario? + if (!appConfig.getValueBool("dry_run")) { + // No --dry-run is being used + addLogEntry("Authorise this application by visiting:\n", ["consoleOnly"]); + addLogEntry(url ~ "\n", ["consoleOnly"]); + addLogEntry("Enter the response uri from your browser: ", ["consoleOnlyNoNewLine"]); + readln(response); + appConfig.applicationAuthorizeResponseUri = true; + } else { + // The application cannot be authorised when using --dry-run as we have to write out the authentication data, which negates the whole 'dry-run' process + addLogEntry(); + addLogEntry("The application requires authorisation, which involves saving authentication data on your system. 
Note that authorisation cannot be completed with the '--dry-run' option."); + addLogEntry(); + addLogEntry("To exclusively authorise the application without performing any additional actions, use this command: onedrive"); + addLogEntry(); + forceExit(); + } } // match the authorization code auto c = matchFirst(response, r"(?:[\?&]code=)([\w\d-.]+)"); From b1a07a25f50f18047b64912222114c6b2956cb71 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Wed, 17 Jan 2024 08:53:50 +1100 Subject: [PATCH 024/305] Update sync.d * If timestamp needs to be corrected, return false so that the correct actions can be taken * If in a --download-only scenario, do not update the timestamp online --- src/sync.d | 92 +++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 67 insertions(+), 25 deletions(-) diff --git a/src/sync.d b/src/sync.d index d30f7735a..0f40afd2c 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1673,6 +1673,7 @@ class SyncEngine { // As such, we should not be doing any other checks here to determine if the JSON item is wanted .. it is if (exists(newItemPath)) { + addLogEntry("Path on local disk already exists", ["debug"]); // Issue #2209 fix - test if path is a bad symbolic link if (isSymlink(newItemPath)) { addLogEntry("Path on local disk is a symbolic link ........", ["debug"]); @@ -1696,7 +1697,7 @@ class SyncEngine { return; } else { // Item details from OneDrive and local item details in database are NOT in-sync - addLogEntry("The item to sync exists locally but is NOT in the local database - otherwise this would be handled as changed item", ["debug"]); + addLogEntry("The item to sync exists locally but is potentially not in the local database - otherwise this would be handled as changed item", ["debug"]); // Which object is newer? The local file or the remote file? 
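The comparison that follows depends on reducing both timestamps to whole-second resolution before testing equality, because local filesystems commonly carry sub-second precision while OneDrive reports whole seconds. A self-contained sketch with made-up times:

```d
import std.datetime.systime : SysTime;
import std.datetime.date : DateTime;
import std.datetime.timezone : UTC;
import core.time : Duration, msecs;
import std.stdio : writeln;

void main() {
	// The same wall-clock second, but the local copy carries 123 ms of
	// sub-second precision - a naive comparison reports a false mismatch.
	auto localModifiedTime = SysTime(DateTime(2024, 1, 14, 7, 17, 25), 123.msecs, UTC());
	auto itemModifiedTime  = SysTime(DateTime(2024, 1, 14, 7, 17, 25), UTC());

	writeln("raw equality:            ", localModifiedTime == itemModifiedTime); // false

	// Reduce time resolution to seconds before comparing.
	localModifiedTime.fracSecs = Duration.zero;
	itemModifiedTime.fracSecs  = Duration.zero;
	writeln("second-resolution check: ", localModifiedTime == itemModifiedTime); // true
}
```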
SysTime localModifiedTime = timeLastModified(newItemPath).toUTC(); @@ -1713,8 +1714,27 @@ class SyncEngine { // item id is in the database // no local rename // no download needed - addLogEntry("Local item modified time is newer based on UTC time conversion - keeping local item as this exists in the local database", ["verbose"]); - addLogEntry("Skipping OneDrive change as this is determined to be unwanted due to local item modified time being newer than OneDrive item and present in the sqlite database", ["debug"]); + + // Fetch the latest DB record - as this could have been updated by the isItemSynced if the date online was being corrected, then the DB updated as a result + Item latestDatabaseItem; + itemDB.selectById(newDatabaseItem.driveId, newDatabaseItem.id, latestDatabaseItem); + addLogEntry("latestDatabaseItem: " ~ to!string(latestDatabaseItem), ["debug"]); + + SysTime latestItemModifiedTime = latestDatabaseItem.mtime; + // Reduce time resolution to seconds before comparing + latestItemModifiedTime.fracSecs = Duration.zero; + + if (localModifiedTime == latestItemModifiedTime) { + // Log action + addLogEntry("Local file modified time matches existing database record - keeping local file", ["verbose"]); + addLogEntry("Skipping OneDrive change as this is determined to be unwanted due to local file modified time matching database data", ["debug"]); + } else { + // Log action + addLogEntry("Local file modified time is newer based on UTC time conversion - keeping local file as this exists in the local database", ["verbose"]); + addLogEntry("Skipping OneDrive change as this is determined to be unwanted due to local file modified time being newer than OneDrive file and present in the sqlite database", ["debug"]); + } + // Return as no further action needed + return; } else { // item id is not in the database .. maybe a --resync ? // file exists locally but is not in the sqlite database - maybe a failed download? @@ -1846,18 +1866,20 @@ class SyncEngine { // Rename this item, passing in if we are performing a --dry-run or not safeBackup(changedItemPath, dryRun); - // If the item is a file, make sure that the local timestamp now is the same as the timestamp online - // Otherwise when we do the DB check, the move on the file system, the file technically has a newer timestamp - // which is 'correct' .. but we need to report locally the online timestamp here as the move was made online - if (changedOneDriveItem.type == ItemType.file) { - setTimes(changedItemPath, changedOneDriveItem.mtime, changedOneDriveItem.mtime); - } + // If we are in a --dry-run situation? , the actual rename did not occur - but we need to track like it did + if(!dryRun) { + // Flag that the item was moved | renamed + itemWasMoved = true; - // Flag that the item was moved | renamed - itemWasMoved = true; - - // If we are in a --dry-run situation, the actual rename did not occur - but we need to track like it did - if (dryRun) { + // If the item is a file, make sure that the local timestamp now is the same as the timestamp online + // Otherwise when we do the DB check, the move on the file system, the file technically has a newer timestamp + // which is 'correct' .. 
but we need to report locally the online timestamp here as the move was made online + if (changedOneDriveItem.type == ItemType.file) { + addLogEntry("Calling setTimes() for this file: " ~ changedItemPath, ["debug"]); + setTimes(changedItemPath, changedOneDriveItem.mtime, changedOneDriveItem.mtime); + } + } else { + // --dry-run situation - the actual rename did not occur - but we need to track like it did // Track this as a faked id item idsFaked ~= [changedOneDriveItem.driveId, changedOneDriveItem.id]; // We also need to track that we did not rename this path @@ -2151,8 +2173,10 @@ class SyncEngine { } // set the correct time on the downloaded file - addLogEntry("Calling setTimes() for this file: " ~ newItemPath, ["debug"]); - setTimes(newItemPath, itemModifiedTime, itemModifiedTime); + if (!dryRun) { + addLogEntry("Calling setTimes() for this file: " ~ newItemPath, ["debug"]); + setTimes(newItemPath, itemModifiedTime, itemModifiedTime); + } } catch (FileException e) { // display the error message displayFileSystemErrorMessage(e.msg, getFunctionName!({})); @@ -2240,6 +2264,9 @@ class SyncEngine { // Test if the given item is in-sync. Returns true if the given item corresponds to the local one bool isItemSynced(Item item, string path, string itemSource) { + + // This function is typically called when we are processing JSON objects from 'online' + // This function is not used in an --upload-only scenario if (!exists(path)) return false; final switch (item.type) { @@ -2256,8 +2283,9 @@ class SyncEngine { if (localModifiedTime == itemModifiedTime) { return true; } else { - addLogEntry("Local item time discrepancy detected: " ~ path, ["verbose"]); - addLogEntry("This local item has a different modified time " ~ to!string(localModifiedTime) ~ " when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime), ["verbose"]); + // The file has a different timestamp ... is the hash the same meaning no file modification? + addLogEntry("Local file time discrepancy detected: " ~ path, ["verbose"]); + addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime), ["verbose"]); // The file has a different timestamp ... is the hash the same meaning no file modification? // Test the file hash as the date / time stamp is different @@ -2275,13 +2303,15 @@ class SyncEngine { if (!dryRun) { // Attempt to update the online date time stamp uploadLastModifiedTime(item.driveId, item.id, localModifiedTime.toUTC(), item.eTag); + return false; } } else { // --download-only is being used ... local file needs to be corrected ... but why is it newer - indexing application potentially changing the timestamp ? 
addLogEntry("The source of the incorrect timestamp was the local file - correcting timestamp locally due to --download-only", ["verbose"]); - if (!dryRun) { + if (!dryRun) { addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); setTimes(path, item.mtime, item.mtime); + return false; } } } else { @@ -2290,12 +2320,12 @@ class SyncEngine { if (!dryRun) { addLogEntry("Calling setTimes() for this file: " ~ path, ["debug"]); setTimes(path, item.mtime, item.mtime); + return false; } } - return true; } else { // The hash is different so the content of the file has to be different as to what is stored online - addLogEntry("The local item has a different hash when compared to " ~ itemSource ~ " item hash", ["verbose"]); + addLogEntry("The local file has a different hash when compared to " ~ itemSource ~ " file hash", ["verbose"]); return false; } } @@ -2859,10 +2889,22 @@ class SyncEngine { } else { // The file contents have not changed, but the modified timestamp has addLogEntry("The last modified timestamp has changed however the file content has not changed", ["verbose"]); - addLogEntry("The local item has the same hash value as the item online - correcting timestamp online", ["verbose"]); - if (!dryRun) { - // Attempt to update the online date time stamp - uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime.toUTC(), dbItem.eTag); + + // Local file is newer .. are we in a --download-only situation? + if (!appConfig.getValueBool("download_only")) { + // Not a --download-only scenario + addLogEntry("The local item has the same hash value as the item online - correcting timestamp online", ["verbose"]); + if (!dryRun) { + // Attempt to update the online date time stamp + uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime.toUTC(), dbItem.eTag); + } + } else { + // --download-only being used + addLogEntry("The local item has the same hash value as the item online - correcting local timestamp due to --download-only being used to ensure local file matches timestamp online", ["verbose"]); + if (!dryRun) { + addLogEntry("Calling setTimes() for this file: " ~ localFilePath, ["debug"]); + setTimes(localFilePath, dbItem.mtime, dbItem.mtime); + } } } } else { From c328b029c5870eff6cf4aee610b48f9929e4ada0 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Wed, 17 Jan 2024 09:04:42 +1100 Subject: [PATCH 025/305] Fix debug output * Fix debug output --- src/main.d | 2 +- src/sync.d | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/main.d b/src/main.d index 5776045a8..a67e3210c 100644 --- a/src/main.d +++ b/src/main.d @@ -318,7 +318,7 @@ int main(string[] cliArgs) { if (appConfig.getValueBool("resync")) { // what is the risk acceptance for --resync? 
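The to!string() conversions added in this patch address a simple language constraint: D's '~' operator cannot append a bool to a string, so boolean values must be converted before they are concatenated into a log line. A minimal sketch with an illustrative variable:

```d
import std.conv : to;
import std.stdio : writeln;

void main() {
	bool resyncRiskAcceptance = true;

	// The next line does not compile: '~' cannot append a bool to a string.
	// string broken = "Returned --resync risk acceptance: " ~ resyncRiskAcceptance;

	// Converting first yields "true" / "false" in the logged text.
	string message = "Returned --resync risk acceptance: " ~ to!string(resyncRiskAcceptance);
	writeln(message);
}
```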
bool resyncRiskAcceptance = appConfig.displayResyncRiskForAcceptance(); - addLogEntry("Returned --resync risk acceptance: " ~ resyncRiskAcceptance, ["debug"]); + addLogEntry("Returned --resync risk acceptance: " ~ to!string(resyncRiskAcceptance), ["debug"]); // Action based on user response if (!resyncRiskAcceptance){ diff --git a/src/sync.d b/src/sync.d index 0f40afd2c..2f012ad97 100644 --- a/src/sync.d +++ b/src/sync.d @@ -423,8 +423,8 @@ class SyncEngine { addLogEntry("appConfig.accountType = " ~ appConfig.accountType, ["debug"]); addLogEntry("appConfig.defaultDriveId = " ~ appConfig.defaultDriveId, ["debug"]); addLogEntry("appConfig.remainingFreeSpace = " ~ to!string(appConfig.remainingFreeSpace), ["debug"]); - addLogEntry("appConfig.quotaAvailable = " ~ appConfig.quotaAvailable, ["debug"]); - addLogEntry("appConfig.quotaRestricted = " ~ appConfig.quotaRestricted, ["debug"]); + addLogEntry("appConfig.quotaAvailable = " ~ to!string(appConfig.quotaAvailable), ["debug"]); + addLogEntry("appConfig.quotaRestricted = " ~ to!string(appConfig.quotaRestricted), ["debug"]); // Make sure that appConfig.defaultDriveId is in our driveIDs array to use when checking if item is in database // Keep the driveIDsArray with unique entries only From e8406b719e76d0402cc81dc8e05350f9073fb1ff Mon Sep 17 00:00:00 2001 From: abraunegg Date: Wed, 17 Jan 2024 09:46:36 +1100 Subject: [PATCH 026/305] Reinstate safeRename for online item moves Reinstate safeRename for online item moves --- src/sync.d | 15 ++++++++------- src/util.d | 12 ++++++++++++ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/src/sync.d b/src/sync.d index 2f012ad97..12ffc8d98 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1749,10 +1749,10 @@ class SyncEngine { // Has the user configured to IGNORE local data protection rules? if (bypassDataPreservation) { - // The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename + // The user has configured to ignore data safety checks and overwrite local data rather than preserve & safeBackup addLogEntry("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); } else { - // local data protection is configured, rename the local file, passing in if we are performing a --dry-run or not + // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not safeBackup(newItemPath, dryRun); } } @@ -1764,10 +1764,10 @@ class SyncEngine { // Has the user configured to IGNORE local data protection rules? if (bypassDataPreservation) { - // The user has configured to ignore data safety checks and overwrite local data rather than preserve & rename + // The user has configured to ignore data safety checks and overwrite local data rather than preserve & safeBackup addLogEntry("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); } else { - // local data protection is configured, rename the local file, passing in if we are performing a --dry-run or not + // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not safeBackup(newItemPath, dryRun); } } @@ -1863,11 +1863,11 @@ class SyncEngine { // Try and rename path, catch any exception generated try { - // Rename this item, passing in if we are performing a --dry-run or not - safeBackup(changedItemPath, dryRun); - // If we are in a --dry-run situation? 
, the actual rename did not occur - but we need to track like it did if(!dryRun) { + // Rename this item, passing in if we are performing a --dry-run or not + safeRename(existingItemPath, changedItemPath, dryRun); + // Flag that the item was moved | renamed itemWasMoved = true; @@ -1875,6 +1875,7 @@ class SyncEngine { // Otherwise when we do the DB check, the move on the file system, the file technically has a newer timestamp // which is 'correct' .. but we need to report locally the online timestamp here as the move was made online if (changedOneDriveItem.type == ItemType.file) { + // Set the timestamp addLogEntry("Calling setTimes() for this file: " ~ changedItemPath, ["debug"]); setTimes(changedItemPath, changedOneDriveItem.mtime, changedOneDriveItem.mtime); } diff --git a/src/util.d b/src/util.d index 68ae41829..7eee3e3a9 100644 --- a/src/util.d +++ b/src/util.d @@ -96,6 +96,18 @@ void safeBackup(const(char)[] path, bool dryRun) { } } +// Rename the given item, and only performs the function if not in a --dry-run scenario +void safeRename(const(char)[] oldPath, const(char)[] newPath, bool dryRun) { + // Perform the rename + if (!dryRun) { + addLogEntry("Calling rename(oldPath, newPath)", ["debug"]); + // Use rename() as Linux is POSIX compliant, we have an atomic operation where at no point in time the 'to' is missing. + rename(oldPath, newPath); + } else { + addLogEntry("DRY-RUN: Skipping local file rename", ["debug"]); + } +} + // Deletes the specified file without throwing an exception if it does not exists void safeRemove(const(char)[] path) { if (exists(path)) remove(path); From a1f10cc5ebae9f90bf41391ec815239d5b0b082b Mon Sep 17 00:00:00 2001 From: abraunegg Date: Thu, 18 Jan 2024 18:56:22 +1100 Subject: [PATCH 027/305] Update sync.d * Add further timestamp qualifications --- src/sync.d | 34 ++++++++++++++++++++++------------ 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/src/sync.d b/src/sync.d index 12ffc8d98..4e24afc9b 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1757,18 +1757,28 @@ class SyncEngine { } } } else { - // Remote file is newer than the existing local item - addLogEntry("Remote item modified time is newer based on UTC time conversion", ["verbose"]); // correct message, remote item is newer - addLogEntry("localModifiedTime (local file): " ~ to!string(localModifiedTime), ["debug"]); - addLogEntry("itemModifiedTime (OneDrive item): " ~ to!string(itemModifiedTime), ["debug"]); + // Is the remote newer? + if (localModifiedTime < itemModifiedTime) { + // Remote file is newer than the existing local item + addLogEntry("Remote item modified time is newer based on UTC time conversion", ["verbose"]); // correct message, remote item is newer + addLogEntry("localModifiedTime (local file): " ~ to!string(localModifiedTime), ["debug"]); + addLogEntry("itemModifiedTime (OneDrive item): " ~ to!string(itemModifiedTime), ["debug"]); + + // Has the user configured to IGNORE local data protection rules? + if (bypassDataPreservation) { + // The user has configured to ignore data safety checks and overwrite local data rather than preserve & safeBackup + addLogEntry("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); + } else { + // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not + safeBackup(newItemPath, dryRun); + } + } - // Has the user configured to IGNORE local data protection rules? 
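The safeRename() helper introduced in util.d above wraps std.file.rename(), which maps to the atomic POSIX rename() call, and skips the operation under --dry-run. A small, self-contained usage sketch of that pattern, using throwaway file names:

```d
import std.file : rename, exists, write, remove;
import std.stdio : writeln;

// Minimal dry-run guard around std.file.rename(). POSIX rename() is atomic,
// so the destination path never observably disappears during the move.
void renameUnlessDryRun(string oldPath, string newPath, bool dryRun) {
	if (dryRun) {
		writeln("DRY-RUN: would rename ", oldPath, " -> ", newPath);
		return;
	}
	rename(oldPath, newPath);
}

void main() {
	write("example.tmp", "demo content");
	renameUnlessDryRun("example.tmp", "example.moved", true);  // no change on disk
	renameUnlessDryRun("example.tmp", "example.moved", false); // performs the move
	writeln("renamed file exists: ", exists("example.moved"));
	remove("example.moved");
}
```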
- if (bypassDataPreservation) { - // The user has configured to ignore data safety checks and overwrite local data rather than preserve & safeBackup - addLogEntry("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); - } else { - // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not - safeBackup(newItemPath, dryRun); + // Are the timestamps equal? + if (localModifiedTime == itemModifiedTime) { + // yes they are equal + addLogEntry("File timestamps are equal, no further action required", ["verbose"]); // correct message as timestamps are equal + return; } } } @@ -2052,7 +2062,7 @@ class SyncEngine { // local file is different to what we know to be true addLogEntry("The local file to replace (" ~ newItemPath ~ ") has been modified locally since the last download. Renaming it to avoid potential local data loss."); - // Perform the local rename of the existing local file, passing in if we are performing a --dry-run or not + // Perform the local safeBackup of the existing local file, passing in if we are performing a --dry-run or not safeBackup(newItemPath, dryRun); } } From e470b21818bc08a4675cec795ed3978914a54ec7 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Fri, 19 Jan 2024 09:08:50 +1100 Subject: [PATCH 028/305] Update sync.d * Add UTC note when detailing that there is a time difference --- src/sync.d | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sync.d b/src/sync.d index 4e24afc9b..8dd1256ee 100644 --- a/src/sync.d +++ b/src/sync.d @@ -2296,7 +2296,7 @@ class SyncEngine { } else { // The file has a different timestamp ... is the hash the same meaning no file modification? addLogEntry("Local file time discrepancy detected: " ~ path, ["verbose"]); - addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime), ["verbose"]); + addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " (UTC) when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime) ~ " (UTC)", ["verbose"]); // The file has a different timestamp ... is the hash the same meaning no file modification? 
// Test the file hash as the date / time stamp is different From 4e93777d7a918386522893d9e4378ce12aa0f1fc Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sat, 20 Jan 2024 05:09:17 +1100 Subject: [PATCH 029/305] Fix edge case file upload bugs when using --local-first * Add PR 2527v2 to 'alpha-5': Fix edge case file upload bugs when using --local-first --- src/sync.d | 73 +++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 58 insertions(+), 15 deletions(-) diff --git a/src/sync.d b/src/sync.d index 8dd1256ee..a61ba6e10 100644 --- a/src/sync.d +++ b/src/sync.d @@ -3512,7 +3512,12 @@ class SyncEngine { // Get the file size from the actual file ulong thisFileSizeLocal = getSize(localFilePath); // Get the file size from the DB data - ulong thisFileSizeFromDB = to!ulong(dbItem.size); + ulong thisFileSizeFromDB; + if (!dbItem.size.empty) { + thisFileSizeFromDB = to!ulong(dbItem.size); + } else { + thisFileSizeFromDB = 0; + } // remainingFreeSpace online includes the current file online // we need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value @@ -3850,6 +3855,13 @@ class SyncEngine { JSONValue currentDriveQuota; ulong remainingQuota; + // Ensure that we have a valid driveId + if (driveId.empty) { + // no driveId was provided, use the application default + driveId = appConfig.defaultDriveId; + } + + // Try and query the quota for the provided driveId try { // Create a new OneDrive API instance OneDriveApi getCurrentDriveQuotaApiInstance; @@ -4792,13 +4804,32 @@ class SyncEngine { throw new jsonResponseException("Unable to perform POSIX test as the OneDrive API request generated an invalid JSON response"); } + // If we get to this point, the OneDrive API returned a 200 OK with valid JSON data that indicates a 'file' exists at this location already + // and that it matches the POSIX filename of the local item we are trying to upload as a new file + addLogEntry("The file we are attemtping to upload as a new file already exists on Microsoft OneDrive: " ~ fileToUpload, ["verbose"]); + // No 404 or otherwise was triggered, meaning that the file already exists online and passes the POSIX test ... addLogEntry("fileDetailsFromOneDrive after exist online check: " ~ to!string(fileDetailsFromOneDrive), ["debug"]); - // Does the data from online match our local file? - if (performUploadIntegrityValidationChecks(fileDetailsFromOneDrive, fileToUpload, thisFileSize)) { - // Save item to the database + // Does the data from online match our local file that we are attempting to upload as a new file? 
+ if (!disableUploadValidation && performUploadIntegrityValidationChecks(fileDetailsFromOneDrive, fileToUpload, thisFileSize)) { + // Save online item details to the database saveItem(fileDetailsFromOneDrive); + } else { + // The local file we are attempting to upload as a new file is different to the existing file online + addLogEntry("Triggering newfile upload target already exists edge case, where the online item does not match what we are trying to upload", ["debug"]); + + // If the 'online' file is newer, this will be overwritten with the file from the local filesystem - consituting online data loss + // The file 'version history' online will have to be used to 'recover' the prior online file + string changedItemParentId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; + string changedItemId = fileDetailsFromOneDrive["id"].str; + databaseItemsWhereContentHasChanged ~= [changedItemParentId, changedItemId, fileToUpload]; + + // In order for the processing of the local item as a 'changed' item, unfortunatly we need to save the online data to the local DB + saveItem(fileDetailsFromOneDrive); + + // Attempt the processing of the different local file + processChangedLocalItemsToUpload(); } } catch (OneDriveException exception) { // If we get a 404 .. the file is not online .. this is what we want .. file does not exist online @@ -6702,26 +6733,38 @@ class SyncEngine { ]; // Perform the move operation on OneDrive + bool isMoveSuccess = false; JSONValue response; + string eTag = oldItem.eTag; // Create a new API Instance for this thread and initialise it OneDriveApi movePathOnlineApiInstance; movePathOnlineApiInstance = new OneDriveApi(appConfig); movePathOnlineApiInstance.initialise(); - try { - response = movePathOnlineApiInstance.updateById(oldItem.driveId, oldItem.id, data, oldItem.eTag); - } catch (OneDriveException e) { - if (e.httpStatusCode == 412) { - // OneDrive threw a 412 error, most likely: ETag does not match current item's value - // Retry without eTag - addLogEntry("File Move Failed - OneDrive eTag / cTag match issue", ["debug"]); - addLogEntry("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting to move the file - gracefully handling error", ["verbose"]); - string nullTag = null; - // move the file but without the eTag - response = movePathOnlineApiInstance.updateById(oldItem.driveId, oldItem.id, data, nullTag); + // Try the online move + for (int i = 0; i < 3; i++) { + try { + response = movePathOnlineApiInstance.updateById(oldItem.driveId, oldItem.id, data, oldItem.eTag); + isMoveSuccess = true; + break; + } catch (OneDriveException e) { + if (e.httpStatusCode == 412) { + // OneDrive threw a 412 error, most likely: ETag does not match current item's value + // Retry without eTag + addLogEntry("File Move Failed - OneDrive eTag / cTag match issue", ["debug"]); + addLogEntry("OneDrive returned a 'HTTP 412 - Precondition Failed' when attempting to move the file - gracefully handling error", ["verbose"]); + eTag = null; + // Retry to move the file but without the eTag, via the for() loop + } else if (e.httpStatusCode == 409) { + // Destination item already exists, delete it first + addLogEntry("Moved local item overwrote an existing item - deleting old online item"); + uploadDeletedItem(newItem, newPath); + } else + break; } } + // Shutdown API instance movePathOnlineApiInstance.shutdown(); // Free object and memory From 814d364c134e89e619c420a1fd72b25d895d2ac5 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sat, 20 Jan 2024 06:15:33 +1100 Subject: 
[PATCH 030/305] Update libcurl socket reuse handling * Clarify CURLOPT_CONNECTTIMEOUT in documentation * Clarify CURLOPT_TIMEOUT in documentation * Add 'threads' as a config file option to control the number of threads used, default of 8, max of 16 * Change CURLOPT_CONNECTTIMEOUT value to v2.4.x value default of 10 seconds * Configure libcurl to reuse connections --- docs/application-config-options.md | 22 ++++++++++++++++++---- src/config.d | 30 +++++++++++++++++++++++------- src/curlEngine.d | 9 ++++----- src/main.d | 4 ++-- src/sync.d | 12 ++++++------ 5 files changed, 53 insertions(+), 24 deletions(-) diff --git a/docs/application-config-options.md b/docs/application-config-options.md index ba1509ff1..fb3e01b41 100644 --- a/docs/application-config-options.md +++ b/docs/application-config-options.md @@ -51,6 +51,7 @@ Before reading this document, please ensure you are running application version - [sync_dir_permissions](#sync_dir_permissions) - [sync_file_permissions](#sync_file_permissions) - [sync_root_files](#sync_root_files) + - [threads](#threads) - [upload_only](#upload_only) - [user_agent](#user_agent) - [webhook_enabled](#webhook_enabled) @@ -184,13 +185,13 @@ _**CLI Option Use:**_ `--cleanup-local-files` _**Additional Usage Requirement:**_ This configuration option can only be used with 'download_only'. It cannot be used with any other application option. ### connect_timeout -_**Description:**_ This configuration setting manages the TCP connection timeout duration in seconds for HTTPS connections to Microsoft OneDrive when using the curl library. +_**Description:**_ This configuration setting manages the TCP connection timeout duration in seconds for HTTPS connections to Microsoft OneDrive when using the curl library (CURLOPT_CONNECTTIMEOUT). _**Value Type:**_ Integer -_**Default Value:**_ 30 +_**Default Value:**_ 10 -_**Config Example:**_ `connect_timeout = "20"` +_**Config Example:**_ `connect_timeout = "15"` ### data_timeout _**Description:**_ This setting controls the timeout duration, in seconds, for when data is not received on an active connection to Microsoft OneDrive over HTTPS when using the curl library, before that connection is timeout out. @@ -446,7 +447,7 @@ _**CLI Option Use:**_ `--no-remote-delete` _**Additional Usage Notes:**_ This configuration option can *only* be used in conjunction with `--upload-only` ### operation_timeout -_**Description:**_ This configuration option controls the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. We recommend users not to tamper with this option unless strictly necessary. +_**Description:**_ This configuration option controls the maximum amount of time (seconds) a file operation is allowed to take. This includes DNS resolution, connecting, data transfer, etc. We recommend users not to tamper with this option unless strictly necessary. This option controls the CURLOPT_TIMEOUT setting of libcurl. _**Value Type:**_ Integer @@ -727,6 +728,19 @@ _**CLI Option Use:**_ `--sync-root-files` _**Additional Usage Notes:**_ Although it's not mandatory, it's recommended that after enabling this option, you perform a `--resync`. This ensures that any previously excluded content is now included in your sync process. +### threads +_**Description:**_ This configuration option controls the number of 'threads' for upload and download operations when files need to be transfered between your local system and Microsoft OneDrive. 
+ +_**Value Type:**_ Integer + +_**Default Value:**_ `8` + +_**Maximum Value:**_ `16` + +_**Config Example:**_ `threads = "16"` + +_**Additional Usage Notes:**_ Increasing the threads beyond the default will lead to increased system utilisation and local TCP port use, which may lead to unpredictable behaviour and/or application stability issues. + ### upload_only _**Description:**_ This setting forces the client to only upload data to Microsoft OneDrive and replicate the locate state online. By default, this will also remove content online, that has been removed locally. diff --git a/src/config.d b/src/config.d index ae3916e33..c0187e614 100644 --- a/src/config.d +++ b/src/config.d @@ -63,12 +63,14 @@ class ApplicationConfig { // libcurl dns_cache_timeout timeout immutable int defaultDnsTimeout = 60; // Connect timeout for HTTP|HTTPS connections - immutable int defaultConnectTimeout = 30; + // Controls CURLOPT_CONNECTTIMEOUT + immutable int defaultConnectTimeout = 10; // Default data timeout for HTTP // curl.d has a default of: _defaultDataTimeout = dur!"minutes"(2); immutable int defaultDataTimeout = 240; // Maximum time any operation is allowed to take // This includes dns resolution, connecting, data transfer, etc. + // Controls CURLOPT_TIMEOUT immutable int defaultOperationTimeout = 3600; // Specify what IP protocol version should be used when communicating with OneDrive immutable int defaultIpProtocol = 0; // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only @@ -131,8 +133,8 @@ class ApplicationConfig { bool fullScanTrueUpRequired = false; bool surpressLoggingOutput = false; - // This is the value that needs testing when we are actually downloading and uploading data - ulong concurrentThreads = 16; + // Number of concurrent threads when downloading and uploading data + ulong defaultConcurrentThreads = 8; // All application run-time paths are formulated from this as a set of defaults // - What is the home path of the actual 'user' that is running the application @@ -258,17 +260,23 @@ class ApplicationConfig { // HTTPS & CURL Operation Settings // - Maximum time an operation is allowed to take - // This includes dns resolution, connecting, data transfer, etc. + // This includes dns resolution, connecting, data transfer, etc - controls CURLOPT_TIMEOUT + // CURLOPT_TIMEOUT: This option sets the maximum time in seconds that you allow the libcurl transfer operation to take. + // This is useful for controlling how long a specific transfer should take before it is considered too slow and aborted. However, it does not directly control the keep-alive time of a socket. longValues["operation_timeout"] = defaultOperationTimeout; // libcurl dns_cache_timeout timeout longValues["dns_timeout"] = defaultDnsTimeout; - // Timeout for HTTPS connections + // Timeout for HTTPS connections - controls CURLOPT_CONNECTTIMEOUT + // CURLOPT_CONNECTTIMEOUT: This option sets the timeout, in seconds, for the connection phase. It is the maximum time allowed for the connection to be established. longValues["connect_timeout"] = defaultConnectTimeout; // Timeout for activity on a HTTPS connection longValues["data_timeout"] = defaultDataTimeout; // What IP protocol version should be used when communicating with OneDrive longValues["ip_protocol_version"] = defaultIpProtocol; // 0 = IPv4 + IPv6, 1 = IPv4 Only, 2 = IPv6 Only + // Number of concurrent threads + longValues["threads"] = defaultConcurrentThreads; // Default is 8, user can increase or decrease + // - Do we wish to upload only? 
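The comments above distinguish CURLOPT_CONNECTTIMEOUT (connection establishment only) from CURLOPT_TIMEOUT (the entire operation). For reference, a standalone sketch of how these timeouts map onto D's std.net.curl wrapper; the values echo the defaults discussed here but are otherwise illustrative:

```d
import std.net.curl : HTTP;
import core.time : dur;

void main() {
	auto http = HTTP();

	// CURLOPT_CONNECTTIMEOUT: cap on establishing the TCP/TLS connection only.
	http.connectTimeout = dur!"seconds"(10);

	// CURLOPT_TIMEOUT: cap on the entire operation - DNS, connect and transfer.
	http.operationTimeout = dur!"seconds"(3600);

	// Inactivity timeout on an established transfer, and DNS cache lifetime.
	http.dataTimeout = dur!"seconds"(240);
	http.dnsTimeout = dur!"seconds"(60);
}
```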
boolValues["upload_only"] = false; // - Do we need to check for the .nomount file on the mount point? @@ -848,9 +856,16 @@ class ApplicationConfig { ulong tempValue = thisConfigValue; if (tempValue > 2) { addLogEntry("Invalid value for key in config file - using default value: " ~ key); - tempValue = 0; + tempValue = defaultIpProtocol; } setValueLong("ip_protocol_version", tempValue); + } else if (key == "threads") { + ulong tempValue = thisConfigValue; + if (tempValue > 16) { + addLogEntry("Invalid value for key in config file - using default value: " ~ key); + tempValue = defaultConcurrentThreads; + } + setValueLong("threads", tempValue); } } else { addLogEntry("Unknown key in config file: " ~ key); @@ -863,7 +878,7 @@ class ApplicationConfig { // Update the application configuration based on CLI passed in parameters void updateFromArgs(string[] cliArgs) { - // Add additional options that are NOT configurable via config file + // Add additional CLI options that are NOT configurable via config file stringValues["create_directory"] = ""; stringValues["create_share_link"] = ""; stringValues["destination_directory"] = ""; @@ -1327,6 +1342,7 @@ class ApplicationConfig { addLogEntry("Config option 'connect_timeout' = " ~ to!string(getValueLong("connect_timeout"))); addLogEntry("Config option 'data_timeout' = " ~ to!string(getValueLong("data_timeout"))); addLogEntry("Config option 'ip_protocol_version' = " ~ to!string(getValueLong("ip_protocol_version"))); + addLogEntry("Config option 'threads' = " ~ to!string(getValueLong("threads"))); // Is sync_list configured ? if (exists(syncListFilePath)){ diff --git a/src/curlEngine.d b/src/curlEngine.d index 528081b5f..7a396da12 100644 --- a/src/curlEngine.d +++ b/src/curlEngine.d @@ -86,11 +86,10 @@ class CurlEngine { // https://curl.se/libcurl/c/CURLOPT_FORBID_REUSE.html // CURLOPT_FORBID_REUSE - make connection get closed at once after use - // Ensure that we ARE NOT reusing TCP sockets connections - setting to 0 ensures that we ARE reusing connections (we did this in v2.4.xx) to ensure connections remained open and usable - // Setting this to 1 ensures that when we close the curl instance, any open sockets are closed - which we need to do when running - // multiple threads and API instances at the same time otherwise we run out of local files | sockets pretty quickly - // The libcurl default is 0 as per the documentation (to REUSE connections) - ensure we are configuring NOT to reuse connections and leave unused sockets needlessly open which will lead to local socket exhaustion - http.handle.set(CurlOption.forbid_reuse,1); + // Setting this to 0 ensures that we ARE reusing connections (we did this in v2.4.xx) to ensure connections remained open and usable + // Setting this to 1 ensures that when we close the curl instance, any open sockets are forced closed when the API curl instance is destroyed + // The libcurl default is 0 as per the documentation (to REUSE connections) - ensure we are configuring to reuse sockets + http.handle.set(CurlOption.forbid_reuse,0); if (httpsDebug) { // Output what options we are using so that in the debug log this can be tracked diff --git a/src/main.d b/src/main.d index a67e3210c..c0c90f61f 100644 --- a/src/main.d +++ b/src/main.d @@ -619,8 +619,8 @@ int main(string[] cliArgs) { // Do we need to validate the runtimeSyncDirectory to check for the presence of a '.nosync' file checkForNoMountScenario(); - // Set the default thread pool value - hard coded to 16 - defaultPoolThreads(to!int(appConfig.concurrentThreads)); + 
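The 'threads' value ultimately sizes the shared task pool and the batch size used for parallel transfers. A reduced sketch of that pattern, with a hard-coded stand-in for appConfig.getValueLong("threads") and dummy work items:

```d
import std.parallelism : defaultPoolThreads, parallel;
import std.range : chunks, iota;
import std.array : array;
import std.stdio : writeln;

void main() {
	// Stand-in for the configured 'threads' value, capped as in the parser above.
	size_t configuredThreads = 8;

	// Size the shared task pool once, before any parallel work is queued.
	defaultPoolThreads(cast(uint) configuredThreads);

	auto pendingTransfers = iota(0, 20).array; // stand-ins for queued JSON items

	// Work through the queue in batches of 'threads' items, each batch processed
	// in parallel, mirroring the chunks() batching used by the sync engine.
	foreach (batch; pendingTransfers.chunks(configuredThreads)) {
		foreach (item; parallel(batch)) {
			writeln("transferring item ", item);
		}
	}
}
```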
// Set the default thread pool value + defaultPoolThreads(to!int(appConfig.getValueLong("threads"))); // Is the sync engine initiallised correctly? if (appConfig.syncEngineWasInitialised) { diff --git a/src/sync.d b/src/sync.d index a61ba6e10..bd0863223 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1965,12 +1965,12 @@ class SyncEngine { // Download new file items as identified void downloadOneDriveItems() { // Lets deal with all the JSON items that need to be downloaded in a batch process - ulong batchSize = appConfig.concurrentThreads; + ulong batchSize = appConfig.getValueLong("threads"); ulong batchCount = (fileJSONItemsToDownload.length + batchSize - 1) / batchSize; ulong batchesProcessed = 0; foreach (chunk; fileJSONItemsToDownload.chunks(batchSize)) { - // send an array containing 'appConfig.concurrentThreads' (16) JSON items to download + // send an array containing 'appConfig.getValueLong("threads")' JSON items to download downloadOneDriveItemsInParallel(chunk); } } @@ -3469,7 +3469,7 @@ class SyncEngine { void processChangedLocalItemsToUpload() { // Each element in this array 'databaseItemsWhereContentHasChanged' is an Database Item ID that has been modified locally - ulong batchSize = appConfig.concurrentThreads; + ulong batchSize = appConfig.getValueLong("threads"); ulong batchCount = (databaseItemsWhereContentHasChanged.length + batchSize - 1) / batchSize; ulong batchesProcessed = 0; @@ -4644,7 +4644,7 @@ class SyncEngine { // Upload new file items as identified void uploadNewLocalFileItems() { // Lets deal with the new local items in a batch process - ulong batchSize = appConfig.concurrentThreads; + ulong batchSize = appConfig.getValueLong("threads"); ulong batchCount = (newLocalFilesToUploadToOneDrive.length + batchSize - 1) / batchSize; ulong batchesProcessed = 0; @@ -7477,12 +7477,12 @@ class SyncEngine { // there are valid items to resume upload // Lets deal with all the JSON items that need to be reumed for upload in a batch process - ulong batchSize = appConfig.concurrentThreads; + ulong batchSize = appConfig.getValueLong("threads"); ulong batchCount = (jsonItemsToResumeUpload.length + batchSize - 1) / batchSize; ulong batchesProcessed = 0; foreach (chunk; jsonItemsToResumeUpload.chunks(batchSize)) { - // send an array containing 'appConfig.concurrentThreads' (16) JSON items to resume upload + // send an array containing 'appConfig.getValueLong("threads")' JSON items to resume upload resumeSessionUploadsInParallel(chunk); } } From 7a584cd01c3fbd74b1311dae60852cce99992e25 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sat, 20 Jan 2024 06:29:21 +1100 Subject: [PATCH 031/305] Update application-config-options.md * Add 'force_http_2' depreciation note --- docs/application-config-options.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/application-config-options.md b/docs/application-config-options.md index fb3e01b41..c0ab06320 100644 --- a/docs/application-config-options.md +++ b/docs/application-config-options.md @@ -86,6 +86,7 @@ Before reading this document, please ensure you are running application version - [CLI Option: --verbose | -v+](#cli-option---verbose--v) - [CLI Option: --with-editing-perms](#cli-option---with-editing-perms) - [Depreciated Configuration File and CLI Options](#depreciated-configuration-file-and-cli-options) + - [force_http_2](#force_http_2) - [min_notify_changes](#min_notify_changes) - [CLI Option: --synchronize](#cli-option---synchronize) @@ -1063,7 +1064,16 @@ _**Usage Example:**_ `onedrive 
--create-share-link 'relative/path/to/your/file.t _**Additional Usage Notes:**_ Placement of `--with-editing-perms` is critical. It *must* be placed after the file path as per the example above. ## Depreciated Configuration File and CLI Options -The following configuration options are no longer supported +The following configuration options are no longer supported: + +### force_http_2 +_**Description:**_ Force the use of HTTP/2 for all operations where applicable + +_**Depreciated Config Example:**_ `force_http_2 = "true"` + +_**Depreciated CLI Option:**_ `--force-http-2` + +_**Reason for depreciation:**_ HTTP/2 will be used by default where possible, when the OneDrive API platform does not downgrade the connection to HTTP/1.1, thus this confuguration option is no longer required. ### min_notify_changes _**Description:**_ Minimum number of pending incoming changes necessary to trigger a GUI desktop notification. From c8d29c8231e9a56b180c5dd229fd2c7eef657652 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sat, 20 Jan 2024 07:41:53 +1100 Subject: [PATCH 032/305] Add libcurl timeout on handle messaging * Add libcurl timeout on handle messaging to use 'ip_protocol_version' to set what IP protocol to use. Testing has shown that when this is triggered, it is libcurl having an issue with IPv6 DNS resolution. --- src/onedrive.d | 9 +++++++-- src/sync.d | 2 +- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/src/onedrive.d b/src/onedrive.d index 1e68e05e1..ef33cb50d 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -1579,6 +1579,7 @@ class OneDriveApi { }; try { + // Attempt to perform the action curlEngine.http.perform(); // Check the HTTP Response headers - needed for correct 429 handling checkHTTPResponseHeaders(); @@ -1603,12 +1604,16 @@ class OneDriveApi { SysTime currentTime; // Connectivity to Microsoft OneDrive was lost - addLogEntry("Internet connectivity to Microsoft OneDrive service has been lost .. re-trying in the background"); + addLogEntry("Internet connectivity to Microsoft OneDrive service has been interrupted .. re-trying in the background"); // what caused the initial curl exception? if (canFind(errorMessage, "Couldn't connect to server on handle")) addLogEntry("Unable to connect to server - HTTPS access blocked?", ["debug"]); if (canFind(errorMessage, "Couldn't resolve host name on handle")) addLogEntry("Unable to resolve server - DNS access blocked?", ["debug"]); - if (canFind(errorMessage, "Timeout was reached on handle")) addLogEntry("A timeout was triggered - data too slow, no response ... use --debug-https to diagnose further", ["debug"]); + if (canFind(errorMessage, "Timeout was reached on handle")) { + // Common cause is libcurl trying IPv6 DNS resolution when there are only IPv4 DNS servers available + addLogEntry("A libcurl timeout was triggered - data too slow, no DNS resolution response, no server response ... use --debug-https to diagnose this issue further.", ["verbose"]); + addLogEntry("A common cause is IPv6 DNS resolution. 
Investigate 'ip_protocol_version' to only use IPv4 network communication to potentially resolve this issue.", ["verbose"]); + } while (!retrySuccess){ try { diff --git a/src/sync.d b/src/sync.d index bd0863223..3d428799d 100644 --- a/src/sync.d +++ b/src/sync.d @@ -5703,7 +5703,7 @@ class SyncEngine { } } else { // log error - addLogEntry("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object"); + addLogEntry("ERROR: An error was returned from OneDrive and the resulting response is not a valid JSON object that can be processed."); addLogEntry("ERROR: Increase logging verbosity to assist determining why."); } } From f2414f4a56ec286e44d8a8d879a366d769eec4fe Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 21 Jan 2024 07:11:58 +1100 Subject: [PATCH 033/305] Update sync.d * Update logging output for added clarity --- src/sync.d | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/sync.d b/src/sync.d index 3d428799d..68755bbba 100644 --- a/src/sync.d +++ b/src/sync.d @@ -210,8 +210,10 @@ class SyncEngine { // Do we configure to clean up local files if using --download-only ? if ((appConfig.getValueBool("download_only")) && (appConfig.getValueBool("cleanup_local_files"))) { // --download-only and --cleanup-local-files were passed in + addLogEntry(); addLogEntry("WARNING: Application has been configured to cleanup local files that are not present online."); addLogEntry("WARNING: Local data loss MAY occur in this scenario if you are expecting data to remain archived locally."); + addLogEntry(); // Set the flag this.cleanupLocalFiles = true; } @@ -525,7 +527,7 @@ class SyncEngine { void syncOneDriveAccountToLocalDisk() { // performFullScanTrueUp value - addLogEntry("Perform a Full Scan True-Up: " ~ appConfig.fullScanTrueUpRequired, ["debug"]); + addLogEntry("Perform a Full Scan True-Up: " ~ to!string(appConfig.fullScanTrueUpRequired), ["debug"]); // Fetch the API response of /delta to track changes on OneDrive fetchOneDriveDeltaAPIResponse(null, null, null); @@ -1807,7 +1809,7 @@ class SyncEngine { // updated by the local Operating System with the latest timestamp - as this is normal operation // as the directory has been modified addLogEntry("Setting directory lastModifiedDateTime for: " ~ newItemPath ~ " to " ~ to!string(newDatabaseItem.mtime), ["debug"]); - addLogEntry("Calling setTimes() for this file: " ~ newItemPath, ["debug"]); + addLogEntry("Calling setTimes() for this directory: " ~ newItemPath, ["debug"]); setTimes(newItemPath, newDatabaseItem.mtime, newDatabaseItem.mtime); // Save the item to the database saveItem(onedriveJSONItem); @@ -4286,13 +4288,14 @@ class SyncEngine { // Check if this path in the database Item databaseItem; - bool pathFoundInDB = false; + addLogEntry("Search DB for this path: " ~ searchPath, ["debug"]); foreach (driveId; driveIDsArray) { if (itemDB.selectByPath(searchPath, driveId, databaseItem)) { - pathFoundInDB = true; + addLogEntry("DB Record for search path: " ~ to!string(databaseItem), ["debug"]); + return true; // Early exit on finding the path in the DB } } - return pathFoundInDB; + return false; // Return false if path is not found in any drive } // Create a new directory online on OneDrive From e92947ea327ced54dae276beb8d8481d9d88bb90 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 21 Jan 2024 16:15:56 +1100 Subject: [PATCH 034/305] Update path validation * Update check for ASCII control codes and specific non-ASCII control characters * Add UTF-16 path validation check --- 
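To make the two changes concrete, the sketch below screens some sample names in the way this patch intends: a compile-time regex for control characters (the same character class the patch adopts) and std.utf.validate() as a lightweight stand-in for the new surrogate-pair walk. The helper names and sample paths are illustrative only:

```d
import std.regex : ctRegex, match;
import std.utf : validate, UTFException;
import std.stdio : writeln;

// ASCII control range plus the Unicode 'Cc' category.
bool containsControlCodes(string path) {
	auto controlCodePattern = ctRegex!(`[\x00-\x1F\x7F]|\p{Cc}`);
	return !match(path, controlCodePattern).empty;
}

// std.utf.validate throws on malformed UTF-8 input, which also rules out
// anything that cannot be transcoded to well-formed UTF-16.
bool isWellFormedUTF(string path) {
	try {
		validate(path);
		return true;
	} catch (UTFException) {
		return false;
	}
}

void main() {
	writeln(containsControlCodes("Documents/report.txt"));    // false
	writeln(containsControlCodes("Documents/bad\u0007.txt")); // true - BEL control code
	writeln(isWellFormedUTF("Documents/α-notes.txt"));        // true - plain non-ASCII is fine
}
```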
src/sync.d | 8 ++++++++ src/util.d | 57 ++++++++++++++++++++++++++++++++++++++++++++++-------- 2 files changed, 57 insertions(+), 8 deletions(-) diff --git a/src/sync.d b/src/sync.d index 68755bbba..4d0d8fe21 100644 --- a/src/sync.d +++ b/src/sync.d @@ -3068,6 +3068,14 @@ class SyncEngine { } } + // Validate that the path is a valid UTF-16 encoded path + if (!invalidPath) { + if (!isValidUTF16(localFilePath)) { // This will return true if this is a valid UTF-16 encoded path, so we are checking for 'false' as response + addLogEntry("Skipping item - invalid name (Invalid UTF-16 encoded item): " ~ localFilePath, ["info", "notify"]); + invalidPath = true; + } + } + // Check path for ASCII Control Codes if (!invalidPath) { if (containsASCIIControlCodes(localFilePath)) { // This will return true if this contains ASCII Control Codes diff --git a/src/util.d b/src/util.d index 7eee3e3a9..d981dc61e 100644 --- a/src/util.d +++ b/src/util.d @@ -19,6 +19,7 @@ import std.algorithm; import std.uri; import std.json; import std.traits; +import std.utf; import core.stdc.stdlib; import core.thread; import core.memory; @@ -415,7 +416,7 @@ bool containsASCIIHTMLCodes(string path) { // Does the path contain any ASCII Control Codes bool containsASCIIControlCodes(string path) { - // Check for null or empty string + // Check for null or empty string if (path.length == 0) { return false; } @@ -425,17 +426,57 @@ bool containsASCIIControlCodes(string path) { return false; } - // https://github.com/abraunegg/onedrive/discussions/2553#discussioncomment-7995254 - // Define a ctRegex pattern for ASCII control codes - auto controlCodePattern = ctRegex!(`[^\x20-\x7E./]`); + // https://github.com/abraunegg/onedrive/discussions/2553#discussioncomment-7995254 + // Define a ctRegex pattern for ASCII control codes and specific non-ASCII control characters + // This pattern includes the ASCII control range and common non-ASCII control characters + // Adjust the pattern as needed to include specific characters of concern + auto controlCodePattern = ctRegex!(`[\x00-\x1F\x7F]|\p{Cc}`); // Blocks ƒ†¯~‰ (#2553) , allows α (#2598) - // Use match to search for ASCII control codes in the path - auto matchResult = match(path, controlCodePattern); + // Use match to search for ASCII control codes in the path + auto matchResult = match(path, controlCodePattern); - // Return true if matchResult is not empty (indicating a control code was found) - return !matchResult.empty; + // Return true if matchResult is not empty (indicating a control code was found) + return !matchResult.empty; } +// Is the path a valid UTF-16 encoded path? 
+bool isValidUTF16(string path) { + // Check for null or empty string + if (path.length == 0) { + return true; + } + + // Check for root item + if (path == ".") { + return true; + } + + auto wpath = toUTF16(path); // Convert to UTF-16 encoding + auto it = wpath.byCodeUnit; + + while (!it.empty) { + ushort current = it.front; + + // Check for valid single unit + if (current <= 0xD7FF || (current >= 0xE000 && current <= 0xFFFF)) { + it.popFront(); + } + // Check for valid surrogate pair + else if (current >= 0xD800 && current <= 0xDBFF) { + it.popFront(); + if (it.empty || it.front < 0xDC00 || it.front > 0xDFFF) { + return false; // Invalid surrogate pair + } + it.popFront(); + } else { + return false; // Invalid code unit + } + } + + return true; +} + + // Does the path contain any HTML URL encoded items (e.g., '%20' for space) bool containsURLEncodedItems(string path) { // Check for null or empty string From deb63b6e7690c8220eba89da826591450d76c6b9 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Mon, 22 Jan 2024 07:57:47 +1100 Subject: [PATCH 035/305] Update util.d Fix readLocalFile for zero byte files --- src/util.d | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/src/util.d b/src/util.d index d981dc61e..629b1d5a7 100644 --- a/src/util.d +++ b/src/util.d @@ -275,24 +275,28 @@ bool retryInternetConnectivtyTest(ApplicationConfig appConfig) { // https://github.com/abraunegg/onedrive/issues/113 // returns true if file can be accessed bool readLocalFile(string path) { - try { - // Attempt to read up to the first 1 byte of the file - auto data = read(path, 1); + // What is the file size + if (getSize(path) != 0) { + try { + // Attempt to read up to the first 1 byte of the file + auto data = read(path, 1); - // Check if the read operation was successful - if (data.length != 1) { - // What is the file size? - if (getSize(path) != 0) { + // Check if the read operation was successful + if (data.length != 1) { + // Read operation not sucessful addLogEntry("Failed to read the required amount from the file: " ~ path); + return false; } - return false; - } - } catch (std.file.FileException e) { - // Unable to read the file, log the error message - displayFileSystemErrorMessage(e.msg, getFunctionName!({})); - return false; - } - return true; + } catch (std.file.FileException e) { + // Unable to read the file, log the error message + displayFileSystemErrorMessage(e.msg, getFunctionName!({})); + return false; + } + return true; + } else { + // zero byte files cannot be read, return true + return true; + } } // Calls globMatch for each string in pattern separated by '|' From e2f1b68b1a01e6182d86d7f87935bcf46fd8d002 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Mon, 22 Jan 2024 08:25:51 +1100 Subject: [PATCH 036/305] Update sync.d * Whenever the function is being returned, ensure that API instance is shutdown and destroyed --- src/sync.d | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/sync.d b/src/sync.d index 4d0d8fe21..f8c3d9d31 100644 --- a/src/sync.d +++ b/src/sync.d @@ -4525,11 +4525,19 @@ class SyncEngine { // OneDrive API returned a 404 (above) to say the directory did not exist // but when we attempted to create it, OneDrive responded that it now already exists addLogEntry("OneDrive reported that " ~ thisNewPathToCreate ~ " already exists .. 
OneDrive API race condition", ["verbose"]); + // Shutdown API instance + createDirectoryOnlineOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(createDirectoryOnlineOneDriveApiInstance); return; } else { // some other error from OneDrive was returned - display what it is addLogEntry("OneDrive generated an error when creating this path: " ~ thisNewPathToCreate); displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + // Shutdown API instance + createDirectoryOnlineOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(createDirectoryOnlineOneDriveApiInstance); return; } } @@ -4608,6 +4616,10 @@ class SyncEngine { // Add this path to businessSharedFoldersOnlineToSkip businessSharedFoldersOnlineToSkip ~= [thisNewPathToCreate]; // no save to database, no online create + // Shutdown API instance + createDirectoryOnlineOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(createDirectoryOnlineOneDriveApiInstance); return; } } @@ -4617,6 +4629,10 @@ class SyncEngine { // Is the response a valid JSON object - validation checking done in saveItem saveItem(onlinePathData); + // Shutdown API instance + createDirectoryOnlineOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(createDirectoryOnlineOneDriveApiInstance); return; } else { // Normally this would throw an error, however we cant use throw new posixException() @@ -4627,6 +4643,10 @@ class SyncEngine { addLogEntry("Skipping creating this directory online due to 'case-insensitive match': " ~ thisNewPathToCreate); // Add this path to posixViolationPaths posixViolationPaths ~= [thisNewPathToCreate]; + // Shutdown API instance + createDirectoryOnlineOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(createDirectoryOnlineOneDriveApiInstance); return; } } else { @@ -4634,6 +4654,10 @@ class SyncEngine { addLogEntry("ERROR: There was an error performing this operation on Microsoft OneDrive"); addLogEntry("ERROR: Increase logging verbosity to assist determining why."); addLogEntry("Skipping: " ~ buildNormalizedPath(absolutePath(thisNewPathToCreate))); + // Shutdown API instance + createDirectoryOnlineOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(createDirectoryOnlineOneDriveApiInstance); return; } } From d5185e5207f91c2d56d6be8bd1a727168a1a824d Mon Sep 17 00:00:00 2001 From: abraunegg Date: Wed, 24 Jan 2024 08:12:40 +1100 Subject: [PATCH 037/305] Update sync.d * Move 'thisFileSize = getSize(fileToUpload)' back to original location * Add check if path exists before attempting upload --- src/sync.d | 339 +++++++++++++++++++++++++++-------------------------- 1 file changed, 173 insertions(+), 166 deletions(-) diff --git a/src/sync.d b/src/sync.d index f8c3d9d31..12df0505d 100644 --- a/src/sync.d +++ b/src/sync.d @@ -4748,202 +4748,209 @@ class SyncEngine { parentItem.driveId = appConfig.defaultDriveId; } - // Get the new file size - // Even if the permissions on the file are: -rw-------. 1 root root 8 Jan 11 09:42 - // We can obtain the file size - thisFileSize = getSize(fileToUpload); - - // Can we read the file - as a permissions issue or actual file corruption will cause a failure - // Resolves: https://github.com/abraunegg/onedrive/issues/113 - // readLocalFile cannot 'read' 1 byte of data from a zero byte file size .. 
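The previous patch ([PATCH 036/305]) enforces its cleanup guarantee by repeating the shutdown and destroy calls ahead of every return statement. A minimal sketch of the same contract using D's scope(exit), assuming shutdown() and object.destroy() are safe to run once at scope exit (illustrative only, not what the patch itself does):

    // Minimal sketch of the cleanup contract (not the actual createDirectoryOnline() body)
    void createDirectoryOnlineSketch(ApplicationConfig appConfig, string thisNewPathToCreate) {
        OneDriveApi createDirectoryOnlineOneDriveApiInstance = new OneDriveApi(appConfig);
        createDirectoryOnlineOneDriveApiInstance.initialise();
        scope(exit) {
            // Runs on every exit path: normal return, early return, or a thrown exception
            createDirectoryOnlineOneDriveApiInstance.shutdown();
            object.destroy(createDirectoryOnlineOneDriveApiInstance);
        }
        // ... query the OneDrive API and create the directory online, returning early on errors ...
    }
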
- if (readLocalFile(fileToUpload) || (thisFileSize == 0)) { - if (parentPathFoundInDB) { + // Check if the path still exists locally before we try to upload + if (exists(fileToUpload)) { + // Can we read the file - as a permissions issue or actual file corruption will cause a failure + // Resolves: https://github.com/abraunegg/onedrive/issues/113 + if (readLocalFile(fileToUpload)) { // The local file can be read - so we can read it to attemtp to upload it in this thread - - // Does this file exceed the maximum filesize for OneDrive - // Resolves: https://github.com/skilion/onedrive/issues/121 , https://github.com/skilion/onedrive/issues/294 , https://github.com/skilion/onedrive/issues/329 - if (thisFileSize <= maxUploadFileSize) { - // Is there enough free space on OneDrive when we started this thread, to upload the file to OneDrive? - remainingFreeSpaceOnline = getRemainingFreeSpace(parentItem.driveId); - addLogEntry("Current Available Space Online (Upload Target Drive ID): " ~ to!string((remainingFreeSpaceOnline / 1024 / 1024)) ~ " MB", ["debug"]); - - // When we compare the space online to the total we are trying to upload - is there space online? - ulong calculatedSpaceOnlinePostUpload = remainingFreeSpaceOnline - thisFileSize; - - // If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated - // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true - // If 'business' accounts, if driveId == defaultDriveId, then we will have data - // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true + // Is the path parent in the DB? + if (parentPathFoundInDB) { + // Parent path is in the database + // Get the new file size + // Even if the permissions on the file are: -rw-------. 1 root root 8 Jan 11 09:42 + // we can still obtain the file size, however readLocalFile() also tests if the file can be read (permission check) + thisFileSize = getSize(fileToUpload); - if (remainingFreeSpaceOnline > totalDataToUpload) { - // Space available - spaceAvailableOnline = true; - } else { - // we need to look more granular - // What was the latest getRemainingFreeSpace() value? - if (appConfig.quotaAvailable) { - // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload? - if (calculatedSpaceOnlinePostUpload > 0) { - // Based on this thread action, we beleive that there is space available online to upload - proceed - spaceAvailableOnline = true; - } - } - } - - // Is quota being restricted? - if (appConfig.quotaRestricted) { - // If the upload target drive is not our drive id, then it is a shared folder .. we need to print a space warning message - if (parentItem.driveId != appConfig.defaultDriveId) { - // Different message depending on account type - if (appConfig.accountType == "personal") { - addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed.", ["verbose"]); - } else { - addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. 
Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); - } - } else { - if (appConfig.accountType == "personal") { - addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed.", ["verbose"]); - } else { - addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); - } - } - // Space available online is being restricted - so we have no way to really know if there is space available online - spaceAvailableOnline = true; - } - - // Do we have space available or is space available being restricted (so we make the blind assumption that there is space available) - if (spaceAvailableOnline) { - // We need to check that this new local file does not exist on OneDrive + // Does this file exceed the maximum filesize for OneDrive + // Resolves: https://github.com/skilion/onedrive/issues/121 , https://github.com/skilion/onedrive/issues/294 , https://github.com/skilion/onedrive/issues/329 + if (thisFileSize <= maxUploadFileSize) { + // Is there enough free space on OneDrive when we started this thread, to upload the file to OneDrive? + remainingFreeSpaceOnline = getRemainingFreeSpace(parentItem.driveId); + addLogEntry("Current Available Space Online (Upload Target Drive ID): " ~ to!string((remainingFreeSpaceOnline / 1024 / 1024)) ~ " MB", ["debug"]); - // Create a new API Instance for this thread and initialise it - OneDriveApi checkFileOneDriveApiInstance; - checkFileOneDriveApiInstance = new OneDriveApi(appConfig); - checkFileOneDriveApiInstance.initialise(); + // When we compare the space online to the total we are trying to upload - is there space online? + ulong calculatedSpaceOnlinePostUpload = remainingFreeSpaceOnline - thisFileSize; - JSONValue fileDetailsFromOneDrive; - - // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file - // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, - // even though some file systems (such as a POSIX-compliant file systems that Linux use) may consider them as different. - // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior, OneDrive does not use this. + // If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated + // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true + // If 'business' accounts, if driveId == defaultDriveId, then we will have data + // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true - // In order to upload this file - this query HAS to respond as a 404 - Not Found + if (remainingFreeSpaceOnline > totalDataToUpload) { + // Space available + spaceAvailableOnline = true; + } else { + // we need to look more granular + // What was the latest getRemainingFreeSpace() value? + if (appConfig.quotaAvailable) { + // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload? 
+ if (calculatedSpaceOnlinePostUpload > 0) { + // Based on this thread action, we beleive that there is space available online to upload - proceed + spaceAvailableOnline = true; + } + } + } - // Does this 'file' already exist on OneDrive? - try { - fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); - // Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API - if (hasName(fileDetailsFromOneDrive)) { - performPosixTest(baseName(fileToUpload), fileDetailsFromOneDrive["name"].str); + // Is quota being restricted? + if (appConfig.quotaRestricted) { + // If the upload target drive is not our drive id, then it is a shared folder .. we need to print a space warning message + if (parentItem.driveId != appConfig.defaultDriveId) { + // Different message depending on account type + if (appConfig.accountType == "personal") { + addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed.", ["verbose"]); + } else { + addLogEntry("WARNING: Shared Folder OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); + } } else { - throw new jsonResponseException("Unable to perform POSIX test as the OneDrive API request generated an invalid JSON response"); + if (appConfig.accountType == "personal") { + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Space available online cannot be guaranteed.", ["verbose"]); + } else { + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); + } } + // Space available online is being restricted - so we have no way to really know if there is space available online + spaceAvailableOnline = true; + } + + // Do we have space available or is space available being restricted (so we make the blind assumption that there is space available) + if (spaceAvailableOnline) { + // We need to check that this new local file does not exist on OneDrive - // If we get to this point, the OneDrive API returned a 200 OK with valid JSON data that indicates a 'file' exists at this location already - // and that it matches the POSIX filename of the local item we are trying to upload as a new file - addLogEntry("The file we are attemtping to upload as a new file already exists on Microsoft OneDrive: " ~ fileToUpload, ["verbose"]); + // Create a new API Instance for this thread and initialise it + OneDriveApi checkFileOneDriveApiInstance; + checkFileOneDriveApiInstance = new OneDriveApi(appConfig); + checkFileOneDriveApiInstance.initialise(); - // No 404 or otherwise was triggered, meaning that the file already exists online and passes the POSIX test ... - addLogEntry("fileDetailsFromOneDrive after exist online check: " ~ to!string(fileDetailsFromOneDrive), ["debug"]); + JSONValue fileDetailsFromOneDrive; + + // https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file + // Do not assume case sensitivity. For example, consider the names OSCAR, Oscar, and oscar to be the same, + // even though some file systems (such as a POSIX-compliant file systems that Linux use) may consider them as different. + // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior, OneDrive does not use this. 
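The probe logic being re-indented in this hunk can be summarised in a short hedged sketch: the OneDrive API lookup must fail with HTTP 404 for the upload to proceed as a brand new file. The names below are taken from the surrounding code; the real implementation additionally performs the POSIX filename test and the transient-error handling shown further on:

    // Sketch only: the pre-upload probe treats HTTP 404 as the desired outcome.
    // Assumes OneDriveException exposes httpStatusCode, as used elsewhere in sync.d.
    bool pathAlreadyExistsOnline(OneDriveApi api, string driveId, string path) {
        try {
            api.getPathDetailsByDriveId(driveId, path);
            return true;   // 200 OK with a JSON body: something already occupies this online path
        } catch (OneDriveException e) {
            if (e.httpStatusCode == 404) return false;   // not found online, safe to upload as a new file
            throw e;   // other responses (408, 429, 503, 504, ...) are left to the caller's retry handling
        }
    }
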
- // Does the data from online match our local file that we are attempting to upload as a new file? - if (!disableUploadValidation && performUploadIntegrityValidationChecks(fileDetailsFromOneDrive, fileToUpload, thisFileSize)) { - // Save online item details to the database - saveItem(fileDetailsFromOneDrive); - } else { - // The local file we are attempting to upload as a new file is different to the existing file online - addLogEntry("Triggering newfile upload target already exists edge case, where the online item does not match what we are trying to upload", ["debug"]); - - // If the 'online' file is newer, this will be overwritten with the file from the local filesystem - consituting online data loss - // The file 'version history' online will have to be used to 'recover' the prior online file - string changedItemParentId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; - string changedItemId = fileDetailsFromOneDrive["id"].str; - databaseItemsWhereContentHasChanged ~= [changedItemParentId, changedItemId, fileToUpload]; + // In order to upload this file - this query HAS to respond as a 404 - Not Found + + // Does this 'file' already exist on OneDrive? + try { + fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); + // Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API + if (hasName(fileDetailsFromOneDrive)) { + performPosixTest(baseName(fileToUpload), fileDetailsFromOneDrive["name"].str); + } else { + throw new jsonResponseException("Unable to perform POSIX test as the OneDrive API request generated an invalid JSON response"); + } - // In order for the processing of the local item as a 'changed' item, unfortunatly we need to save the online data to the local DB - saveItem(fileDetailsFromOneDrive); + // If we get to this point, the OneDrive API returned a 200 OK with valid JSON data that indicates a 'file' exists at this location already + // and that it matches the POSIX filename of the local item we are trying to upload as a new file + addLogEntry("The file we are attemtping to upload as a new file already exists on Microsoft OneDrive: " ~ fileToUpload, ["verbose"]); - // Attempt the processing of the different local file - processChangedLocalItemsToUpload(); - } - } catch (OneDriveException exception) { - // If we get a 404 .. the file is not online .. this is what we want .. file does not exist online - if (exception.httpStatusCode == 404) { - // The file has been checked, client side filtering checked, does not exist online - we need to upload it - addLogEntry("fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); generated a 404 - file does not exist online - must upload it", ["debug"]); - uploadFailed = performNewFileUpload(parentItem, fileToUpload, thisFileSize); - } else { + // No 404 or otherwise was triggered, meaning that the file already exists online and passes the POSIX test ... + addLogEntry("fileDetailsFromOneDrive after exist online check: " ~ to!string(fileDetailsFromOneDrive), ["debug"]); - string thisFunctionName = getFunctionName!({}); - // HTTP request returned status code 408,429,503,504 - if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { - // Handle the 429 - if (exception.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). 
We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(checkFileOneDriveApiInstance); - addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); - } - // re-try the specific changes queries - if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { - // 408 - Request Time Out - // 503 - Service Unavailable - // 504 - Gateway Timeout - // Transient error - try again in 30 seconds - auto errorArray = splitLines(exception.msg); - addLogEntry(to!string(errorArray[0]) ~ " when attempting to validate file details on OneDrive - retrying applicable request in 30 seconds"); - addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); - - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. - addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429, 503, 504 - but loop back calling this function - addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); - uploadNewFile(fileToUpload); + // Does the data from online match our local file that we are attempting to upload as a new file? + if (!disableUploadValidation && performUploadIntegrityValidationChecks(fileDetailsFromOneDrive, fileToUpload, thisFileSize)) { + // Save online item details to the database + saveItem(fileDetailsFromOneDrive); } else { - // Default operation if not 408,429,503,504 errors - // display what the error is - displayOneDriveErrorMessage(exception.msg, thisFunctionName); + // The local file we are attempting to upload as a new file is different to the existing file online + addLogEntry("Triggering newfile upload target already exists edge case, where the online item does not match what we are trying to upload", ["debug"]); + + // If the 'online' file is newer, this will be overwritten with the file from the local filesystem - consituting online data loss + // The file 'version history' online will have to be used to 'recover' the prior online file + string changedItemParentId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; + string changedItemId = fileDetailsFromOneDrive["id"].str; + databaseItemsWhereContentHasChanged ~= [changedItemParentId, changedItemId, fileToUpload]; + + // In order for the processing of the local item as a 'changed' item, unfortunatly we need to save the online data to the local DB + saveItem(fileDetailsFromOneDrive); + + // Attempt the processing of the different local file + processChangedLocalItemsToUpload(); } + } catch (OneDriveException exception) { + // If we get a 404 .. the file is not online .. this is what we want .. 
file does not exist online + if (exception.httpStatusCode == 404) { + // The file has been checked, client side filtering checked, does not exist online - we need to upload it + addLogEntry("fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); generated a 404 - file does not exist online - must upload it", ["debug"]); + uploadFailed = performNewFileUpload(parentItem, fileToUpload, thisFileSize); + } else { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(checkFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to validate file details on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + uploadNewFile(fileToUpload); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + } catch (posixException e) { + displayPosixErrorMessage(e.msg); + uploadFailed = true; + } catch (jsonResponseException e) { + addLogEntry(e.msg, ["debug"]); + uploadFailed = true; } - } catch (posixException e) { - displayPosixErrorMessage(e.msg); - uploadFailed = true; - } catch (jsonResponseException e) { - addLogEntry(e.msg, ["debug"]); + + // Operations in this thread are done / complete - either upload was done or it failed + checkFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(checkFileOneDriveApiInstance); + } else { + // skip file upload - insufficent space to upload + addLogEntry("Skipping uploading this new file as it exceeds the available free space on OneDrive: " ~ fileToUpload); uploadFailed = true; } - - // Operations in this thread are done / complete - either upload was done or it failed - checkFileOneDriveApiInstance.shutdown(); - // Free object and memory - object.destroy(checkFileOneDriveApiInstance); } else { - // skip file upload - insufficent space to upload - addLogEntry("Skipping uploading this new file as it exceeds the available free space on OneDrive: " ~ fileToUpload); + // Skip file upload - too large + addLogEntry("Skipping uploading this new file as it exceeds the maximum size allowed by OneDrive: " ~ fileToUpload); uploadFailed = true; } } else { - // Skip file upload - too large - addLogEntry("Skipping uploading this new file as it exceeds the maximum size allowed by OneDrive: " ~ fileToUpload); + // why was the parent path not in the database? + if (canFind(posixViolationPaths, parentPath)) { + addLogEntry("ERROR: POSIX 'case-insensitive match' for the parent path which violates the Microsoft OneDrive API namespace convention."); + } else { + addLogEntry("ERROR: Parent path is not in the database or online."); + } + addLogEntry("ERROR: Unable to upload this file: " ~ fileToUpload); uploadFailed = true; } } else { - // why was the parent path not in the database? - if (canFind(posixViolationPaths, parentPath)) { - addLogEntry("ERROR: POSIX 'case-insensitive match' for the parent path which violates the Microsoft OneDrive API namespace convention."); - } else { - addLogEntry("ERROR: Parent path is not in the database or online."); - } - addLogEntry("ERROR: Unable to upload this file: " ~ fileToUpload); + // Unable to read local file + addLogEntry("Skipping uploading this file as it cannot be read (file permissions or file corruption): " ~ fileToUpload); uploadFailed = true; } } else { - // Unable to read local file - addLogEntry("Skipping uploading this file as it cannot be read (file permissions or file corruption): " ~ fileToUpload); - uploadFailed = true; + // File disappeared before upload + addLogEntry("File disappeared locally before upload: " ~ fileToUpload); + // dont set uploadFailed = true; as the file disappeared before upload, thus nothing here failed } - + // Upload success or failure? 
if (uploadFailed) { // Need to add this to fileUploadFailures to capture at the end From 5a32a3298dbfaf2cbb6a295eab1fefd144eeabb9 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Wed, 24 Jan 2024 22:56:45 +1100 Subject: [PATCH 038/305] Update sync.d In a --resync --local-first scenario, a Shared Folder will always be 'remote' so we need to check the remote parent id, rather than parentItem details and ensure we have a DB Tie record for the shared folder in the DB --- src/sync.d | 62 +++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 50 insertions(+), 12 deletions(-) diff --git a/src/sync.d b/src/sync.d index 12df0505d..9f5b8a0ba 100644 --- a/src/sync.d +++ b/src/sync.d @@ -659,8 +659,7 @@ class SyncEngine { addLogEntry(); addLogEntry("The requested --single-directory path to sync has generated an error. Please correct this error and try again."); addLogEntry(); - Thread.sleep(dur!("msecs")(500)); - exit(EXIT_FAILURE); + forceExit(); } } @@ -4418,20 +4417,58 @@ class SyncEngine { addLogEntry("Attempting to query OneDrive API for this path: " ~ thisNewPathToCreate, ["debug"]); addLogEntry("parentItem details: " ~ to!string(parentItem), ["debug"]); - if (parentItem.driveId == appConfig.defaultDriveId) { + // Depending on the data within parentItem, will depend on what method we are using to search + // In a --local-first scenario, a Shared Folder will be 'remote' so we need to check the remote parent id, rather than parentItem details + Item queryItem; + + if ((appConfig.getValueBool("local_first")) && (parentItem.type == ItemType.remote)) { + // We are --local-first scenario and this folder is a potential shared object + addLogEntry("--localfirst & parentItem is a remote item object", ["debug"]); + + queryItem.driveId = parentItem.remoteDriveId; + queryItem.id = parentItem.remoteId; + + // Need to create the DB Tie for this object + addLogEntry("Creating a DB Tie for this Shared Folder", ["debug"]); + // New DB Tie Item to bind the 'remote' path to our parent path + Item tieDBItem; + // Set the name + tieDBItem.name = parentItem.name; + // Set the correct item type + tieDBItem.type = ItemType.dir; + // Set the right elements using the 'remote' of the parent as the 'actual' for this DB Tie + tieDBItem.driveId = parentItem.remoteDriveId; + tieDBItem.id = parentItem.remoteId; + // Set the correct mtime + tieDBItem.mtime = parentItem.mtime; + // Add tie DB record to the local database + addLogEntry("Adding DB Tie record to database: " ~ to!string(tieDBItem), ["debug"]); + itemDB.upsert(tieDBItem); + + } else { + // Use parent item for the query item + addLogEntry("Standard Query, use parentItem", ["debug"]); + queryItem = parentItem; + } + + if (queryItem.driveId == appConfig.defaultDriveId) { // Use getPathDetailsByDriveId - onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, thisNewPathToCreate); + addLogEntry("Selecting getPathDetailsByDriveId to query OneDrive API for path data", ["debug"]); + onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsByDriveId(queryItem.driveId, thisNewPathToCreate); } else { - // If the parentItem.driveId is not our driveId - the path we are looking for will not be at the logical location that getPathDetailsByDriveId + // Use searchDriveForPath to query OneDrive + addLogEntry("Selecting searchDriveForPath to query OneDrive API for path data", ["debug"]); + // If the queryItem.driveId is not our driveId - the path we are looking for will not be at the logical location that 
getPathDetailsByDriveId // can use - as it will always return a 404 .. even if the path actually exists (which is the whole point of this test) - // Search the parentItem.driveId for any folder name match that we are going to create, then compare response JSON items with parentItem.id + // Search the queryItem.driveId for any folder name match that we are going to create, then compare response JSON items with queryItem.id // If no match, the folder we want to create does not exist at the location we are seeking to create it at, thus generate a 404 - onlinePathData = createDirectoryOnlineOneDriveApiInstance.searchDriveForPath(parentItem.driveId, baseName(thisNewPathToCreate)); + onlinePathData = createDirectoryOnlineOneDriveApiInstance.searchDriveForPath(queryItem.driveId, baseName(thisNewPathToCreate)); + addLogEntry("onlinePathData: " ~to!string(onlinePathData), ["debug"]); // Process the response from searching the drive ulong responseCount = count(onlinePathData["value"].array); if (responseCount > 0) { - // Search 'name' matches were found .. need to match these against parentItem.id + // Search 'name' matches were found .. need to match these against queryItem.id bool foundDirectoryOnline = false; JSONValue foundDirectoryJSONItem; // Items were returned .. but is one of these what we are looking for? @@ -4440,7 +4477,7 @@ class SyncEngine { if (!isFileItem(childJSON)) { Item thisChildItem = makeItem(childJSON); // Direct Match Check - if ((parentItem.id == thisChildItem.parentId) && (baseName(thisNewPathToCreate) == thisChildItem.name)) { + if ((queryItem.id == thisChildItem.parentId) && (baseName(thisNewPathToCreate) == thisChildItem.name)) { // High confidence that this child folder is a direct match we are trying to create and it already exists online addLogEntry("Path we are searching for exists online (Direct Match): " ~ baseName(thisNewPathToCreate), ["debug"]); addLogEntry("childJSON: " ~ to!string(childJSON), ["debug"]); @@ -4448,6 +4485,7 @@ class SyncEngine { foundDirectoryJSONItem = childJSON; break; } + // Full Lower Case POSIX Match Check string childAsLower = toLower(childJSON["name"].str); string thisFolderNameAsLower = toLower(baseName(thisNewPathToCreate)); @@ -4465,7 +4503,8 @@ class SyncEngine { } if (foundDirectoryOnline) { - // Directory we are seeking was found online ... + // Directory we are seeking was found online ... + addLogEntry("The directory we are seeking was found online by using searchDriveForPath ...", ["debug"]); onlinePathData = foundDirectoryJSONItem; } else { // No 'search item matches found' - raise a 404 so that the exception handling will take over to create the folder @@ -4629,6 +4668,7 @@ class SyncEngine { // Is the response a valid JSON object - validation checking done in saveItem saveItem(onlinePathData); + // Shutdown API instance createDirectoryOnlineOneDriveApiInstance.shutdown(); // Free object and memory @@ -5688,7 +5728,6 @@ class SyncEngine { // Save JSON item details into the item database void saveItem(JSONValue jsonItem) { - // jsonItem has to be a valid object if (jsonItem.type() == JSONType.object) { // Check if the response JSON has an 'id', otherwise makeItem() fails with 'Key not found: id' @@ -6436,7 +6475,6 @@ class SyncEngine { if (isItemRemote(getPathDetailsAPIResponse)) { // Remote Directory .. 
need a DB Tie Item addLogEntry("Creating a DB Tie for this Shared Folder", ["debug"]); - // New DB Tie Item to bind the 'remote' path to our parent path Item tieDBItem; // Set the name From 8094d86ed5d8721a2518eb273b6d1b5c4c23870b Mon Sep 17 00:00:00 2001 From: JC-comp <147694781+JC-comp@users.noreply.github.com> Date: Thu, 25 Jan 2024 10:18:54 +0800 Subject: [PATCH 039/305] Fix file upload fallback (#2603) * Fix file upload fallback --- src/sync.d | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/src/sync.d b/src/sync.d index 9f5b8a0ba..d143c591b 100644 --- a/src/sync.d +++ b/src/sync.d @@ -3999,7 +3999,7 @@ class SyncEngine { // Are there any items to download post fetching the /delta data? if (!newLocalFilesToUploadToOneDrive.empty) { // There are elements to upload - addLogEntry("New items to upload to OneDrive: " ~ to!string(newLocalFilesToUploadToOneDrive.length), ["verbose"]); + addLogEntry("New items to upload to OneDrive: " ~ to!string(newLocalFilesToUploadToOneDrive.length)); // Reset totalDataToUpload totalDataToUpload = 0; @@ -4036,6 +4036,14 @@ class SyncEngine { // Cleanup array memory after uploading all files newLocalFilesToUploadToOneDrive = []; } + + if (!databaseItemsWhereContentHasChanged.empty) { + // There are changed local files that were in the DB to upload + addLogEntry("Changed local items to upload to OneDrive: " ~ to!string(databaseItemsWhereContentHasChanged.length)); + processChangedLocalItemsToUpload(); + // Cleanup array memory + databaseItemsWhereContentHasChanged = []; + } } // Scan this path for new data @@ -4310,7 +4318,7 @@ class SyncEngine { // for the path flow and create the folder that way void createDirectoryOnline(string thisNewPathToCreate) { // Log what we are doing - addLogEntry("OneDrive Client requested to create this directory online: " ~ thisNewPathToCreate); + addLogEntry("OneDrive Client requested to create this directory online: " ~ thisNewPathToCreate, ["verbose"]); Item parentItem; JSONValue onlinePathData; @@ -4889,7 +4897,8 @@ class SyncEngine { addLogEntry("fileDetailsFromOneDrive after exist online check: " ~ to!string(fileDetailsFromOneDrive), ["debug"]); // Does the data from online match our local file that we are attempting to upload as a new file? - if (!disableUploadValidation && performUploadIntegrityValidationChecks(fileDetailsFromOneDrive, fileToUpload, thisFileSize)) { + bool raiseWarning = false; + if (!disableUploadValidation && performUploadIntegrityValidationChecks(fileDetailsFromOneDrive, fileToUpload, thisFileSize, raiseWarning)) { // Save online item details to the database saveItem(fileDetailsFromOneDrive); } else { @@ -4900,13 +4909,11 @@ class SyncEngine { // The file 'version history' online will have to be used to 'recover' the prior online file string changedItemParentId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; string changedItemId = fileDetailsFromOneDrive["id"].str; + addLogEntry("Skipping uploading this file as moving it to upload as a modified file (online item already exists): " ~ fileToUpload); databaseItemsWhereContentHasChanged ~= [changedItemParentId, changedItemId, fileToUpload]; // In order for the processing of the local item as a 'changed' item, unfortunatly we need to save the online data to the local DB saveItem(fileDetailsFromOneDrive); - - // Attempt the processing of the different local file - processChangedLocalItemsToUpload(); } } catch (OneDriveException exception) { // If we get a 404 .. the file is not online .. 
this is what we want .. file does not exist online @@ -6862,7 +6869,7 @@ class SyncEngine { } // Perform integrity validation of the file that was uploaded - bool performUploadIntegrityValidationChecks(JSONValue uploadResponse, string localFilePath, ulong localFileSize) { + bool performUploadIntegrityValidationChecks(JSONValue uploadResponse, string localFilePath, ulong localFileSize, bool raiseWarning=true) { bool integrityValid = false; @@ -6878,7 +6885,7 @@ class SyncEngine { // Uploaded file integrity intact addLogEntry("Uploaded local file matches reported online size and hash values", ["debug"]); integrityValid = true; - } else { + } else if (raiseWarning) { // Upload integrity failure .. what failed? // There are 2 scenarios where this happens: // 1. Failed Transfer From dbe92514c243221ea65ea73f91a4481b24d11b7d Mon Sep 17 00:00:00 2001 From: JC-comp <147694781+JC-comp@users.noreply.github.com> Date: Sat, 27 Jan 2024 06:55:11 +0800 Subject: [PATCH 040/305] Refactoring CurlEngine + Add Curl Socket Reuse Support (#2604) * Refactor CurlEngine - Add socket cleanup/setup/execution - Add response class * Add support for reusing curl socket * Fix deconstruct brhavior --- src/curlEngine.d | 282 ++++++++++++++++++++++++++++++++++++++++++++++- src/main.d | 3 + src/onedrive.d | 20 ++-- src/util.d | 6 +- 4 files changed, 292 insertions(+), 19 deletions(-) diff --git a/src/curlEngine.d b/src/curlEngine.d index 7a396da12..3875ade63 100644 --- a/src/curlEngine.d +++ b/src/curlEngine.d @@ -6,21 +6,204 @@ import std.net.curl; import etc.c.curl: CurlOption; import std.datetime; import std.conv; +import std.file; +import std.json; import std.stdio; +import std.range; // What other modules that we have created do we need to import? import log; +class CurlResponse { + HTTP.Method method; + const(char)[] url; + const(char)[][const(char)[]] requestHeaders; + const(char)[] postBody; + + string[string] responseHeaders; + HTTP.StatusLine statusLine; + char[] content; + + void reset() { + method = HTTP.Method.undefined; + url = null; + requestHeaders = null; + postBody = null; + + responseHeaders = null; + object.destroy(statusLine); + content = null; + } + + void addRequestHeader(const(char)[] name, const(char)[] value) { + requestHeaders[name] = value; + } + + void connect(HTTP.Method method, const(char)[] url) { + this.method = method; + this.url = url; + } + + const JSONValue json() { + JSONValue json; + try { + json = content.parseJSON(); + } catch (JSONException e) { + // Log that a JSON Exception was caught, dont output the HTML response from OneDrive + addLogEntry("JSON Exception caught when performing HTTP operations - use --debug-https to diagnose further", ["debug"]); + } + return json; + }; + + void update(HTTP *http) { + this.responseHeaders = http.responseHeaders(); + this.statusLine = http.statusLine; + } + + @safe pure HTTP.StatusLine getStatus() { + return this.statusLine; + } + + // Return the current value of retryAfterValue + ulong getRetryAfterValue() { + ulong delayBeforeRetry; + // is retry-after in the response headers + if ("retry-after" in responseHeaders) { + // Set the retry-after value + addLogEntry("curlEngine.http.perform() => Received a 'Retry-After' Header Response with the following value: " ~ to!string(responseHeaders["retry-after"]), ["debug"]); + addLogEntry("curlEngine.http.perform() => Setting retryAfterValue to: " ~ responseHeaders["retry-after"], ["debug"]); + delayBeforeRetry = to!ulong(responseHeaders["retry-after"]); + } else { + // Use a 120 second delay as a 
default given header value was zero + // This value is based on log files and data when determining correct process for 429 response handling + delayBeforeRetry = 120; + // Update that we are over-riding the provided value with a default + addLogEntry("HTTP Response Header retry-after value was 0 - Using a preconfigured default of: " ~ to!string(delayBeforeRetry), ["debug"]); + } + + return delayBeforeRetry; // default to 60 seconds + } + + const string parseHeaders(const(string[string]) headers) { + string responseHeadersStr = ""; + foreach (const(char)[] header; headers.byKey()) { + responseHeadersStr ~= "> " ~ header ~ ": " ~ headers[header] ~ "\n"; + } + return responseHeadersStr; + } + + + const string parseHeaders(const(const(char)[][const(char)[]]) headers) { + string responseHeadersStr = ""; + foreach (string header; headers.byKey()) { + if (header == "Authorization") + continue; + responseHeadersStr ~= "< " ~ header ~ ": " ~ headers[header] ~ "\n"; + } + return responseHeadersStr; + } + + const string dumpDebug() { + import std.range; + import std.format : format; + + string str = ""; + str ~= format("< %s %s\n", method, url); + if (!requestHeaders.empty) { + str ~= parseHeaders(requestHeaders); + } + if (!postBody.empty) { + str ~= format("----\n%s\n----\n", postBody); + } + str ~= format("< %s\n", statusLine); + if (!responseHeaders.empty) { + str ~= parseHeaders(responseHeaders); + } + return str; + } + + const string dumpResponse() { + import std.range; + import std.format : format; + + string str = ""; + if (!content.empty) { + str ~= format("----\n%s\n----\n", content); + } + return str; + } + + override string toString() const { + string str = "Curl debugging: \n"; + str ~= dumpDebug(); + str ~= "Curl response: \n"; + str ~= dumpResponse(); + return str; + } + + CurlResponse dup() { + CurlResponse copy = new CurlResponse(); + copy.method = method; + copy.url = url; + copy.requestHeaders = requestHeaders; + copy.postBody = postBody; + + copy.responseHeaders = responseHeaders; + copy.statusLine = statusLine; + copy.content = content; + + return copy; + } +} + class CurlEngine { + + __gshared CurlEngine[] curlEnginePool; + + static CurlEngine get() { + synchronized(CurlEngine.classinfo) { + if (curlEnginePool.empty) { + return new CurlEngine; + } else { + CurlEngine curlEngine = curlEnginePool[$-1]; + curlEnginePool.popBack(); + return curlEngine; + } + } + } + + static releaseAll() { + synchronized(CurlEngine.classinfo) { + foreach(curlEngine; curlEnginePool) { + curlEngine.shutdown(); + } + curlEnginePool = null; + } + } + + void release() { + cleanUp(); + synchronized(CurlEngine.classinfo) { + curlEnginePool ~= this; + } + } + HTTP http; bool keepAlive; ulong dnsTimeout; - + CurlResponse response; + this() { http = HTTP(); + response = new CurlResponse(); + } + + ~this() { + object.destroy(http); + object.destroy(response); } - void initialise(ulong dnsTimeout, ulong connectTimeout, ulong dataTimeout, ulong operationTimeout, int maxRedirects, bool httpsDebug, string userAgent, bool httpProtocol, ulong userRateLimit, ulong protocolVersion, bool keepAlive=false) { + void initialise(ulong dnsTimeout, ulong connectTimeout, ulong dataTimeout, ulong operationTimeout, int maxRedirects, bool httpsDebug, string userAgent, bool httpProtocol, ulong userRateLimit, ulong protocolVersion, bool keepAlive=true) { // Setting this to false ensures that when we close the curl instance, any open sockets are closed - which we need to do when running // multiple threads and API instances at the 
same time otherwise we run out of local files | sockets pretty quickly this.keepAlive = keepAlive; @@ -103,11 +286,104 @@ class CurlEngine { } } + void addRequestHeader(const(char)[] name, const(char)[] value) { + http.addRequestHeader(name, value); + response.addRequestHeader(name, value); + } + void connect(HTTP.Method method, const(char)[] url) { if (!keepAlive) - http.addRequestHeader("Connection", "close"); + addRequestHeader("Connection", "close"); http.method = method; http.url = url; + response.connect(method, url); + } + + void setContent(const(char)[] contentType, const(char)[] sendData) { + addRequestHeader("Content-Type", contentType); + if (sendData) { + http.contentLength = sendData.length; + http.onSend = (void[] buf) { + import std.algorithm: min; + size_t minLen = min(buf.length, sendData.length); + if (minLen == 0) return 0; + buf[0 .. minLen] = cast(void[]) sendData[0 .. minLen]; + sendData = sendData[minLen .. $]; + return minLen; + }; + response.postBody = sendData; + } + } + + void setFile(File* file, ulong offsetSize) { + addRequestHeader("Content-Type", "application/octet-stream"); + http.onSend = data => file.rawRead(data).length; + http.contentLength = offsetSize; + } + + CurlResponse execute() { + scope(exit) { + cleanUp(); + } + http.onReceive = (ubyte[] data) { + response.content ~= data; + // HTTP Server Response Code Debugging if --https-debug is being used + + return data.length; + }; + http.perform(); + response.update(&http); + return response.dup; + } + + CurlResponse download(string originalFilename, string downloadFilename) { + // Threshold for displaying download bar + long thresholdFileSize = 4 * 2^^20; // 4 MiB + + CurlResponse response = new CurlResponse(); + // open downloadFilename as write in binary mode + auto file = File(downloadFilename, "wb"); + + // function scopes + scope(exit) { + cleanUp(); + if (file.isOpen()){ + // close open file + file.close(); + } + } + + http.onReceive = (ubyte[] data) { + file.rawWrite(data); + return data.length; + }; + + http.perform(); + + // Rename downloaded file + rename(downloadFilename, originalFilename); + + response.update(&http); + return response; + } + + void cleanUp() { + // Reset any values to defaults, freeing any set objects + http.clearRequestHeaders(); + http.onSend = null; + http.onReceive = null; + http.onReceiveHeader = null; + http.onReceiveStatusLine = null; + http.onProgress = delegate int(size_t dltotal, size_t dlnow, size_t ultotal, size_t ulnow) { + return 0; + }; + http.contentLength = 0; + response.reset(); + } + + void shutdown() { + // Shut down the curl instance & close any open sockets + http.shutdown(); } void setDisableSSLVerifyPeer() { diff --git a/src/main.d b/src/main.d index c0c90f61f..bf9cff832 100644 --- a/src/main.d +++ b/src/main.d @@ -1126,6 +1126,9 @@ void performStandardExitProcess(string scopeCaller = null) { } object.destroy(itemDB); } + + // Shutdown cached sockets + CurlEngine.releaseAll(); // Set all objects to null if (scopeCaller == "failureScope") { diff --git a/src/onedrive.d b/src/onedrive.d index ef33cb50d..745b55118 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -225,9 +225,9 @@ class OneDriveApi { } // Initialise the OneDrive API class - bool initialise(bool keepAlive=false) { + bool initialise(bool keepAlive=true) { // Initialise the curl engine - curlEngine = new CurlEngine(); + curlEngine = CurlEngine.get(); curlEngine.initialise(appConfig.getValueLong("dns_timeout"), appConfig.getValueLong("connect_timeout"), 
appConfig.getValueLong("data_timeout"), appConfig.getValueLong("operation_timeout"), appConfig.defaultMaxRedirects, appConfig.getValueBool("debug_https"), appConfig.getValueString("user_agent"), appConfig.getValueBool("force_http_11"), appConfig.getValueLong("rate_limit"), appConfig.getValueLong("ip_protocol_version"), keepAlive); // Authorised value to return @@ -489,17 +489,11 @@ class OneDriveApi { object.destroy(webhook); } - // Reset any values to defaults, freeing any set objects - curlEngine.http.clearRequestHeaders(); - curlEngine.http.onSend = null; - curlEngine.http.onReceive = null; - curlEngine.http.onReceiveHeader = null; - curlEngine.http.onReceiveStatusLine = null; - curlEngine.http.contentLength = 0; - // Shut down the curl instance & close any open sockets - curlEngine.http.shutdown(); - // Free object and memory - object.destroy(curlEngine); + // Release curl instance + if (curlEngine !is null) { + curlEngine.release(); + curlEngine = null; + } } // Authenticate this client against Microsoft OneDrive API diff --git a/src/util.d b/src/util.d index 629b1d5a7..c75e62b1a 100644 --- a/src/util.d +++ b/src/util.d @@ -206,7 +206,7 @@ bool testInternetReachability(ApplicationConfig appConfig) { bool result = false; try { // Use preconfigured object with all the correct http values assigned - curlEngine = new CurlEngine(); + curlEngine = CurlEngine.get(); curlEngine.initialise(appConfig.getValueLong("dns_timeout"), appConfig.getValueLong("connect_timeout"), appConfig.getValueLong("data_timeout"), appConfig.getValueLong("operation_timeout"), appConfig.defaultMaxRedirects, appConfig.getValueBool("debug_https"), appConfig.getValueString("user_agent"), appConfig.getValueBool("force_http_11"), appConfig.getValueLong("rate_limit"), appConfig.getValueLong("ip_protocol_version")); // Configure the remaining items required @@ -228,8 +228,8 @@ bool testInternetReachability(ApplicationConfig appConfig) { displayOneDriveErrorMessage(e.msg, getFunctionName!({})); } finally { if (curlEngine) { - curlEngine.http.shutdown(); - object.destroy(curlEngine); + curlEngine.release(); + curlEngine = null; } } From 1a3724d44b75bd351f923a3ab8369b8206042196 Mon Sep 17 00:00:00 2001 From: JC-comp <147694781+JC-comp@users.noreply.github.com> Date: Sat, 27 Jan 2024 07:03:46 +0800 Subject: [PATCH 041/305] Add processing dots for all long running operation (#2606) Changes - Add helper for processing log with message and dots - Set a rate limit of 1 second for processing dots. - Showing processing dots during 1. db consistency check 2. file system walk. 
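As a usage sketch of the new helpers (the helper names match the src/log.d additions in the diff below; the loop collection 'databaseItemsToCheck' and the worker 'checkDatabaseItemForConsistency' are placeholders, not real symbols):

    // Hypothetical long-running loop wired up to the new src/log.d helpers
    addProcessingLogHeaderEntry("Performing a database consistency and integrity check on locally stored data");
    foreach (dbItem; databaseItemsToCheck) {          // 'databaseItemsToCheck' is a placeholder collection
        addProcessingDotEntry();                      // rate limited internally to one '.' per second
        checkDatabaseItemForConsistency(dbItem);      // placeholder for the per-item work
    }
    addLogEntry("\n", ["consoleOnlyNoNewLine"]);      // close out the line of dots on the console
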
--- src/log.d | 19 +++++++++++++++++++ src/sync.d | 45 ++++++++++++++++++++++++--------------------- 2 files changed, 43 insertions(+), 21 deletions(-) diff --git a/src/log.d b/src/log.d index 2bf6d1546..822197ca5 100644 --- a/src/log.d +++ b/src/log.d @@ -19,6 +19,9 @@ version(Notifications) { // Shared module object shared LogBuffer logBuffer; +// Timer for logging +shared MonoTime lastInsertedTime; + class LogBuffer { private: string[3][] buffer; @@ -138,6 +141,7 @@ class LogBuffer { // Function to initialize the logging system void initialiseLogging(bool verboseLogging = false, bool debugLogging = false) { logBuffer = cast(shared) new LogBuffer(verboseLogging, debugLogging); + lastInsertedTime = MonoTime.currTime(); } // Function to add a log entry with multiple levels @@ -145,6 +149,21 @@ void addLogEntry(string message = "", string[] levels = ["info"]) { logBuffer.logThisMessage(message, levels); } +void addProcessingLogHeaderEntry(string message = "") { + addLogEntry(message, ["logFileOnly"]); + // Use the dots to show the application is 'doing something' + addLogEntry(message ~ " .", ["consoleOnlyNoNewLine"]); +} + +void addProcessingDotEntry() { + if (MonoTime.currTime() - lastInsertedTime < dur!"seconds"(1)) { + // Don't flood the log buffer + return; + } + lastInsertedTime = MonoTime.currTime(); + addLogEntry(".", ["consoleOnlyNoNewLine"]); +} + // Function to set logFilePath and enable logging to a file void enableLogFileOutput(string configuredLogFilePath) { logBuffer.logFilePath = configuredLogFilePath; diff --git a/src/sync.d b/src/sync.d index d143c591b..0ee33fe47 100644 --- a/src/sync.d +++ b/src/sync.d @@ -731,10 +731,7 @@ class SyncEngine { // Dynamic output for non-verbose and verbose run so that the user knows something is being retreived from the OneDrive API if (appConfig.verbosityCount == 0) { if (!appConfig.surpressLoggingOutput) { - addLogEntry("Fetching items from the OneDrive API for Drive ID: " ~ driveIdToQuery, ["logFileOnly"]); - - // Use the dots to show the application is 'doing something' - addLogEntry("Fetching items from the OneDrive API for Drive ID: " ~ driveIdToQuery ~ " .", ["consoleOnlyNoNewLine"]); + addProcessingLogHeaderEntry("Fetching items from the OneDrive API for Drive ID: " ~ driveIdToQuery); } } else { addLogEntry("Fetching /delta response from the OneDrive API for Drive ID: " ~ driveIdToQuery, ["verbose"]); @@ -768,7 +765,7 @@ class SyncEngine { if (appConfig.verbosityCount == 0) { // Dynamic output for a non-verbose run so that the user knows something is happening if (!appConfig.surpressLoggingOutput) { - addLogEntry(".", ["consoleOnlyNoNewLine"]); + addProcessingDotEntry(); } } else { addLogEntry("Processing API Response Bundle: " ~ to!string(responseBundleCount) ~ " - Quantity of 'changes|items' in this bundle to process: " ~ to!string(nrChanges), ["verbose"]); @@ -907,9 +904,7 @@ class SyncEngine { // Dynamic output for a non-verbose run so that the user knows something is happening if (!appConfig.surpressLoggingOutput) { // Logfile entry - addLogEntry("Processing " ~ to!string(jsonItemsToProcess.length) ~ " applicable changes and items received from Microsoft OneDrive", ["logFileOnly"]); - // Console only output - addLogEntry("Processing " ~ to!string(jsonItemsToProcess.length) ~ " applicable changes and items received from Microsoft OneDrive ", ["consoleOnlyNoNewLine"]); + addProcessingLogHeaderEntry("Processing " ~ to!string(jsonItemsToProcess.length) ~ " applicable changes and items received from Microsoft OneDrive"); if 
(appConfig.verbosityCount != 0) { // Close out the console only processing line above, if we are doing verbose or above logging @@ -926,7 +921,7 @@ class SyncEngine { if (appConfig.verbosityCount == 0) { // Dynamic output for a non-verbose run so that the user knows something is happening if (!appConfig.surpressLoggingOutput) { - addLogEntry(".", ["consoleOnlyNoNewLine"]); + addProcessingDotEntry(); } } else { addLogEntry("Processing OneDrive JSON item batch [" ~ to!string(batchesProcessed) ~ "/" ~ to!string(batchCount) ~ "] to ensure consistent local state", ["verbose"]); @@ -2708,7 +2703,7 @@ class SyncEngine { // Log what we are doing if (!appConfig.surpressLoggingOutput) { - addLogEntry("Performing a database consistency and integrity check on locally stored data ... "); + addProcessingLogHeaderEntry("Performing a database consistency and integrity check on locally stored data"); } // What driveIDsArray do we use? If we are doing a --single-directory we need to use just the drive id associated with that operation @@ -2796,6 +2791,9 @@ class SyncEngine { } } } + + // Close out the '....' being printed to the console + addLogEntry("\n", ["consoleOnlyNoNewLine"]); // Are we doing a --download-only sync? if (!appConfig.getValueBool("download_only")) { @@ -2833,6 +2831,8 @@ class SyncEngine { // Log what we are doing addLogEntry("Processing " ~ logOutputPath, ["verbose"]); + // Add a processing '.' + addProcessingDotEntry(); // Determine which action to take final switch (dbItem.type) { @@ -3973,9 +3973,9 @@ class SyncEngine { if (isDir(path)) { if (!appConfig.surpressLoggingOutput) { if (!cleanupLocalFiles) { - addLogEntry("Scanning the local file system '" ~ logPath ~ "' for new data to upload ..."); + addProcessingLogHeaderEntry("Scanning the local file system '" ~ logPath ~ "' for new data to upload"); } else { - addLogEntry("Scanning the local file system '" ~ logPath ~ "' for data to cleanup ..."); + addProcessingLogHeaderEntry("Scanning the local file system '" ~ logPath ~ "' for data to cleanup"); } } } @@ -3985,6 +3985,7 @@ class SyncEngine { // Perform the filesystem walk of this path, building an array of new items to upload scanPathForNewData(path); + addLogEntry("\n", ["consoleOnlyNoNewLine"]); // To finish off the processing items, this is needed to reflect this in the log addLogEntry("------------------------------------------------------------------", ["debug"]); @@ -3999,7 +4000,7 @@ class SyncEngine { // Are there any items to download post fetching the /delta data? if (!newLocalFilesToUploadToOneDrive.empty) { // There are elements to upload - addLogEntry("New items to upload to OneDrive: " ~ to!string(newLocalFilesToUploadToOneDrive.length)); + addProcessingLogHeaderEntry("New items to upload to OneDrive: " ~ to!string(newLocalFilesToUploadToOneDrive.length)); // Reset totalDataToUpload totalDataToUpload = 0; @@ -4048,7 +4049,9 @@ class SyncEngine { // Scan this path for new data void scanPathForNewData(string path) { - + // Add a processing '.' + addProcessingDotEntry(); + ulong maxPathLength; ulong pathWalkLength; @@ -4734,11 +4737,14 @@ class SyncEngine { foreach (chunk; newLocalFilesToUploadToOneDrive.chunks(batchSize)) { uploadNewLocalFileItemsInParallel(chunk); } + addLogEntry("\n", ["consoleOnlyNoNewLine"]); } // Upload the file batches in parallel void uploadNewLocalFileItemsInParallel(string[] array) { foreach (i, fileToUpload; taskPool.parallel(array)) { + // Add a processing '.' 
+ addProcessingDotEntry(); addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]); uploadNewFile(fileToUpload); addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]); @@ -6036,10 +6042,7 @@ class SyncEngine { // Dynamic output for a non-verbose run so that the user knows something is happening if (appConfig.verbosityCount == 0) { if (!appConfig.surpressLoggingOutput) { - addLogEntry("Fetching items from the OneDrive API for Drive ID: " ~ searchItem.driveId, ["logFileOnly"]); - - // Use the dots to show the application is 'doing something' - addLogEntry("Fetching items from the OneDrive API for Drive ID: " ~ searchItem.driveId ~ " .", ["consoleOnlyNoNewLine"]); + addProcessingLogHeaderEntry("Fetching items from the OneDrive API for Drive ID: " ~ searchItem.driveId); } } else { addLogEntry("Generating a /delta response from the OneDrive API for Drive ID: " ~ searchItem.driveId, ["verbose"]); @@ -6244,7 +6247,7 @@ class SyncEngine { if (appConfig.verbosityCount == 0) { // Dynamic output for a non-verbose run so that the user knows something is happening if (!appConfig.surpressLoggingOutput) { - addLogEntry(".", ["consoleOnlyNoNewLine"]); + addProcessingDotEntry(); } } @@ -7163,7 +7166,7 @@ class SyncEngine { deltaLink = itemDB.getDeltaLink(driveIdToQuery, itemIdToQuery); // Log what we are doing - addLogEntry("Querying the change status of Drive ID: " ~ driveIdToQuery ~ " .", ["consoleOnlyNoNewLine"]); + addProcessingLogHeaderEntry("Querying the change status of Drive ID: " ~ driveIdToQuery); // Query the OenDrive API using the applicable details, following nextLink if applicable // Create a new API Instance for querying /delta and initialise it @@ -7173,7 +7176,7 @@ class SyncEngine { for (;;) { // Add a processing '.' - addLogEntry(".", ["consoleOnlyNoNewLine"]); + addProcessingDotEntry(); // Get the /delta changes via the OneDrive API // getDeltaChangesByItemId has the re-try logic for transient errors From 593c6ead80df8deb7281000fbf02c78f6e098afb Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sat, 3 Feb 2024 05:49:07 +1100 Subject: [PATCH 042/305] Change quota tracking and handling * Change how online space available is being tracked to significantly reduce quota API calls when uploading new or modified data --- src/config.d | 8 +- src/main.d | 3 + src/sync.d | 341 ++++++++++++++++++++++++++++++++++++--------------- 3 files changed, 251 insertions(+), 101 deletions(-) diff --git a/src/config.d b/src/config.d index c0187e614..2166c1323 100644 --- a/src/config.d +++ b/src/config.d @@ -123,13 +123,13 @@ class ApplicationConfig { bool apiWasInitialised = false; bool syncEngineWasInitialised = false; + + // Important Account Details string accountType; string defaultDriveId; string defaultRootId; - ulong remainingFreeSpace = 0; - bool quotaAvailable = true; - bool quotaRestricted = false; - + + // Sync Operations bool fullScanTrueUpRequired = false; bool surpressLoggingOutput = false; diff --git a/src/main.d b/src/main.d index bf9cff832..a85299787 100644 --- a/src/main.d +++ b/src/main.d @@ -932,6 +932,9 @@ int main(string[] cliArgs) { // Attempt to reset syncFailures syncEngineInstance.resetSyncFailures(); + // Update cached quota details from online as this may have changed online in the background outside of this application + syncEngineInstance.freshenCachedDriveQuotaDetails(); + // Did the user specify --upload-only? 
if (appConfig.getValueBool("upload_only")) { // Perform the --upload-only sync process diff --git a/src/sync.d b/src/sync.d index 0ee33fe47..3d5b8c248 100644 --- a/src/sync.d +++ b/src/sync.d @@ -61,6 +61,16 @@ class SyncException: Exception { } } +struct driveDetailsCache { + // - driveId is the drive for the operations were items need to be stored + // - quotaRestricted details a bool value as to if that drive is restricting our ability to understand if there is space available. Some 'Business' and 'SharePoint' restrict, and most (if not all) shared folders it cant be determined if there is free space + // - quotaAvailable is a ulong value that stores the value of what the current free space is available online + string driveId; + bool quotaRestricted; + bool quotaAvailable; + ulong quotaRemaining; +} + class SyncEngine { // Class Variables ApplicationConfig appConfig; @@ -79,8 +89,8 @@ class SyncEngine { JSONValue[] fileJSONItemsToDownload; // Array of paths that failed to download string[] fileDownloadFailures; - // Array of all OneDrive driveId's that have been seen - string[] driveIDsArray; + // Associative array mapping of all OneDrive driveId's that have been seen, mapped with driveDetailsCache data for reference + driveDetailsCache[string] onlineDriveDetails; // List of items we fake created when using --dry-run string[2][] idsFaked; // List of paths we fake deleted when using --dry-run @@ -387,53 +397,52 @@ class SyncEngine { appConfig.accountType = defaultOneDriveDriveDetails["driveType"].str; appConfig.defaultDriveId = defaultOneDriveDriveDetails["id"].str; - // Get the initial remaining size from OneDrive API response JSON - // This will be updated as we upload data to OneDrive - if (hasQuota(defaultOneDriveDriveDetails)) { - if ("remaining" in defaultOneDriveDriveDetails["quota"]){ - // use the value provided - appConfig.remainingFreeSpace = defaultOneDriveDriveDetails["quota"]["remaining"].integer; - } - } + // Make sure that appConfig.defaultDriveId is in our driveIDs array to use when checking if item is in database + // Keep the driveDetailsCache array with unique entries only + driveDetailsCache cachedOnlineDriveData; + if (!canFindDriveId(appConfig.defaultDriveId, cachedOnlineDriveData)) { + // Add this driveId to the drive cache, which then also sets for the defaultDriveId: + // - quotaRestricted; + // - quotaAvailable; + // - quotaRemaining; + addOrUpdateOneDriveOnlineDetails(appConfig.defaultDriveId); + } + + // Fetch the details from cachedOnlineDriveData + cachedOnlineDriveData = getDriveDetails(appConfig.defaultDriveId); + // - cachedOnlineDriveData.quotaRestricted; + // - cachedOnlineDriveData.quotaAvailable; + // - cachedOnlineDriveData.quotaRemaining; // In some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero - if (appConfig.remainingFreeSpace <= 0) { + if (cachedOnlineDriveData.quotaRemaining <= 0) { // free space is <= 0 .. why ? if ("remaining" in defaultOneDriveDriveDetails["quota"]) { if (appConfig.accountType == "personal") { // zero space available addLogEntry("ERROR: OneDrive account currently has zero space available. Please free up some space online."); - appConfig.quotaAvailable = false; } else { // zero space available is being reported, maybe being restricted? addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. 
Please fix by speaking to your OneDrive / Office 365 Administrator."); - appConfig.quotaRestricted = true; } } else { // json response was missing a 'remaining' value if (appConfig.accountType == "personal") { addLogEntry("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online."); - appConfig.quotaAvailable = false; } else { // quota details not available addLogEntry("ERROR: OneDrive quota information is being restricted. Please fix by speaking to your OneDrive / Office 365 Administrator."); - appConfig.quotaRestricted = true; } } } + // What did we set based on the data from the JSON - addLogEntry("appConfig.accountType = " ~ appConfig.accountType, ["debug"]); - addLogEntry("appConfig.defaultDriveId = " ~ appConfig.defaultDriveId, ["debug"]); - addLogEntry("appConfig.remainingFreeSpace = " ~ to!string(appConfig.remainingFreeSpace), ["debug"]); - addLogEntry("appConfig.quotaAvailable = " ~ to!string(appConfig.quotaAvailable), ["debug"]); - addLogEntry("appConfig.quotaRestricted = " ~ to!string(appConfig.quotaRestricted), ["debug"]); + addLogEntry("appConfig.accountType = " ~ appConfig.accountType, ["debug"]); + addLogEntry("appConfig.defaultDriveId = " ~ appConfig.defaultDriveId, ["debug"]); + addLogEntry("cachedOnlineDriveData.quotaRemaining = " ~ to!string(cachedOnlineDriveData.quotaRemaining), ["debug"]); + addLogEntry("cachedOnlineDriveData.quotaAvailable = " ~ to!string(cachedOnlineDriveData.quotaAvailable), ["debug"]); + addLogEntry("cachedOnlineDriveData.quotaRestricted = " ~ to!string(cachedOnlineDriveData.quotaRestricted), ["debug"]); - // Make sure that appConfig.defaultDriveId is in our driveIDs array to use when checking if item is in database - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, appConfig.defaultDriveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= appConfig.defaultDriveId; - } } else { // Handle the invalid JSON response throw new accountDetailsException(); @@ -529,7 +538,7 @@ class SyncEngine { // performFullScanTrueUp value addLogEntry("Perform a Full Scan True-Up: " ~ to!string(appConfig.fullScanTrueUpRequired), ["debug"]); - // Fetch the API response of /delta to track changes on OneDrive + // Fetch the API response of /delta to track changes that were performed online fetchOneDriveDeltaAPIResponse(null, null, null); // Process any download activities or cleanup actions processDownloadActivities(); @@ -959,11 +968,12 @@ class SyncEngine { itemDB.setDeltaLink(driveIdToQuery, itemIdToQuery, latestDeltaLink); } - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, driveIdToQuery)) { - // Add this driveId to the array of driveId's we know about - driveIDsArray ~= driveIdToQuery; - } + // Keep the driveDetailsCache array with unique entries only + driveDetailsCache cachedOnlineDriveData; + if (!canFindDriveId(driveIdToQuery, cachedOnlineDriveData)) { + // Add this driveId to the drive cache + addOrUpdateOneDriveOnlineDetails(driveIdToQuery); + } } // Process the /delta API JSON response items @@ -2042,8 +2052,7 @@ class SyncEngine { // file exists locally already Item databaseItem; bool fileFoundInDB = false; - - foreach (driveId; driveIDsArray) { + foreach (driveId; onlineDriveDetails.keys) { if (itemDB.selectByPath(newItemPath, driveId, databaseItem)) { fileFoundInDB = true; break; @@ -2487,13 +2496,17 @@ class SyncEngine { addLogEntry("Default Drive ID: " ~ 
appConfig.defaultDriveId, ["verbose"]); addLogEntry("Default Root ID: " ~ appConfig.defaultRootId, ["verbose"]); + // Fetch the details from cachedOnlineDriveData + driveDetailsCache cachedOnlineDriveData; + cachedOnlineDriveData = getDriveDetails(appConfig.defaultDriveId); + // What do we display here for space remaining - if (appConfig.remainingFreeSpace > 0) { + if (cachedOnlineDriveData.quotaRemaining > 0) { // Display the actual value - addLogEntry("Remaining Free Space: " ~ to!string(byteToGibiByte(appConfig.remainingFreeSpace)) ~ " GB (" ~ to!string(appConfig.remainingFreeSpace) ~ " bytes)", ["verbose"]); + addLogEntry("Remaining Free Space: " ~ to!string(byteToGibiByte(cachedOnlineDriveData.quotaRemaining)) ~ " GB (" ~ to!string(cachedOnlineDriveData.quotaRemaining) ~ " bytes)", ["verbose"]); } else { // zero or non-zero value or restricted - if (!appConfig.quotaRestricted){ + if (!cachedOnlineDriveData.quotaRestricted){ addLogEntry("Remaining Free Space: 0 KB", ["verbose"]); } else { addLogEntry("Remaining Free Space: Not Available", ["verbose"]); @@ -2711,7 +2724,10 @@ class SyncEngine { if (singleDirectoryScope) { consistencyCheckDriveIdsArray ~= singleDirectoryScopeDriveId; } else { - consistencyCheckDriveIdsArray = driveIDsArray; + foreach (driveId; onlineDriveDetails.keys) { + // For each key, add this to consistencyCheckDriveIdsArray + consistencyCheckDriveIdsArray ~= driveId; + } } // Create a new DB blank item @@ -3514,9 +3530,13 @@ class SyncEngine { Item dbItem; itemDB.selectById(changedItemParentId, changedItemId, dbItem); - // Query the available space online - // This will update appConfig.quotaAvailable & appConfig.quotaRestricted values - remainingFreeSpace = getRemainingFreeSpace(dbItem.driveId); + // Fetch the details from cachedOnlineDriveData + // - cachedOnlineDriveData.quotaRestricted; + // - cachedOnlineDriveData.quotaAvailable; + // - cachedOnlineDriveData.quotaRemaining; + driveDetailsCache cachedOnlineDriveData; + cachedOnlineDriveData = getDriveDetails(dbItem.driveId); + remainingFreeSpace = cachedOnlineDriveData.quotaRemaining; // Get the file size from the actual file ulong thisFileSizeLocal = getSize(localFilePath); @@ -3528,32 +3548,32 @@ class SyncEngine { thisFileSizeFromDB = 0; } - // remainingFreeSpace online includes the current file online - // we need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value + // 'remainingFreeSpace' online includes the current file online + // We need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal; // Based on what we know, for this thread - can we safely upload this modified local file? 
- addLogEntry("This Thread Current Free Space Online: " ~ to!string(remainingFreeSpace), ["debug"]); + addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpace), ["debug"]); addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]); - JSONValue uploadResponse; bool spaceAvailableOnline = false; - // If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated - // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true - // If 'business' accounts, if driveId == defaultDriveId, then we will have data - // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true + // If 'personal' accounts, if driveId == defaultDriveId, then we will have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused + // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - cachedOnlineDriveData.quotaRestricted will be set as true + // If 'business' accounts, if driveId == defaultDriveId, then we will potentially have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused + // If 'business' accounts, if driveId != defaultDriveId, then we will potentially have quota data, but it most likely will be a 0 value - cachedOnlineDriveData.quotaRestricted will be set as true - // What was the latest getRemainingFreeSpace() value? - if (appConfig.quotaAvailable) { + // Is there quota available for the given drive where we are uploading to? + if (cachedOnlineDriveData.quotaAvailable) { // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload? if (calculatedSpaceOnlinePostUpload > 0) { // Based on this thread action, we beleive that there is space available online to upload - proceed spaceAvailableOnline = true; } } + // Is quota being restricted? 
- if (appConfig.quotaRestricted) { + if (cachedOnlineDriveData.quotaRestricted) { // Space available online is being restricted - so we have no way to really know if there is space available online spaceAvailableOnline = true; } @@ -3606,8 +3626,12 @@ class SyncEngine { // Save JSON item in database saveItem(uploadResponse); + // Update the 'cachedOnlineDriveData' record for this 'dbItem.driveId' so that this is tracked as accuratly as possible for other threads + updateDriveDetailsCache(dbItem.driveId, cachedOnlineDriveData.quotaRestricted, cachedOnlineDriveData.quotaAvailable, thisFileSizeLocal); + + // Check the integrity of the uploaded modified file if not in a --dry-run scenario if (!dryRun) { - // Check the integrity of the uploaded modified file + // Perform the integrity of the uploaded modified file performUploadIntegrityValidationChecks(uploadResponse, localFilePath, thisFileSizeLocal); // Update the date / time of the file online to match the local item @@ -3856,17 +3880,20 @@ class SyncEngine { } // Query the OneDrive API using the provided driveId to get the latest quota details - ulong getRemainingFreeSpace(string driveId) { + string[3][] getRemainingFreeSpaceOnline(string driveId) { - // Get the quota details for this driveId, as this could have changed since we started the application - the user could have added / deleted data online, or purchased additional storage + // Get the quota details for this driveId // Quota details are ONLY available for the main default driveId, as the OneDrive API does not provide quota details for shared folders - JSONValue currentDriveQuota; - ulong remainingQuota; - // Ensure that we have a valid driveId + // Assume that quota is being restricted, there is no quota available and zero space online as a default + bool quotaRestricted = true; + bool quotaAvailable = false; + ulong quotaRemainingOnline = 0; + + // Ensure that we have a valid driveId to query if (driveId.empty) { - // no driveId was provided, use the application default + // No 'driveId' was provided, use the application default driveId = appConfig.defaultDriveId; } @@ -3886,33 +3913,38 @@ class SyncEngine { addLogEntry("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException", ["debug"]); } - // validate that currentDriveQuota is a JSON value + // Validate that currentDriveQuota is a JSON value if (currentDriveQuota.type() == JSONType.object) { // Response from API contains valid data // If 'personal' accounts, if driveId == defaultDriveId, then we will have data // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data // If 'business' accounts, if driveId == defaultDriveId, then we will have data // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value + addLogEntry("Quota Details: " ~ to!string(currentDriveQuota), ["debug"]); + // Was 'quota' returned in the JSON data? 
if ("quota" in currentDriveQuota){ if (driveId == appConfig.defaultDriveId) { // We potentially have updated quota remaining details available // However in some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero if ("remaining" in currentDriveQuota["quota"]){ // We have valid quota remaining details returned for the provided drive id - remainingQuota = currentDriveQuota["quota"]["remaining"].integer; + quotaRemainingOnline = currentDriveQuota["quota"]["remaining"].integer; + // Quota is not being restricted + quotaRestricted = false; - if (remainingQuota <= 0) { + if (quotaRemainingOnline <= 0) { if (appConfig.accountType == "personal"){ // zero space available addLogEntry("ERROR: OneDrive account currently has zero space available. Please free up some space online or purchase additional space."); - remainingQuota = 0; - appConfig.quotaAvailable = false; + quotaRemainingOnline = 0; + quotaAvailable = false; } else { // zero space available is being reported, maybe being restricted? addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); - remainingQuota = 0; - appConfig.quotaRestricted = true; + quotaRemainingOnline = 0; + quotaAvailable = true; // technically unknown + quotaRestricted = true; } } } @@ -3923,8 +3955,9 @@ class SyncEngine { if (currentDriveQuota["quota"]["remaining"].integer <= 0) { // value returned is 0 or less than 0 addLogEntry("OneDrive quota information is set at zero, as this is not our drive id, ignoring", ["verbose"]); - remainingQuota = 0; - appConfig.quotaRestricted = true; + quotaRemainingOnline = 0; + quotaRestricted = true; + quotaAvailable = true; } } } @@ -3933,20 +3966,26 @@ class SyncEngine { if (driveId == appConfig.defaultDriveId) { // no quota details returned for current drive id addLogEntry("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online or purchase additional space."); - remainingQuota = 0; - appConfig.quotaRestricted = true; + quotaRemainingOnline = 0; + quotaRestricted = true; + quotaAvailable = true; } else { // quota details not available addLogEntry("WARNING: OneDrive quota information is being restricted as this is not our drive id.", ["debug"]); - remainingQuota = 0; - appConfig.quotaRestricted = true; + quotaRemainingOnline = 0; + quotaRestricted = true; + quotaAvailable = true; } } } - // what was the determined available quota? - addLogEntry("Available quota: " ~ to!string(remainingQuota), ["debug"]); - return remainingQuota; + // What was the determined available quota? + addLogEntry("Reported Available Online Quota for driveID '" ~ driveId ~ "': " ~ to!string(quotaRemainingOnline), ["debug"]); + + // Return result + string[3][] result; + result ~= [to!string(quotaRestricted), to!string(quotaAvailable), to!string(quotaRemainingOnline)]; + return result; } // Perform a filesystem walk to uncover new data to upload to OneDrive @@ -4027,9 +4066,13 @@ class SyncEngine { } } - // How much space is available (Account Drive ID) + // How much space is available // The file, could be uploaded to a shared folder, which, we are not tracking how much free space is available there ... 
- addLogEntry("Current Available Space Online (Account Drive ID): " ~ to!string((appConfig.remainingFreeSpace / 1024 / 1024)) ~ " MB", ["debug"]); + // Iterate through all the drives we have cached thus far, that we know about + foreach (driveId, driveDetails; onlineDriveDetails) { + // Log how much space is available for each driveId + addLogEntry("Current Available Space Online (" ~ driveId ~ "): " ~ to!string((driveDetails.quotaRemaining / 1024 / 1024)) ~ " MB", ["debug"]); + } // Perform the upload uploadNewLocalFileItems(); @@ -4275,7 +4318,7 @@ class SyncEngine { bool fileFoundInDB = false; string[3][] modifiedItemToUpload; - foreach (driveId; driveIDsArray) { + foreach (driveId; onlineDriveDetails.keys) { if (itemDB.selectByPath(localFilePath, driveId, databaseItem)) { fileFoundInDB = true; break; @@ -4307,12 +4350,14 @@ class SyncEngine { // Check if this path in the database Item databaseItem; addLogEntry("Search DB for this path: " ~ searchPath, ["debug"]); - foreach (driveId; driveIDsArray) { + + foreach (driveId; onlineDriveDetails.keys) { if (itemDB.selectByPath(searchPath, driveId, databaseItem)) { addLogEntry("DB Record for search path: " ~ to!string(databaseItem), ["debug"]); return true; // Early exit on finding the path in the DB } } + return false; // Return false if path is not found in any drive } @@ -4352,7 +4397,7 @@ class SyncEngine { Item databaseItem; bool parentPathFoundInDB = false; - foreach (driveId; driveIDsArray) { + foreach (driveId; onlineDriveDetails.keys) { addLogEntry("Query DB with this driveID for the Parent Path: " ~ driveId, ["debug"]); // Query the database for this parent path using each driveId that we know about if (itemDB.selectByPath(parentPath, driveId, databaseItem)) { @@ -4772,6 +4817,9 @@ class SyncEngine { // Is there space available online bool spaceAvailableOnline = false; + driveDetailsCache cachedOnlineDriveData; + ulong calculatedSpaceOnlinePostUpload; + // Check the database for the parent path of fileToUpload Item parentItem; // What parent path to use? @@ -4784,7 +4832,7 @@ class SyncEngine { parentPathFoundInDB = true; } else { // Query the database using each of the driveId's we are using - foreach (driveId; driveIDsArray) { + foreach (driveId; onlineDriveDetails.keys) { // Query the database for this parent path using each driveId Item dbResponse; if(itemDB.selectByPath(parentPath, driveId, dbResponse)){ @@ -4793,6 +4841,7 @@ class SyncEngine { parentPathFoundInDB = true; } } + } // If the parent path was found in the DB, to ensure we are uploading the the right location 'parentItem.driveId' must not be empty @@ -4819,13 +4868,21 @@ class SyncEngine { // Does this file exceed the maximum filesize for OneDrive // Resolves: https://github.com/skilion/onedrive/issues/121 , https://github.com/skilion/onedrive/issues/294 , https://github.com/skilion/onedrive/issues/329 if (thisFileSize <= maxUploadFileSize) { - // Is there enough free space on OneDrive when we started this thread, to upload the file to OneDrive? - remainingFreeSpaceOnline = getRemainingFreeSpace(parentItem.driveId); - addLogEntry("Current Available Space Online (Upload Target Drive ID): " ~ to!string((remainingFreeSpaceOnline / 1024 / 1024)) ~ " MB", ["debug"]); + // Is there enough free space on OneDrive as compared to when we started this thread, to safely upload the file to OneDrive? 
+ // Fetch the details from cachedOnlineDriveData + // - cachedOnlineDriveData.quotaRestricted; + // - cachedOnlineDriveData.quotaAvailable; + // - cachedOnlineDriveData.quotaRemaining; + cachedOnlineDriveData = getDriveDetails(parentItem.driveId); + remainingFreeSpaceOnline = cachedOnlineDriveData.quotaRemaining; // When we compare the space online to the total we are trying to upload - is there space online? - ulong calculatedSpaceOnlinePostUpload = remainingFreeSpaceOnline - thisFileSize; + calculatedSpaceOnlinePostUpload = remainingFreeSpaceOnline - thisFileSize; + // Based on what we know, for this thread - can we safely upload this modified local file? + addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpaceOnline), ["debug"]); + addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]); + // If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true // If 'business' accounts, if driveId == defaultDriveId, then we will have data @@ -4837,7 +4894,7 @@ class SyncEngine { } else { // we need to look more granular // What was the latest getRemainingFreeSpace() value? - if (appConfig.quotaAvailable) { + if (cachedOnlineDriveData.quotaAvailable) { // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload? if (calculatedSpaceOnlinePostUpload > 0) { // Based on this thread action, we beleive that there is space available online to upload - proceed @@ -4847,7 +4904,7 @@ class SyncEngine { } // Is quota being restricted? - if (appConfig.quotaRestricted) { + if (cachedOnlineDriveData.quotaRestricted) { // If the upload target drive is not our drive id, then it is a shared folder .. we need to print a space warning message if (parentItem.driveId != appConfig.defaultDriveId) { // Different message depending on account type @@ -5005,7 +5062,11 @@ class SyncEngine { } // Upload success or failure? 
- if (uploadFailed) { + if (!uploadFailed) { + // Update the 'cachedOnlineDriveData' record for this 'dbItem.driveId' so that this is tracked as accuratly as possible for other threads + updateDriveDetailsCache(parentItem.driveId, cachedOnlineDriveData.quotaRestricted, cachedOnlineDriveData.quotaAvailable, thisFileSize); + + } else { // Need to add this to fileUploadFailures to capture at the end fileUploadFailures ~= fileToUpload; } @@ -5671,8 +5732,7 @@ class SyncEngine { if (parentPath != ".") { // Not a 'root' parent - // For each driveid in the existing driveIDsArray - foreach (searchDriveId; driveIDsArray) { + foreach (searchDriveId; onlineDriveDetails.keys) { addLogEntry("FakeResponse: searching database for: " ~ searchDriveId ~ " " ~ parentPath, ["debug"]); if (itemDB.selectByPath(parentPath, searchDriveId, databaseItem)) { @@ -5681,6 +5741,10 @@ class SyncEngine { fakeRootId = databaseItem.id; } } + + + + } // real id / eTag / cTag are different format for personal / business account @@ -5783,10 +5847,11 @@ class SyncEngine { // If we have a remote drive ID, add this to our list of known drive id's if (!item.remoteDriveId.empty) { - // Keep the driveIDsArray with unique entries only - if (!canFind(driveIDsArray, item.remoteDriveId)) { - // Add this drive id to the array to search with - driveIDsArray ~= item.remoteDriveId; + // Keep the driveDetailsCache array with unique entries only + driveDetailsCache cachedOnlineDriveData; + if (!canFindDriveId(item.remoteDriveId, cachedOnlineDriveData)) { + // Add this driveId to the drive cache + addOrUpdateOneDriveOnlineDetails(item.remoteDriveId); } } } @@ -5872,7 +5937,7 @@ class SyncEngine { // Is this failed item in the DB? It should not be .. Item downloadDBItem; // Need to check all driveid's we know about, not just the defaultDriveId - foreach (searchDriveId; driveIDsArray) { + foreach (searchDriveId; onlineDriveDetails.keys) { if (itemDB.selectByPath(failedFileToDownload, searchDriveId, downloadDBItem)) { // item was found in the DB addLogEntry("ERROR: Failed Download Path found in database, must delete this item from the database .. it should not be in there if it failed to download"); @@ -5901,7 +5966,7 @@ class SyncEngine { // Is this failed item in the DB? It should not be .. Item uploadDBItem; // Need to check all driveid's we know about, not just the defaultDriveId - foreach (searchDriveId; driveIDsArray) { + foreach (searchDriveId; onlineDriveDetails.keys) { if (itemDB.selectByPath(failedFileToUpload, searchDriveId, uploadDBItem)) { // item was found in the DB addLogEntry("ERROR: Failed Upload Path found in database, must delete this item from the database .. 
it should not be in there if it failed to upload"); @@ -6668,7 +6733,7 @@ class SyncEngine { // Need to check all driveid's we know about, not just the defaultDriveId bool itemInDB = false; - foreach (searchDriveId; driveIDsArray) { + foreach (searchDriveId; onlineDriveDetails.keys) { if (itemDB.selectByPath(path, searchDriveId, dbItem)) { // item was found in the DB itemInDB = true; @@ -7748,4 +7813,86 @@ class SyncEngine { } return pathToCheck; } + + // Function to find a given DriveId in the onlineDriveDetails associative array that maps driveId to driveDetailsCache + // If 'true' will return 'driveDetails' containing the struct data 'driveDetailsCache' + bool canFindDriveId(string driveId, out driveDetailsCache driveDetails) { + auto ptr = driveId in onlineDriveDetails; + if (ptr !is null) { + driveDetails = *ptr; // Dereference the pointer to get the value + return true; + } else { + return false; + } + } + + // Add this driveId plus relevant details for future reference and use + void addOrUpdateOneDriveOnlineDetails(string driveId) { + + bool quotaRestricted; + bool quotaAvailable; + ulong quotaRemaining; + + // Get the data from online + auto onlineDriveData = getRemainingFreeSpaceOnline(driveId); + quotaRestricted = to!bool(onlineDriveData[0][0]); + quotaAvailable = to!bool(onlineDriveData[0][1]); + quotaRemaining = to!long(onlineDriveData[0][2]); + onlineDriveDetails[driveId] = driveDetailsCache(driveId, quotaRestricted, quotaAvailable, quotaRemaining); + + // Debug log what the cached array now contains + addLogEntry("onlineDriveDetails: " ~ to!string(onlineDriveDetails), ["debug"]); + } + + + // Return a specific 'driveId' details from 'onlineDriveDetails' + driveDetailsCache getDriveDetails(string driveId) { + auto ptr = driveId in onlineDriveDetails; + if (ptr !is null) { + return *ptr; // Dereference the pointer to get the value + } else { + // Return a default driveDetailsCache or handle the case where the driveId is not found + return driveDetailsCache.init; // Return default-initialized struct + } + } + + // Update 'onlineDriveDetails' with the latest data about this drive + void updateDriveDetailsCache(string driveId, bool quotaRestricted, bool quotaAvailable, ulong localFileSize) { + + // As each thread is running differently, what is the current 'quotaRemaining' for 'driveId' ? + ulong quotaRemaining; + driveDetailsCache cachedOnlineDriveData; + cachedOnlineDriveData = getDriveDetails(driveId); + quotaRemaining = cachedOnlineDriveData.quotaRemaining; + + // Update 'quotaRemaining' + quotaRemaining = quotaRemaining - localFileSize; + + // Do the flags get updated? + if (quotaRemaining <= 0) { + if (appConfig.accountType == "personal"){ + // zero space available + addLogEntry("ERROR: OneDrive account currently has zero space available. Please free up some space online or purchase additional space."); + quotaRemaining = 0; + quotaAvailable = false; + } else { + // zero space available is being reported, maybe being restricted? + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. 
Please fix by speaking to your OneDrive / Office 365 Administrator.", ["verbose"]); + quotaRemaining = 0; + quotaRestricted = true; + } + } + + // Updated the details + onlineDriveDetails[driveId] = driveDetailsCache(driveId, quotaRestricted, quotaAvailable, quotaRemaining); + } + + // Update all of the known cached driveId quota details + void freshenCachedDriveQuotaDetails() { + foreach (driveId; onlineDriveDetails.keys) { + // Update this driveid quota details + addLogEntry("Freshen Quota Details: " ~ driveId, ["debug"]); + addOrUpdateOneDriveOnlineDetails(driveId); + } + } } \ No newline at end of file From 8976f16ce69c9e45d291def3188fc4b2f0b0dc7a Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sat, 3 Feb 2024 10:10:35 +1100 Subject: [PATCH 043/305] Add PUML * Add PUML --- docs/application-security.md | 7 +++ docs/puml/onedrive_linux_authentication.puml | 47 +++++++++++++++ .../onedrive_windows_ad_authentication.puml | 59 +++++++++++++++++++ .../puml/onedrive_windows_authentication.puml | 47 +++++++++++++++ 4 files changed, 160 insertions(+) create mode 100644 docs/puml/onedrive_linux_authentication.puml create mode 100644 docs/puml/onedrive_windows_ad_authentication.puml create mode 100644 docs/puml/onedrive_windows_authentication.puml diff --git a/docs/application-security.md b/docs/application-security.md index 7c22c4f13..cf5769f3c 100644 --- a/docs/application-security.md +++ b/docs/application-security.md @@ -63,6 +63,13 @@ When these delegated API permissions are combined, these provide the effective a These 'default' permissions will allow the OneDrive Client for Linux to read, write and delete data associated with your OneDrive Account. +## How are the Authentication Scopes used? + +When using the OneDrive Client for Linux, the above authentication scopes will be presented to the Microsoft Authentication Service (login.microsoftonline.com), where the service will validate the request and provide an applicable token to access Microsoft OneDrive with. This can be illustrated as the following: + +![Linux Authentication to Microsoft OneDrive](http://www.plantuml.com/plantuml/proxy?src=https://raw.github.com/plantu) + + ## Configuring read-only access to your OneDrive data In some situations, it may be desirable to configure the OneDrive Client for Linux totally in read-only operation. diff --git a/docs/puml/onedrive_linux_authentication.puml b/docs/puml/onedrive_linux_authentication.puml new file mode 100644 index 000000000..4d89f0dbb --- /dev/null +++ b/docs/puml/onedrive_linux_authentication.puml @@ -0,0 +1,47 @@ +@startuml +participant "OneDrive Client for Linux" +participant "Microsoft OneDrive\nAuthentication Service\n(login.microsoftonline.com)" as AuthServer +participant "User's Device (for MFA)" as UserDevice +participant "Microsoft Graph API\n(graph.microsoft.com)" as GraphAPI +participant "Microsoft OneDrive" + +"OneDrive Client for Linux" -> AuthServer: Request Authorization\n(Client Credentials, Scopes) +AuthServer -> "OneDrive Client for Linux": Provide Authorization Code + +"OneDrive Client for Linux" -> AuthServer: Request Access Token\n(Authorization Code, Client Credentials) + +alt MFA Enabled + AuthServer -> UserDevice: Trigger MFA Challenge + UserDevice -> AuthServer: Provide MFA Verification + AuthServer -> "OneDrive Client for Linux": Return Access Token\n(and Refresh Token) + "OneDrive Client for Linux" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "OneDrive Client for Linux" -> AuthServer: Is Access Token Expired? 
+ alt Token Expired + "OneDrive Client for Linux" -> AuthServer: Request New Access Token\n(Refresh Token) + AuthServer -> "OneDrive Client for Linux": Return New Access Token + else Token Valid + GraphAPI -> "Microsoft OneDrive": Retrieve Data + "Microsoft OneDrive" -> GraphAPI: Return Data + GraphAPI -> "OneDrive Client for Linux": Provide Data + end + end +else MFA Not Required + AuthServer -> "OneDrive Client for Linux": Return Access Token\n(and Refresh Token) + "OneDrive Client for Linux" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "OneDrive Client for Linux" -> AuthServer: Is Access Token Expired? + alt Token Expired + "OneDrive Client for Linux" -> AuthServer: Request New Access Token\n(Refresh Token) + AuthServer -> "OneDrive Client for Linux": Return New Access Token + else Token Valid + GraphAPI -> "Microsoft OneDrive": Retrieve Data + "Microsoft OneDrive" -> GraphAPI: Return Data + GraphAPI -> "OneDrive Client for Linux": Provide Data + end + end +else MFA Failed or Other Auth Error + AuthServer -> "OneDrive Client for Linux": Error Message (e.g., Invalid Credentials, MFA Failure) +end + +@enduml \ No newline at end of file diff --git a/docs/puml/onedrive_windows_ad_authentication.puml b/docs/puml/onedrive_windows_ad_authentication.puml new file mode 100644 index 000000000..43b312fab --- /dev/null +++ b/docs/puml/onedrive_windows_ad_authentication.puml @@ -0,0 +1,59 @@ +@startuml +participant "Microsoft Windows OneDrive Client" +participant "Azure Active Directory\n(Active Directory)\n(login.microsoftonline.com)" as AzureAD +participant "Microsoft OneDrive\nAuthentication Service\n(login.microsoftonline.com)" as AuthServer +participant "User's Device (for MFA)" as UserDevice +participant "Microsoft Graph API\n(graph.microsoft.com)" as GraphAPI +participant "Microsoft OneDrive" + +"Microsoft Windows OneDrive Client" -> AzureAD: Request Authorization\n(Client Credentials, Scopes) +AzureAD -> AuthServer: Validate Credentials\n(Forward Request) +AuthServer -> AzureAD: Provide Authorization Code +AzureAD -> "Microsoft Windows OneDrive Client": Provide Authorization Code (via AzureAD) + +"Microsoft Windows OneDrive Client" -> AzureAD: Request Access Token\n(Authorization Code, Client Credentials) +AzureAD -> AuthServer: Request Access Token\n(Authorization Code, Forwarded Credentials) +AuthServer -> AzureAD: Return Access Token\n(and Refresh Token) +AzureAD -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) (via AzureAD) + +alt MFA Enabled + AzureAD -> UserDevice: Trigger MFA Challenge + UserDevice -> AzureAD: Provide MFA Verification + AzureAD -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) (Post MFA) + "Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "Microsoft Windows OneDrive Client" -> AzureAD: Is Access Token Expired? 
+ AzureAD -> AuthServer: Validate Token Expiry + alt Token Expired + "Microsoft Windows OneDrive Client" -> AzureAD: Request New Access Token\n(Refresh Token) + AzureAD -> AuthServer: Request New Access Token\n(Refresh Token) + AuthServer -> AzureAD: Return New Access Token + AzureAD -> "Microsoft Windows OneDrive Client": Return New Access Token (via AzureAD) + else Token Valid + GraphAPI -> "Microsoft OneDrive": Retrieve Data + "Microsoft OneDrive" -> GraphAPI: Return Data + GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data + end + end +else MFA Not Required + AzureAD -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) (Direct) + "Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "Microsoft Windows OneDrive Client" -> AzureAD: Is Access Token Expired? + AzureAD -> AuthServer: Validate Token Expiry + alt Token Expired + "Microsoft Windows OneDrive Client" -> AzureAD: Request New Access Token\n(Refresh Token) + AzureAD -> AuthServer: Request New Access Token\n(Refresh Token) + AuthServer -> AzureAD: Return New Access Token + AzureAD -> "Microsoft Windows OneDrive Client": Return New Access Token (via AzureAD) + else Token Valid + GraphAPI -> "Microsoft OneDrive": Retrieve Data + "Microsoft OneDrive" -> GraphAPI: Return Data + GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data + end + end +else MFA Failed or Other Auth Error + AzureAD -> "Microsoft Windows OneDrive Client": Error Message (e.g., Invalid Credentials, MFA Failure) +end + +@enduml diff --git a/docs/puml/onedrive_windows_authentication.puml b/docs/puml/onedrive_windows_authentication.puml new file mode 100644 index 000000000..43a458a04 --- /dev/null +++ b/docs/puml/onedrive_windows_authentication.puml @@ -0,0 +1,47 @@ +@startuml +participant "Microsoft Windows OneDrive Client" +participant "Microsoft OneDrive\nAuthentication Service\n(login.microsoftonline.com)" as AuthServer +participant "User's Device (for MFA)" as UserDevice +participant "Microsoft Graph API\n(graph.microsoft.com)" as GraphAPI +participant "Microsoft OneDrive" + +"Microsoft Windows OneDrive Client" -> AuthServer: Request Authorization\n(Client Credentials, Scopes) +AuthServer -> "Microsoft Windows OneDrive Client": Provide Authorization Code + +"Microsoft Windows OneDrive Client" -> AuthServer: Request Access Token\n(Authorization Code, Client Credentials) + +alt MFA Enabled + AuthServer -> UserDevice: Trigger MFA Challenge + UserDevice -> AuthServer: Provide MFA Verification + AuthServer -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) + "Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "Microsoft Windows OneDrive Client" -> AuthServer: Is Access Token Expired? 
+ alt Token Expired + "Microsoft Windows OneDrive Client" -> AuthServer: Request New Access Token\n(Refresh Token) + AuthServer -> "Microsoft Windows OneDrive Client": Return New Access Token + else Token Valid + GraphAPI -> "Microsoft OneDrive": Retrieve Data + "Microsoft OneDrive" -> GraphAPI: Return Data + GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data + end + end +else MFA Not Required + AuthServer -> "Microsoft Windows OneDrive Client": Return Access Token\n(and Refresh Token) + "Microsoft Windows OneDrive Client" -> GraphAPI: Request Microsoft OneDrive Data\n(Access Token) + loop Token Expiry Check + "Microsoft Windows OneDrive Client" -> AuthServer: Is Access Token Expired? + alt Token Expired + "Microsoft Windows OneDrive Client" -> AuthServer: Request New Access Token\n(Refresh Token) + AuthServer -> "Microsoft Windows OneDrive Client": Return New Access Token + else Token Valid + GraphAPI -> "Microsoft OneDrive": Retrieve Data + "Microsoft OneDrive" -> GraphAPI: Return Data + GraphAPI -> "Microsoft Windows OneDrive Client": Provide Data + end + end +else MFA Failed or Other Auth Error + AuthServer -> "Microsoft Windows OneDrive Client": Error Message (e.g., Invalid Credentials, MFA Failure) +end + +@enduml \ No newline at end of file From 07a3bc737f9814bf8bfcea0454f5f2a6b96f5793 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sat, 3 Feb 2024 10:11:42 +1100 Subject: [PATCH 044/305] Update application-security.md * Update link --- docs/application-security.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/application-security.md b/docs/application-security.md index cf5769f3c..4c0787ec2 100644 --- a/docs/application-security.md +++ b/docs/application-security.md @@ -67,7 +67,7 @@ These 'default' permissions will allow the OneDrive Client for Linux to read, wr When using the OneDrive Client for Linux, the above authentication scopes will be presented to the Microsoft Authentication Service (login.microsoftonline.com), where the service will validate the request and provide an applicable token to access Microsoft OneDrive with. This can be illustrated as the following: -![Linux Authentication to Microsoft OneDrive](http://www.plantuml.com/plantuml/proxy?src=https://raw.github.com/plantu) +![Linux Authentication to Microsoft OneDrive](http://www.plantuml.com/plantuml/proxy?src=https://raw.githubusercontent.com/abraunegg/onedrive/onedrive-v2.5.0-alpha-5/docs/puml/onedrive_linux_authentication.puml) ## Configuring read-only access to your OneDrive data From 73e53961be8dfa970823671b9dba6915f78ee207 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sat, 3 Feb 2024 10:16:04 +1100 Subject: [PATCH 045/305] Update application-security.md * Update --- docs/application-security.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/application-security.md b/docs/application-security.md index 4c0787ec2..c4b5971d7 100644 --- a/docs/application-security.md +++ b/docs/application-security.md @@ -67,8 +67,7 @@ These 'default' permissions will allow the OneDrive Client for Linux to read, wr When using the OneDrive Client for Linux, the above authentication scopes will be presented to the Microsoft Authentication Service (login.microsoftonline.com), where the service will validate the request and provide an applicable token to access Microsoft OneDrive with. 
This can be illustrated as the following: -![Linux Authentication to Microsoft OneDrive](http://www.plantuml.com/plantuml/proxy?src=https://raw.githubusercontent.com/abraunegg/onedrive/onedrive-v2.5.0-alpha-5/docs/puml/onedrive_linux_authentication.puml) - +![Linux Authentication to Microsoft OneDrive](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.githubusercontent.com/abraunegg/onedrive/onedrive-v2.5.0-alpha-5/docs/puml/onedrive_linux_authentication.puml) ## Configuring read-only access to your OneDrive data In some situations, it may be desirable to configure the OneDrive Client for Linux totally in read-only operation. From ec9c7f073cf6aceafcdacb13ece2cd4ce0c4aa79 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 4 Feb 2024 05:50:46 +1100 Subject: [PATCH 046/305] update security doc * update security doc --- docs/application-security.md | 8 +++++++- docs/puml/onedrive_linux_authentication.png | Bin 0 -> 94410 bytes docs/puml/onedrive_windows_authentication.png | Bin 0 -> 95227 bytes 3 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 docs/puml/onedrive_linux_authentication.png create mode 100644 docs/puml/onedrive_windows_authentication.png diff --git a/docs/application-security.md b/docs/application-security.md index c4b5971d7..e0fad5a24 100644 --- a/docs/application-security.md +++ b/docs/application-security.md @@ -67,7 +67,13 @@ These 'default' permissions will allow the OneDrive Client for Linux to read, wr When using the OneDrive Client for Linux, the above authentication scopes will be presented to the Microsoft Authentication Service (login.microsoftonline.com), where the service will validate the request and provide an applicable token to access Microsoft OneDrive with. This can be illustrated as the following: -![Linux Authentication to Microsoft OneDrive](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.githubusercontent.com/abraunegg/onedrive/onedrive-v2.5.0-alpha-5/docs/puml/onedrive_linux_authentication.puml) +![Linux Authentication to Microsoft OneDrive](./puml/onedrive_linux_authentication.png) + +This is similar to the Microsoft Windows OneDrive Client: + +![Windows Authentication to Microsoft OneDrive](./puml/onedrive_windows_authentication.png) + +In a business environment where IT Staff need to 'approve' the OneDrive Client for Linux, they can do so knowing that the client is safe to use. The only concern that IT Staff should have is how the client device running the OneDrive Client for Linux is being secured; in a corporate setting, Windows devices are controlled by Active Directory and applicable Group Policy Objects (GPO's) to ensure the security of corporate data on the client device. It is out of scope for this client to manage how Linux devices are secured. ## Configuring read-only access to your OneDrive data In some situations, it may be desirable to configure the OneDrive Client for Linux totally in read-only operation.
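The sequence diagrams referenced above all revolve around the same token lifecycle: the client uses the access token against graph.microsoft.com until it expires, then exchanges the refresh token with login.microsoftonline.com for a new one. As a rough illustration of that expiry check only, here is a minimal D sketch; the type and function names are hypothetical and do not correspond to the client's actual implementation.

```d
import std.stdio;
import std.datetime.systime : Clock, SysTime;
import core.time : dur;

// Hypothetical token holder; real tokens are returned by login.microsoftonline.com
struct TokenPair {
    string  accessToken;
    string  refreshToken;
    SysTime expiresAt;
}

// Renew slightly early so an in-flight request never presents a stale token
bool isExpired(TokenPair t) {
    return Clock.currTime() >= t.expiresAt - dur!"minutes"(5);
}

// 'renew' stands in for the refresh-token exchange shown in the diagrams
TokenPair ensureValid(TokenPair t, TokenPair delegate(string refreshToken) renew) {
    return isExpired(t) ? renew(t.refreshToken) : t;
}

void main() {
    auto tokens = TokenPair("initial-access", "initial-refresh", Clock.currTime() + dur!"hours"(1));
    tokens = ensureValid(tokens, delegate(string rt) {
        // Placeholder for the real POST to the token endpoint
        return TokenPair("renewed-access", rt, Clock.currTime() + dur!"hours"(1));
    });
    writeln("Token expired? ", isExpired(tokens));
}
```

The point the diagrams make is that this check-and-renew loop happens transparently inside the client; the user only re-authenticates if the refresh token itself is no longer valid.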
diff --git a/docs/puml/onedrive_linux_authentication.png b/docs/puml/onedrive_linux_authentication.png new file mode 100644 index 0000000000000000000000000000000000000000..4e27a21442215e4ce536403562002720751a291c GIT binary patch literal 94410 [base85 binary PNG payload omitted; pre-rendered PlantUML diagram of the Linux authentication flow to Microsoft OneDrive]
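Looping back to the quota rework in PATCH 042 above: the core of that change is replacing the single set of global quota fields with a per-drive cache keyed by driveId, held in an associative array. The sketch below is an illustrative, self-contained reduction of that pattern; the real equivalents are the driveDetailsCache struct, the onlineDriveDetails array and the updateDriveDetailsCache() helper in src/sync.d, and the names used here are not the client's.

```d
import std.stdio;

// Simplified stand-in for the per-drive quota record kept in src/sync.d
struct DriveQuota {
    string driveId;
    bool   quotaRestricted;
    bool   quotaAvailable;
    ulong  quotaRemaining;
}

// Cache keyed by driveId, mirroring the role of 'onlineDriveDetails'
DriveQuota[string] quotaCache;

// Look up a cached entry; returns false when the drive has not been seen yet
bool tryGetQuota(string driveId, out DriveQuota details) {
    auto ptr = driveId in quotaCache;
    if (ptr is null) return false;
    details = *ptr;
    return true;
}

// After a successful upload, debit the cached remaining quota for that drive
void debitQuota(string driveId, ulong uploadedBytes) {
    auto ptr = driveId in quotaCache;
    if (ptr is null) return;
    ptr.quotaRemaining = (ptr.quotaRemaining >= uploadedBytes)
        ? ptr.quotaRemaining - uploadedBytes : 0;
}

void main() {
    quotaCache["drive-a"] = DriveQuota("drive-a", false, true, 1_000_000);
    debitQuota("drive-a", 250_000);
    DriveQuota d;
    if (tryGetQuota("drive-a", d))
        writeln("Remaining bytes for drive-a: ", d.quotaRemaining);
}
```

Keeping the cache keyed by driveId is what lets multiple upload threads debit the same drive's remaining quota without re-querying the API for every file, which is the stated goal of that patch: significantly reducing quota API calls when uploading new or modified data.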
z{3~O}op$+(r1m2C+9Z6|qp6w~apJMGT_*GOuZ&h*Q}T{97+S_eV#W@di!@#A-!|P` z*yjrq<=#0=Q(nlAwp%8X=d@oh4+ucetQV*mT_#lSaV{_l!Y+JtnN5+yq);bfLvh9W zW1^`jjfBJH;GKJv;J@=nKI_KNXCiLbq0t4k^V_MtrrU=!I99C|N*vtsSEgirpC+l1 z&>0vQBAL(6+jw|*%qMQ5CN!$|Tfco1o^~^>5@_IXJ))Hozd49&8!oh!khz^47zn#I zuB>eRMNmw-TiAwr!yZMHC+P8J#U>>QeCktOjvIvXWdJM<;`_uLfwi4Z+*xq&=odeaOkZX_8{#G61VZY|~s;o^&XF*aL! zjK8sqcU)s<+8+@oYB^WCQ|>U3l2Pv~|5lr96#Mjv6pnn9G1uz_y~)!4BqOCJFe%%t z{I~K_7`_Kmp?H6%WoT6PIc`txZES#*jV~Ownu z>+YaC*+Ux2)zh^~9;5Hoi~_PRQLN=5DA+10TT*F)XFRdUd-KEZ8#Kxl zpI$E*ws|58ThgglMv_N^sQ6zIPq&}QQJsAd>Sw^vu(pFtcs71$Es9m!?;hDrz>u+HX(cS9;Y73_m3qzA6@3@ z7S|t$B&ZEVa4;hpj8gXD;Lu-%XTJuVfcb3Lk>$R29f@G?E&<9ZEiN1PX&T5so^-Mn zBEP5}5~|Dnp=zy6hsk_LB1&3X&01!=qxCoZ{9o?glm0!g?XXcGo76Vd0vk4R6!5Os%gh@&ggeP%PJbfD3!&nzkJQq%q&E3($ zjY7`yYf3hhT6t_*JP#-uSa-A--Vtv#rRJKiz*<4`XYpPW!%z`}%%i~9qGnK)xp0*o zaKz30C2UYPB)G$@KlK*&R4~6cJt^BGESxhUp`Em62+W`qpwe#rF)dLI+4(I*vb&e}^+Kn6>wPa0w zYs&=Lh6z}J=s#M84IFtFr{&81vif14!(<#6Hr++yG#6iO+?=vWi;z`U;?Wj7hy+0D z2ml}4bacJwh=`E8AS@ktMGy4peV|FxuUcUOua{4zjMeYmCph_n;FY6Kiq1S{GFL05 zx8!31x#8D|{aBOIFP+G0xuNQuKG!;7)%jo~MSn2`9j9+5-Rk*B;K!a3~HH7XrrdR7NCuP^+N3S+nlO-+rS% zxyJ2;K(p>7wblQ6er_&V=-Fx4=~yR7zCE#pR5)@AtNl`zK+AY3D#%EWG>UX4i%RK7 zX$nT>p&PNiz?CROs>vsNi7RPlx7LpZozQ!$zBkvzn{$_*>HC0KR4chhCB$xM4_h-c zY)qA*Bi-Gw^gLMRWiiFrU0htuJ|gsL?B2@B?6saK^6kZiD?;{`SCZ0{hZ{p`pPJLnip zmVKc{Q#gst=ao$nm1Axbb*akPG0TDhQMku@=|@>SAO_G*nM30#mOTH1_N^` z;XFBIJEmu-ucQ)7)*@a@o!O7}Vhga~Fgl{;WN;XrtwYDilo9@KuPgfc%cmdTQ{!&F ze9C{!Ddt;O=f=mUv(`UH83IUxvR0J(FC}0-N^Cq^wzmHM;Vr}S=YjXBnVGk}Zj1*3 zELub{d*?4u9+F>mJ^C3hwY`6_eKQFy{-!)X&*LzGoS%~#q9)zh!)aIoXJG8rqdfLW zx@pYbeZ1VOj=ZUTCma6S$fD6UyW#fcV7ySD*%gv3^XR(*QOLdf6-r%A{z{9i#uKPq zZ7qJZVZ+q;oHjvik*V6fC}pK5pNGP4R+y+{c)3$l=7FA}-k9f>-q*ln(vLy&!|YAx z8#cYyE<7elBmlU*(q}wmT<(XvpK>y!rOygaq{~axH~mLCQ5X6e}@>SY8MUTB?RXC$TY?GZBvFq*_fkTz0mHXiF!Z+5XH>C6C899VSQ$qCL`tk~$**(M0RYLyV+ z;mev%ASTo2LQFg>8M@zO6%;$K&Eqqw*eMciNP039o2ceLJbjk2+9&bdZ-6K95!P66 zD$2$;#wh7+;SfBDGVTQb;bqm$8gjjMYJB8tES)bm5m-cY21HSmeFfy^n5tX6cw8zG=IGZ zDjHYP)%N@{5;I=?W&{9Ca*XxdMM#xXYk9e$0A_EJOP+UvFdc>LvSjjwaU*pILC9bh z{^n%Kz>rkS#Do*DyP&)SKK>g&9nVU;St%!4o54g;6X4^g#J6Yf9MhF9;&#Ojh*Eg2 zzmMj14Gq!d1ppJF>FPkylyD;^>kVz0ag#)qjf~7dwp445RAL)4%chFP)a|Php&6g{ z&z&MN1ETp^&%ccN_gJYMC*hdC+9cv}&#c!p`n8bk;W^JC-5<*G!~D6ZGjGP~P3?uN z`|?-{--L}|_L^D2W703*e$^vWCNzG1DJ`_q{x1b4#L)P%O==?_Cs>&3mzrcgP3tQ8 zJhmTS((T`3RT@e3QfkFCTOv+XM`sXZ5k$-nGCb!=q)%V*Nc{kKm&lgx;UX?|3!g>7 zx_mwHvM2Cik=!kBy21y&53{4S`5>T}O2#FdU*avAb(mC+$Wc&S^+GdOyhJAGAdi2@pG>Hl_VSa{RHBHBD!$kC63)mwC8RERH3~9JK{l zP06ev@OVyALl0J$u~Poi+ofb`0M@Rl;IM8)tvrEJx<(IS8fsYiB_3Qso2ROVa%TQ_>;Uv8(nfH-ht5))-xy!Kvg1+HY;b*{okcixJMiX~ zSwG`)R{N$@wGY3klsVMT>XV|~*0^zdE|m+NWy+#;F+~y=5XT)D2hrACF&gXtCCf*?_^``y^&8fGZvYl zW2-=!n&cdM7KW;;0QveAwz%=azZ|J@o8qm}CyP^nz$FXbpK}OWI;c^@9_vybkNbj9#Wj<{Bq@Zt|Oh8PgC__P7!^+Qwr{b2PZ%u?R$`^pSK{ zHph*>7OaSR+15_xvy??li~02hZ58K;Jyg-Q7oZs`hrY2suK4y~i zTTGcN2D)Dh^XOme!T`}^KR_;UyNVUAw3wo%W_LNbQ%p;UT|=X*=x19|gnWtI@M)Zi z)7jZs_7rYS-flJPju~EC5+9ocGU|rc>W|JLM9oUm)7@t+aaUY_vA^q6&2kdN?{0Xx)EV1C!%IK&-3!o zRIqCEowYi7&X?`jj!#PgDbjNn7vWPA5G-_FDi;xakDN#4iYYn&7_!WoGgrREExoC* zvu#qhlI=)>??%zxc}5FV7X1|{Xl$g=SiYlEuomNI(tqxc&Be{_Qpe1c)f&ga#%1K2 zJ{VPyiEsl%i0kntJ|3R%S`ZqG+f>-tC=uWJN*9ftLP4H0|5ON?Rt`31vJMEHk?<6d zNBNrf$&ZApQIhXh)Redz(TAz;l(z;5%vefEp8e#VOmX4*GHO)MeQnR9lxtvgm`aTL zuv4->Ci7fp{9T<`47BpYKvqCD8D4lK1Hx;Y*Qi{5S-B!r*^E;KA5Qo6kT0@pk)-Y{ zoXSCApa|)lP+_rJg`AqSAoY$CfM{)P?b@c{c$EIEUnDq?{q6N$#St3ZBAy(o<~iXz zvNwzV6tss*Z9O7&*;ja6Y2W?38%zm;Dwg=a3}=0WdW6WxM8LSIJ6;hXVHo<(|3ha? 
zSjG}(N0FSRVlm%VWd-7YYkWRkq26^{wLtWpZ2Q)u>GozJtZY;|Y1#y_<7xbCqE2Al z;Y^a_NZ{$qJei%cjMdo0X+MkY(*llJH+=dM46+Xc%T7X|U_Lg!t@X6=YobL^UIEYeP_hu> ziZ}d%d>juEciy)jQH4|3l&~`2W0b{%V{_RzH4&;E?2yg8F6JN~Y| zG+v&KDh%W*JrMUtC;k}!@(H_(w2%-fr`}Xg%D2Tb8 z>9{QxwQ9*~^PC`YQ}*oGWAfeNw;@}|cNQ9!8s(___+yWOf+A;MOYUNQF#WLkZELnn zDj_azW_(!uFP5w)t+N;9e$cg6?<*25yk4s9O{;dq)@Zu)HO|F*}O-rfA?EcJ59YyL!W;msbPESwH5l#+8E2>wNRh>)Te9yM;uC(k~90wNX zhLgtM%d+A{U{i6@j(Hl1kkBM(TNHbOxZJLS>{6dra!T?Q>Jqu1k z6V&^XLpj(NjiK_X&0c_>)6F_Vm;{~Yj2G%ZmX3iog`Lqn$dP%b+1i-e4{|+#B*>*n zNrI%&R4g&nupA*vfOVD^aDci( z-f8W;&n(3GgW0i%HRlPax5-EPy+)7A;eApwjNUzozOPeE3>$O>j zz+61j_Z{G6Qz4-eUR>_)BRb9~Kp6HCmB{EOkJFq#n`Pvb1PN1P-g|9Xoj|~kIB|E@z_ZlyWx_1B(lLEkTF%T-f^VKvM@XhKxmngg$&F`!^L*+^B;P;sO1PZAPBLk&}VMl2v6!i9;kI-BzIyzJbx(EtnnDtj6=>F40 zX;3+D1G|}tY9l7F@%qxObXYJPJ!m$jxkP`BIaGAo6|^#BE>J!^+MWt1LQ`%u8xzKR zn%}-T;vm|}V%W{>vZcHRmH%=*NX*5bzO6xq-wz|sy?aNT;_1qjrKzpG;CW5-$?Zep z@SXwllERxPJ*f~vDr?!J;rj4w?a=`hZ4berM5Sxh($W)fj~TrZSr?!>(k~&79xe;< zT_f)_(E|}6tgd)()uK4Xr20N)_s+ zp74B_{!q3_gqCOO{Jq`LNujCl^;lwM{!iXmscsdBQ~K8F>A5lpVxdvJQLL%qWIFN6`1JFI>0VP)y~7sk0mAFqp95^MPawf5jVkol6LzD_k1bfX zQ;0t-^uKd&)%HI;7CiRFmAIVyL|4`I8m*NU=dL$b=#IO+a}_k!piBcivf-v?6iC2$ zy5Jhng$K4)Kzlvo(d8&a#uU?Pr$iM~eqd|#IROPX-u&b5W)f~9ris;Po`OmO zhKq{}Xu%027dEq2YtiWik6H2q34(7V+KU@m@MKw|1r-T5{#@!(2pHE9Tkwev48j+kmk(@L+dlG^z{6B?{P%C*7w^LiqCdZ9bN@Z_ zw+D#0&HRkG_kELG_^a>29Qg#pCf$9C_7d*SJ(~xppw4Zq^_rKckl3c#fr2qNq?wMFHX~YJo-v}OPNz6!fJ$#0&oI+qa-p(7f0h&?YV|6gHl~ zZbv@$y3*ofHNXEisW7|NycpqM&4@OLi0n9m)GvxOYMvYQ#)Hno|l|T113| zb2WBrDco*>GX}Vn%U1LCRM;CC93jw7Vla~g;L(GYXwu*Ho?UDeb8~Z_k3&?7Bg;{c zkdTm(@wi=&j<+UWpMy(WobK(dtxcp02Ee0WhX-hTT`S44$kq6#x*V>)diCo3;zC$h z81!>-W|EqI&Y~9`61-(pqet}Zj_0Dqp?cFNHT62|_>eHiz z>Q(jiT$Me3C^-J8k1-xRcz}nO5FP#103XuietrOGgR`64-h4x1mB8imD&K&Bj-!o{ zJVrXY5H59EY!1l-Wep8l-lwK2EO<)Wq%i$LO!_c7FMr>Oa#x}dMHP)&`wh^$6UnoI z$7(V8?Tb%Ocem&VFh1+yOtAtkCZ>o-ofYO2;^N|Z`uaP_?5}upq?11(YrFq|ryI%k zK|nufU{2AFNR49XdrSK!U*>X(pJEIQ@LD*mD2}17c;_mx|1n+utrT3OQK+2 zVzvdtdN2MSey^BjKUyFnBbTbumOmgQ^w^zcugohiADx@y=H*Q-w3;plorb~Kfk8oe zt3w$gug~+9Ak2!GF_3e1ubcXu9Ah)H%@`V$N%F?Hgz$_;kUT+=HYRptWW>kEM@)Ol zz|9n;=F_LAm9_dE_YaJsK*`<6$l3X2vDea9q>bI-MnYcCq)@*je1^sF=ELLTimsC- z$aF|ICz5xRTrVyR?)~3`{84N2R^UvPHCQ=LW@dX}_E?O(y$kS-ndliK<6uWe4p?)xeNIcx=ml6J89!~};A0JF@zj%eYrTiV%~E5`sexRWx$ifnjmGd!`EpTIwuBaBw=J@3Dh`6_=K7bl8d= zGYbg|TSFEalF`*Ap$mWd)Q{C^ca~f#VUk)q3D|0g=Azy0A^6-}T%tHwDy4=!*!hyw z(uJm968(k6B_)?dMzbX1x}qo`W5$qYBBAc<=>d~v{KEYFu$FW6E-&bCu&|DIW+tqN zkJyk&5R$_5+F_b)eoL!3g5hXiJBYouLDyi4RZL7w2tFG1;IHz9M8J-BWQOtc#Tp%usyy{iT3j!FdBr2HLeK%>gO)H@2k;p z_pcAas6cVyTa4_V0f1-tPwE7w-sd1_*GoA4Dw~^g1 z-Mp6@w(IPdX+)nQz*czh`&P*Ha|70bfx&WLBAZJW*md{N@E+meZLY215E3RwM=PqS zsd=>NP59{5>5+LTzV4jb$@I=RW(Qn-bE+(-xY$}pM+amgp3ChQCH`fU)YQmGNK`a5 zm~7q`A!=aRRDWBx!%x_|16+r_Kqj@Tl$DjyVfVQ0HFOxAF!!WBH|$>RON11tzMLi+ zosJog$d|H|q+c%5tivWCNQ#WK{oWpOaA0eDFs7pYAttHQ2c1Xs8AI&5Ki}s}e0^u8 zDv{Z=)N+PZYdBpH;bg54zFIFrJPrU*=;IgUq@<*jlqN^(Llec?Yb<}^1)%d_w}8mh ztg-cr%}ct5-+2{;jdW8n16Yp>C^pwOG>9cqv5F^gIu&_=(iN{lk*c;aMHWJ0Y^-pM zm}O)tiJ<=@92`5|f|h!h!%v?+bqX37WIEciJMTZ)aR4Fq#;5OOM20tnm^Vcl+2Px~ z+g@K^UsekW4!vv;EG%q#db*gHnB-t-Ht*Gb>-xIMCKe&yL{wmag~HzAY>geTAUqq9 zy`{+h3x5dOejARqEJ?*H`4AUb-)JSUR>UsL5d*xpS6AnU!&3H$ z4Q{7AASCHhU%q)K{eVu^?rM<3VUyXg`;ofgNEq+U%b2MA%mWoI7e976E`tyvtEo~y zQetiRpJ8*11{jDBH{QiYPU_?)$n%*H`DKBkzwt?FvfNZbRTa1RwW8wJZl~?Z#g!GZ zW7&;^rb){e5R{PM zy3&o6Uix%23xrgq$ zRA953Rdp7z0uUHUealb345-K~=~G!Pg<5eu(}hNlVO!X`ol>O=;jxs?Jm@oMg>d() zK|w_U4*a?C@ycd2wPY#}H#2i|+@xVgC?PnRQQ1vl+cNOnPf#7SA}?XGQ(I4$=Sg|I 
ze|bOzrE8@7q)I>6knj2qNN>3f7#xnVqV}67qF^NUw<`TE9wDOQA&WU1sNJ-PR)QCeOtdzv}9j<>uyEFEkeL6w9iril$i4*MnXoo?i%sNHj_P9e`|R@??;WN|DB6 z7PC=nuS@kxQ{c|oKauqR;k1l_iFuw5Le=nKxvNrq00c#o;|?B;*G5)cJZ77q@)ik` zPBY9E75kI4Sddb&|0AV(i2MNLPfkv*Wg+4j8K9e+dwax$gxsdXnL#)V)W|iZrKJGr z{rvo1)5gZf#{+^pWiae#PO6pyg`tK807EWz1Urr{EN#M;Skqx}VONltz+(qTc{MsR zvMlho%cQKVtaNlz4{XoR&oha{g1|uoD3smWyaBUW-W|&hO&3;lIrKqgjK;Z+;E z?0{SkAYUlIlP~5LkL}44=O+q+T$kHr=HmqqB|`Y<-$2@5faJbgw-=a|7Ah|<9~lt= zh^yfnAJh;)IN$zGIN=wyIvB+}-(IwX=-F+&+yz2pZ>mgWmK^p^VrdH;d`jzfzdoJ- z{oq0G->*-WdUIMI1BadA0{&aa?nfxYco#11K}AFV?z5MF)|S!H(FO(vlsxY+`TpOF-%JGtsyBbV31h?b^N<(xU(IdaK*2hbJ^JI-^-{vhcNdx#78Y&+ITSbt z%yn*d*7N#UCB5EXr|mvw-P!j!LXaGzl*Irf$FmJ1}}Ji zetvYY%K=CGD&8)~!p>y3DmiUFRcg3&@9{fZNOm{`k7Z*0kCcX5Sw7wD8TuxxQYS8q zBJA(+t4K1NmH!=ZO9FOV&>x`b#{q2#6wjB$S{8C~VgC@@pUio>J*6;9-pWU!vXW6G zoq=ayY^+Wjdphq{Tu>0K926J`lv%0Ep;1W+vJ$R~>{^~cJ4_n#?*Wj7yhxuLTHJIq z-P-CC$DrL<56&}&pP8gzzxdwUNK4rXSOX4}atD2VC3u?8ul{1RlzOsj!p z&WR>3FJ9iJ`-B{{s-L}kKKzN+cxGBmmv?~)|EENLw*wMPV`HOL3_30jo!Zy`(#kQp^nlJVEoJ2sAY2v1nmn&X z=``vb3o|n_6>_8%vL#EDxW1YL>H>ffYVDVQ{VMc@E@q#wi#-mr)gKL?%bD?4Cnde7^NC<>Wr>dZ&% zpgCTo2D2^a9bO2vW29hY@aCgOj}TW-UsY9_g>mH`DuPTS3;$0(Tf+gb@Q@#i*>u=9 z`wXz)rIjm>nx!Q@HQ;Mds`vHwqN1Z4_sDSl_zFfTgHmO^5E>fV-`V-CF3RMOmk`%mhi8srND;4TmVM<)nIIbX#tA$oED3I!fw* zide(prvagiU0Yop-UNAN(o9Lsm!L~gxJJ2d=l`9xfVbH3dtaaErKU^zQ(gVr* z&6_tNcr2f8fSZypsi|bQB%9IFV_`gylGW1EB4A1)*jHGeR5QkY@H3yJg~F1k+2^6 z?jIf;m^j_^wWHy)GFDxl>_i1Utr3>zBmjEt7^n-dsi7DT$=6r`m4gx{Bzs>L6_wE6b_;WO|_1BfgkP}bk?KL$R|(fDu;F>@b6HDP0k=<3$Y)%5L)6+aG0$ zMS!;I;n+VXn@p>2kL&G5kQ zKWWAPor6Fv@-q0J^vJP72d;KwhrfdtbdqtH{? zjE;*F5fK4eqXuNSrOIg^hsz0VFD$m%4|5rJo4NjnLns;q=HA1 z0GUFG_lbG7cXvJ42T8PUemM?}K>e2=|FRsEXhx?VhX}CSk~jes1zZa_I5=N_e{wQ1 z;K41x3n^v3ix6wyPbcmOwvo>bc`V%I|-M_KCB6UHsJpLFDpSS zB-9Gnw4i$)T;4DStL`gx7%tP&!AcL1zPK!Aih!+Gd%=M2q(3;|KVoFFvB42@ptb3Q zL`1->hgwq8(K$Fd4GatzLxA~%jDnKk2k<@Llb^|pKiti%a7p0-W<`{k4Ch!W5gaQ@#;N)GbT9#l9 zA|fK{oc4^=$G~g@3$)|_VEtf8nITM)_a7WkACw%R$)m~<(a@5?!TlK_jQ6Ho!a6!> zVPP`Be*dSVFobPu=9$|M`~(aP3{k=Mjt*mEV~NYwB*fl$CUCrUX}l1e;AXq%92m}$ zu#C}M(bO|}w7DIumQKLm*%k0U6R&u@+;k*P+w-y`i{;csMC>#Jv=fZ z_6lHXO00yqxCy2gUx%FA(>M?cD1hD+xC6pgpwWlE253K^k`!oSJy2*q5kdYZco(Bh zwX!Ydga!KDcMz_V4LrfDf%@F3opLTcv2+X0-_a_S3MHW83cQ+4`8sQG->E%G9?X`) zzo|F**8CP~YT0i@778R;%v6TN02D^WXWhOy-ol~Rcpns`f5#^Y4j!J2f&v&RJ!TbP z7VH+&*KVv5vs;84r&J5aR0W96n4_WB?vP@uSBb1K<*j z(vSz7ZP^6+U2tZo=Dyc3BqJw3AlV$w>RIlJa$&*C1&5g{%_nxx*HW82E-R(7et+nL z)aRa8KRRx1ZnnRuy?#Aa{pLF$LbTWtl9IiEhN&ilZwd*Qeb}fbwgR>?5Hw^lQMD2P zURMG=4-OCW)cE+kf$s{G$vHX>g=BoQr$IHSPAIoLsSs?DR1*l-NCId13zu5M>OTOf zAJ>4Fqng|FBG50eQwe*cgx}}hP$LNmiMo=*x$|SR;?goh189)_r3J zI3a=7fPwrLmM=Ne9{};4-vd1_J**rQ7-T8((m^c2{0^#Sl(QtuFu75qvh6T@{{i6t ztHAj`Amh^yaiI0}LGgqkRvGEsn2}KL_U0Nod-!2W?GGr%a>zaHy8|XjJ90zhp6Q(G z>L%_+MDR=DG}mQdoGt-bF#}B~XhAk3sK>q+0I#C6xn~>`@l}K9*YG8-W?} z^Y_0vKYt}6LP19MqsiGS&a4}^EgRyM>Bvo4NuxY05+l-VMFGD(&hT%;`{%y&Mh37O zW}M=(S}~y&6$?`62x=oZ{{s%oM90$TrG;aD&GgJ4$-Orge;lSb8gVOEH zN{b(gpg$@SA|l8kQlKp9>OH3Yx6SReH{Yk8?(tE*rL|QbSR9J+z~N32vj8y^Q3ZTS zfP9oEZ`p^SHAQ)FjCucG8juVKWT-ejPZe(hgfyN>UqC=0cLwwmHUhOpxNDHh(uXeP z8XVY<6(+}{{LX8~@!a&2kG#d`>Nmvo?aU*c71-wM&&Z&-1xW5wbdVN@wFiLWJ-xZy zTZ8-@KOZs}KR^M~dy-Msrl85h2he3P6pxD|l9(1`EI)$2p+6}=1t2s{Ry_bGjEsx~ zm?QP^x!(jakGnI7L*P}@{$I3VI=Y2YhFqJpt%+o=G(Z1a*?w7j2~rUK2a}Vk^!h-S z3TKFSh`RcpBm+%IxVtNqVfu451lld6UH; zhQ>4jvn?=&-n%8BRyOSEj`eepqR@VTz+&HF$vE;x-s>YwhV-d=0LozbOxZy#{pHJe zsadSGaWjFU+}!ttyOSk8^+N)0-Y~@wbGt^z7#kV+2Lv2mP)H|*@6yLS24WE%0N4iS zf0kIO{OmIrpA)cD2|%m5lMiBIV{7Z`*4Ea_4uB;w^bmY&2y?<}Ks(N85`-|r-Iv{Q zW+g|gP)Jy 
z4ZP+KfbuNr<<+UyZN103HeY3(I-QhQEYGs|y01HiCJVjeky8^8*`Qtx&DpFEw_SPy zv1lga)`2TfG=~s?hvl18xa3#zy37d4k9Eq9Vpn;`MNv%bL*n)+WEzBwTmP^5XH>r9 zIP>?zcR}*ty}t#dAp>AVM?+g7dvr^>e@6W|=)V~QwD<;i>PoS*fm-^CSYLT`*IZQ)Z1nh0E*Xv00xZ|pwZ#E z%Yi|~g@wa%X9PO;(E~TuPM~4a3b&sHf@Te%iYjIhu0y*0G#nir`S|!ibHErx#q_xM z;|pN#WWko?Rg;NQ?;il~eis}${Wo;)*sRd1JVC9D^qRe4&lGI$r~k>5_qzlv6v(JaCmhpCZu@oF z^gBPxyTdKOs!*%6*jQT=!35uDV9z6x3N_)oW`RdGmc3dTxN!-rb5ROkU*AFc?esuB zk4kYAV25mv_`zK!=>1Z);A=4cUH4U_U$(&VQVjV&ti5+U*X{c^uDwe;$*7DZlUvdi95krBzvh%y>v?|naCXsPRa-{0Tk*I%Dcm%QJvah}I< zJlB~|1D1pbT8cT~zfQKHZtuNyx_qOY@hrZ5fS)gE);suT2ht~ zvY}wZ8L*t0M3;Mm34z(WxENGLZl3**gG5FO$;CCk-mBt*HJ4lwBJin774#gd{9~aH z+Ya_*!07n#}iCwk~j{vP)T@z}?Un+aVJ zCS-x(hD$Ga|KrC_yE!4x%lXzVxBmvyWBR%a3aN5eQ2(2B*u%h3SWs}%dNIBId^~>^ zfg|}l%xz|?|M;imOk2H2d=H6`;6JX1O7~wnu>YN&Eb+Rkx_aZQ^cT@e35}-g&_(U~ zTa|axCR#p9Hd=QDcex^DySstoKjbpi~>&cNK1ZCr8bDj2VPXXX3k!GL+>@GjhR|X z)uuC8dP_K)CQ<_F^fjKzW~r`G4@%GVrl_MJIeF*nTcG{FWO$K_t_P?qHpxdTy8HON zR=AA@AV3~%b+N?tG3UdMYoGODa(_^SLQBq}FUlWr*>0KMc-@ZC3*_acUl|k}YQhaq zYVY{)uixx&@OlGYH|&!H;fq`)QqnSXUqB_Fo;bdFe_OtHSi`5hlkXK6ffO)M2J%Ep zywJnzPP8ZN2M+WC=y9(;Qyk`{iur$Kad9!WE^8Ybda1zUDqC69-%yC3YVg@C(DQr| zSP)Dp2&j3h*d`@7$p$+<`d4gFy}iBAg81=Kti{SQ}j0x;4ldP8z21MM_Fam#50h%b#SPd3jSU z!_13^Okn{N-$2>na+74f^KZ5(Q-T1<%LN##V}Z>@R?@3B8@0`=daKzo65kyS)K`D$ zCm-@~9WGdp9c;3NYMSfJi|c65Z!%nU6&U+^tk#I(M%Ddby}hM$8~5uIOf)1IxO>37 z6Zgfu^iGjGROGTU$huE#@5D!kbp%Y|;q2{7C0J@>f-ileu&@w$EsRX585s#hjPlX9 zoSo09eAsK7$Y6RXYcC^X$Pz(8L0xW@$EE!gWT6dD0`_Kz4VijUa6#DMOZNBIT^T(K>iN4K0t`O4Jjmm55XY_1(~kBehr{BryOkDRh& zE(gPwc!fh@o6%=C>|N?YqXEuETcVHka#_xMuUf1_j5XqAG^Ty@ib=4^O7#$xh9zvr-M|YE9!O@*HAZV4d z0E7nF3@K?gxko*AN+?|H{B$qNJ%z7TQCnxa7@t?KUuVcoT$h&aXnJbHU2fhQy7|2c zREFpd_H7|%^N^5`8$5Pn6+sh|lT`v{grEowIAk6#ii;T`MPO!T-o5*!eDAZ3b$Lj+ zvAK7nhIpN3{#35SGN6&IOEHp?zPjlH=)pJl>>s(QKpYXlB_$;#Z|mrzchY9FcGRZ* z6XYI)q&ydStU1;;&*gmDZ#2H?7NxctH?dHLb!b^`$IcOlkF&qp_c{-AtFyH6;^`d) z5XI#gNQQ3FSi!Vs&qoJpLnL&1D8JbjtbX-JQ9o?@)9y7S>@&jcCu^P3R})&i-7ye*>u9g06BT8+O|GBp&uQdz^yaP+ zZT{Q0ZzJl=NNL52gUVj$);=L~QSwFf?5MvfMb6{F1A}LZUz6qJUO&>^YY79h^=*qGxiyTq-uJoMlJ3JN&KpX4PV|?03 za4#nc&t5*h2@UX$-zviW*)Jq(zopDLbXOT<9}N>!I*xaQ7`~J~2(f_^@zokHm%G1% zxtk~!)+f&Lx8(~dwhm6eJ4V$uxMXLzai~)u#r6YR5Al|hkj_ZxAM)&JXZ<0@KgO%M zXJs1rD^UfpSV;9M4%ylYFisCtNP#|W!%YAmn>k0uHtZZ`xsfuM(KHFeyI{}(&3r<- znDKhcwY}e(as=z|9J&9;K(1Kk_zLngou(k$F|j=l56ZZK z_#w%0(Y8Peihq8`84(8F1*b!I1xK2r($dlrFLu_k%a_`kh*?E=9qfE%s_+BmP5Y>^ zvgw|mANS?2Wf`!3LBVs)&9ht7l4TavO|*ufO;77vdDx)`*tW~FAt5-FcB1V{tqyuW zI%eir=*257?&KY6t8`K=_{JyQRr_weW|iu~PSHg|B!n&DX8X*tLr32gB2(}H#OaB? z9l9u$`YcI(;NXPOcpx7e_rk0Mj@-dLV7^xU(gVY zj9kg~Q6BwGu$-Y0XI@?nsvb+db4Dww=O82 zUV9lSS=bdosjkA(PL{K7j>mj|h%=P8om=`F7cbjcd>{Lz#BKm-U%SBA4J6H7zrJ`V z=4kpV)O>t`N=RmOw0=XXF{pb45!2C~2fk=BExn0TcNMp^0QyKTZ&Df;I>~@No9gxg zHmJgwJ~%w=diQQ1Kq7z|n_YceMCRzx{#kl-?i)m4=|xvF%nTLKYsRNyu(DtAV{g5u z^z7_WBwQ*+s;{lQczZo)P6g<4MJZ=wRJfDD@~4Px`16vu>kCN+G-dO-EC;^AU7$lK zHo+~lQjIAvF4IeGzJ-?_;lBI!?u|rb1M;6vSqo{)hF-N?IOD3R5+*ijzpb+tmk?G$ zu{Aq0Lj)g!HPO?}9O<=)UG?&-J#Ih?L1Ih+R8G;Y5Sg+seImMOaPf@qCDP#$v%`$V zrDwV}V|*kslDDa4ak)^zM!KDDM^-<%Ewo^P#G=cuHXZ3a-o{`+CPTT@&AU%FY!}P2 z8}Ia9ujP0t_na}pL92Or_UV&R%eT5&F}!?vYvBSv1*hjEhc>XXt)Ow+Q<#=hct6^3 zSV{X5rN>Ljk<96yq_@ggwWXzT{{5Y$0t+TyZ(dY#REvA3upv}z0Aj1jFUxLUcqmsW zWZR)LZ?qP34?k*HFd}8sw^aEB|8T?Z>5`(NF(|o4QdTT@cslsh>WBM3vtA3ZI@Luo z^SiXLQInC8@#GZ_*)Le6Zab>gRj_mUrqygtJ2&O4inv{3p|hVd3G! 
zU*EO3AESK6zx!C&PU>GjhuLTPgmge_N@b;N2NM^`^EKMUw;>_vY0^kQ{GhpI)3rq; z4Ht>uxrpTBc?F2G6%`BclNaeVnRm~KJ?*W?E|CeBW?iA#WIvsHmYVMg+v4lju8JJE z{=&C-xk*=^PvXKJDuvb8mwDZ^Ke@kb&&`wDX8yq*bCRhRr>i}|<)ox5mu}A^T}k}P zw5@GxpWCS(Af^1r9dcz{`ocl{u+W}uI-3sfaZYol{-VyeVdcMmo+$ShS^oTzaCHR; zpx*_sP*1x&hgbu2Pm|Qm@OhS2eY`_&3DIEF@KG9{dQ4z?qQ87RpWNu{ zmMFZf5N*^}`+9nUXW5|=A}B5)0UUCuyUD1#$KR3#LhvMW(DAQkZ`baJBokX-ZGO3Z z=hQ&BAm|}ov=7M#OifLby@QvmZ^e#2HQ6++mL$60)2EtRd$r);;2>TGbA)U`bk%9& zf~aW+@bgYiD?jwT7Mwcu!k2v|&(jBP2xxg#BRZL}zglc!FGOi$ag<>~rVk4Y)20p$kjiMDJ&Qo< zy|v#_Qii(nL(S_AQVP#rF5W1`cL4ZVl7Xsk=XJDGQWQ4mFg#Y&vp14|ua5CIm4lWo zko|(ZxVY10xHyJ^YnP9-N5!F~^N8y(e|6#!k zdXMPb6>+PFSrGRlINdL3lr>mAIXOJnJn{}oG zN*gRs-Cw`r8RdtXl{II8nsOxG|&3=r&Lv=>wuO9 z1O!AUv-1}YXAfr)f&PzbJf#F8wwt=iFm4@gvdgLKz{>)*1t_~$CmKtEcSLTdZUqXuFRZ zI36SQy6I20WRc6+EOA}z`y=@B>RTJLXu`x^b^k^5Kfh?3Qe<3V*4sdy&ir0m0gLzh{(2rW`x z`R&^`4=Ipv0~JA2mB|@#ye6on@y5`mRT+lTuSZ~C8bFcDA#v2<*`H}pU{yS!)zHW> zdqNpDj+QgVwR}|B_W&>VVs6u8UDz?I#Y%$&n*dj)G+Ihb>Rv^iell$G@%e(5)@;cP z;{%^on+VHpH$4Aa*|7J&cVI19Js2=Qe}5Gj85tF_X)xgXifTsp;!C(j9jjj%9slDbn@o_QvvY;t3X`m2V4;7&d9I zWT+hhFqKKq8EDpW_5~TFMxSOYS}KFAtn8*OTfDu!+qY=$U`>_?CF3>IS5;L#d$t)X zD8?>!%++opWcAc%&YTF9=!_tKEBW$U!zuFea&jdB+y`+PP{89s3kqs#tzblZK*wB zXSnPn>lZLz(Vp9+aEDQGb?Rr!j6uWH~b;?bkCXo7K` zhdH(!R#sL~XuJ}of2;Q$n-71umN7Y0@3@-GBQVhRHHEobbD8S=E~1w1;17_NidTP=U19{GdePzu({%V1DbYd8p_M1tuW|cr3dqX@x=>)^f)|c3LL97p4jV3e)2f-DyVOT_&PoIQP3hGyJF){Z>2>N9!6$U=bDHMoA?h z?J&trFC?+N*=fPztMj_VwcM|^Ug&{rr5VbH=;7Sif~+7XN6US#^2sSM{|TMV9bs!t zFs0x75T}~T1bAJ`1RD#)RZeUO~=Qm=A$14fQuOu@j&sF#2d~jgR!U z0Q2ds(or2k#W=SBGp3KhHy-Per*R^?yQ%397Qc&%ijt6!ShjlGQ=NCa_>Y(71C++x zk_{OJ`}9;G&He5?ROCmh4A66i3i;X`5pjBT#S|7#>_y8qMy`XeDas*I9ln=&8 zqykrGjF7d`s4CTCoQhHJs!L*IWtBEiDSQ{EChexbb<-vY+@1~Q_i4UmM@$)Wn9~1uHp@**v;8^YQmzHnTg4Z z*~USV_u@x;Qyu2LvfI2oP;y}F7p+f|1Y6Wx=EgckP z$HUfZiW{$GqO$H0Mq^I9$D0kNlRK+rG#0ReTnGr*vDtB; zc+f>szP@|tLz9!`*U1?5E2M(;&ptDeG)F>uP|9n3oO_G;1AUJNDkkI_apqd6n zC0etps;XMvh1#o4A`d)1Wor-mowu)FPlRSz9kZWUp{8fBi-$*(Kv|~vZb9_hB!f5VUQuh=XX#{A<}6cu+9UM@n>eWSL;UPsjooUoECz@lxHQ%_}i6 z5n`GMb&V(!_2&@#ho)p_tEb@Gk?dpz2BqrXY;(*IJQh%_lm-(CYwJuWbx_y|`FmYy zdv@?$Imv-dTy7lF=@{Vx9>pO89UZs#p7-u0!mn6miZW{ELMG+D8|RFOg>CPq4tMQ( z>a$OPp8l9OzyACEgK9q6^0Df5?=7>M9vorkkH%!UYRkT|@iArGtJK|S?(EOc-KH0E z_}syRChd=*1I&_BQDI~O?EO&MF^;Wynu0sa2zoi19f-i~jbf;`;TO?cD_nYp<>;22 z%ipBMoGIgM7OgZ%HczpgwCteZ*6pr}s(}JWE-p@vo8@DekGecOu*T*l4UClf5NW~kjt zNi*NMu(%i}bUX?_!1Yb^8)>U`r)giSSnU%u1#xj2$@)r|xQDYBEa8@F*=RrZVa*P% ziy$*Becz12{o}O*DW_R>P zD~q?M8=|l^dz{0N1gi{0Nr$GZ#HBQ6_T@KzzP?;mL;9v6scZG7$*F`TF{`D2rQW_W`VqLWg?T|s|!SioEW}h5EYa7xhNM^-I%3PCmmZap` z{Qj+tkaAK@d|U>_B`(1=Yu)n`g`b=ju;cDyr`i{$?B z=0!Ry=3$EQ0vpotYrCz#@O~iw05D~8Y;bqpf~U5+_cS6ckVwFDS!wBqTAf zL-cv+L)cvH;IfCgiRAuET&c*sD@`{{Q}{6e*YT1xSjSrBk=$Q0uUYzELbCt&nFd!f zSAvQE^-+BmNXGvhCN`4*t6tGPB-6%uLRU`eE?@p@$ZLo)uoLTmn{fNajWQ}K7rJ%M z04F>IQgVkS*tsErt*NKNmlz1uXP3j zc@vPHKbMUbX+!Vd>>7ef#?HX?&_m$GE0U3J>54}G-FXIxtL{jd9BULW2GUl+E1 z;bHak%VLkz&YnGsCKDBz_Ue-%j8Y%$rYF&4cfl`%D6i;8_YC$9N*~>)(GS^Qcz8Jc zMd4v#g?%()MP?a~d9MRSm3Ata)2ipAa2397 z9kMH686wjFMr1oNGE>ZH{7!09)_cf?823Cr;Xk0)rnr`IE4q!T@tkR^3li$Pr7Fhj zkRsyjF1B>8(b)FZ{{Zy~-~FHBrpMw;<7~sx3ClPrL@OE(e=zUpg-{8m%1I1gd;)jR zwpkzP#gi>+rj00CZSn|Im$RUF_nGPypE&+`i>zrX$H*qJ{o}rp30S9gAs~f<(@+)N zmvuE2th=yD$P!d%o5Pi#y+LNky+EZU+%0?_yUsnwaHOS@4*yg1Fl%G}DYbY7P&7bx zi>D{vPG=5Qs|sv$K!6Mbkla#qNS@96Ls8Q zzxJ?RQ!SC+MZv9f`*LlU2BAek#~t{Xg0}LHsZ`IoHPY_50#HEQp zmTU3Zr?gA(NRk1clUM)xJoGUvc)>(=$6`bFBH(gP-@ zZ2Ho4B^b_zM58A4;dXO9iir@aNgdS2_;(mP1;4`H>)IWn$W8U`GZM(0?%o|5c`s4% z68jD5`58b&YarZU^kNPuS9FvB>7Otc9_rkFFmS_9s8?m5yHI)Qfr8Qd$MQk49zbWp 
z+6vWIWr_fg^;nJChA_J0z867_*Te|#H#SN~<($szBiqmWZNRtxbed>s<@1Ng)C3e^4`5b& zDwMV3;Q8ZFgQBtKcd$NO)qyc%6@t^<{ZH81SUp0 z=LSR)h&d>0(G<&GYL%T$+(FlU-<1j+46@m&TMUJuS!CqCXaP2>@sNztGsxH*XnpGe|1{$etzx0 zQS^X%S)v%z=?^Nha}Xa@`lA|vxb34Fic zHM0T(GO86Y^E*$RgriE^U|z} z**kwGoQCpm$g`I6Ndm<+UXxV6f0XewpJxp?KLC_*WRSbeKM~D*kR>Wyi4id|xMRA~ z(&eV)F#+BG!Zb^TB};v2-UzxbHKhAj`_vZIp#8@8VIcFyIfQcE4z82sgn_kBW)`Dd zynSS{y)1IBm(z^7aK7M+LR*Nq#S1T(|CR2tBfw`Ct^b$MC|skC)qLS^}5 zLRF1o1B@VkQn(GnB8k`$VOc}|T0V@w($9wEAp>cEJOq9+jL6?9NR)39I}H{nsV}zt zfvyi$;Bqi9rVKC8F`OHwgr3tBBNTo&JG&`X_Vl>jw2@)i0;bo*e2Ra${N}l|dZQe4 z*IoD(nVre?slv+=Nr|o^ZL90`AKILy1THN%-|`qUKmTMYx;@a(>Q#R^9Mr%)vQ z_ECfiYF6Xp@Hi)_+;Uav&EI}S9wCjHrS61J*&Qn#-x9Fq*UU)VBzw>l=mA?GP-4oR zyZ*nX&KX^Bst<8v8}1xC^y3r%R25l^G5&KfCG-T)RV`ol$20w@%=(K}ETZ_ERJ^v% z1fb(3L%N)?s<^2GnUq%F4_Iv^8F~I2E163LKIxfbtjM%Zkyh?9?m~`ETp$TaNv=X4 z>*x5B4h^s~Oll;Vf+DzQ&I41&^h|JJ36zM&YYV}39$I7KacQcunix3xB=G~ff-^rs z%KD{M>pN|M8kH~#XP|6{RM{>c0?`emwj<4i?<4*s*hTr$Yeg;!@W_pc}-m5);g^ALR3<=kO)7Bkrq%r zyFGZol!fqwhAgWTt(x*JQCw&5ON#NwLZou8zLKam2 z5v?KT-o4_iQW($y+q|`#>W8|H%noFHv~p4r&xoIPpqQxW*Se&RoT-kS_QN`Q$1OJc zwF8a2=MBetfpPk?(Ub_J+JO}7*Z2C^ zjry!vv&ORZjYxcN+2M6c+D{D$5O#V%Y8pp=$Ko8^f@0th7WefuFc^206%ese9&UMo+e!8Y# zY^SB79PL2sNld>y6R+~}BFqdRg0Z}4VKItR8feKqitgX-(bjvDOh*h5`50_6y3!@; zD$2{xnRgUmXp@tZD-=O55Yt(0&=C~W4U|t`|2rlp04sYJsAwIE>R8I%JveTe)1FCG z$41ZhrM`wKs~6W%Q|>zUUyl`fpxP<)^&#o!4%;kNV0mCw#TU<43vJ&i77gs&RrRXZ zB#kMX3Uf2mmTd3}b{38Wj)o=HkZu}fni+8T!Rtz+gUZ@N0#1iBcF68h&B$+w)cK;j zU8(BH15G!X`XczHL)+q}+T&Vta;663V$__*28+>MA5M91n^bO9Uo&pH+$8zn<9en= z1c|qm*n`x=rBd=K5=k8b(!X%nj9spxCGX6{2;DB%4;5Y7N$G&s<|aLc3OUN7cW+E6z_b^O_?X~ep`}qx=Mzi6+ibd!F<30Qef##- zGI-HEGHB@SI#im79Hv3h$tbV*{T-_t>!FTNgE5c|3Wf+PXAb0=yB&WIP>CM+T+sAr zP+Kqnnv-hT=Z6Mut1M)4lp*}=!~1CWj<>GV*WeJY<+(wCr_VFk3u26yF}X#CGxi!i_1^riQ~j?8+5-HhGQnApX{6axjf$F
1ZQY6pE;U6(9sk<#J=JCg(A!V2(o~W+$2g&t66XF>=iu376Y2j?+sawjue>*=zE7 z)nM5X(_SoDsV0yWGU!}OM=(#{Ru2R|Y&S1EwA!Q@fyfkG7lX&t#jzz+=dR7t z3&b5Vfs_O`THGPGw;kSUhc{R{^|R3~8!T}f%B~iQX==3O@00Om)AjiNq}h|aVALn* z&_yG{m$Q5wQIbr@E?;O6Mr~o=I+&J8J4LCM5X$A+*61_l@G&>-s_XU0yn2Y;0@m%_Ln=J*k_UCQvN3ybcW z57=J^-o9vDmdNzG!gyw_8$A&=1g+hD~)VIpDFq@#PBoLt@biE1z#Oc0~hGSY>=H!`KOzvqj3K5o>mdOm== z@x}1Lp6Onm;Y){UGoEI$Tp(7A)Tr9;{z)nY>*?{Ply!O*)zm5AIf#8rQWRj}S zNMp)emN*6Ban59Yj#tn`;nTR>}={h&4G?oDZybDN>du35Fv>_nWOdlqUQv;^3) zTyEVW+66=6+y%H#paKqs(lZ<6^*|MlVG09T4*vv#x#7%6di+?VryJcS#|u@o)j*ETIWB330ZahsjZ? z4RQKI{DHHtmah;^SCg)ohd5iafByWrh{k03?L8qPF84W&0@=2^8tSw4tu|@ZSSfNu?`IYyR{Ag?Cu>=^xwv95^C#!IGATy zHoDW*DcQ6P$|)pQ{{WNFsLj66#Ur9m?vf&tr@QRoOT<{f=?Vcpv{1~@IsH;N-EhIE zSU>{^EPgEqhxdGy)DT566s*57N(my#v@wM=zzVRr*yJfv}<-`1fz$p#*$K4eaN zw$ozwFytt$EqX8Ec0nsML=>6`M00t1c=o3bfN<`2vn8} zQMb2bSmjyqq&~|bATEi!$(_(+#mQlJ`*{B2%p0C2FGBAKtvd39Vq#+@Hx%Gd2drQH z-GXp8fNhZw-J?sK^H8f`v*Z?yHz9)JCPYyA@LM2tNiFlryNYe1t-@X9LGjg8pyB0&vf2J z><_?U+96=DyI>6^pJE-_pPh~ZmIF_=4;ABpQHU=$G*(_2jPYWWhlunU@<`_d9MD7!`*v=#7uQOILAA-O-eAL*?7$0f7+=29Q}Tl z$OVd%In%?_h8dk451I{gCWip7XF>PWes{-jekF7jGs|L;y}VF(YW;RZIOUB>?1P1v zv3afzU*r`t26GRbuUh3I(A1KPrSkde*LIMk)}dl_W%f%N<}M&9yZNJ_H$=_cayR0K z6Afm6{ZG%U|8v$D^3m2LG>WkNcX>%*hbmC0H23xOJ zAkkr@X#IdL&d)#>2MFlGbT=QC)es#?&(+`Fm6r&r+fnXGRDfiCH_v?o;nm+!%skDX zUHT$9UcYM9Uy^OE@NjC&6(*7ZzLZz}(cFAvxYbwPX6}aRK$pk9OlJb_wQ-i!Co3 zBlB;y%>g;_@l>?;Qpa(K=a!6it?4}XSbN~MRAe^*@cbRqSE^n72POmWS zL3R($Fe(Ev2{V*km19|@Zyz0NmA_Fo0~4W`ZbW8h@euaofdcqH_R`Ub{0Uk-G3**u z_Wv$$`Z10K%I9tKL)}%XIw1hr+G_wW1u9NJS>V7!dN^K~vv?pm1pG&Elw4E8zgH+p#)% z1r~yRa>?&+0{IcQjT_XahK#1=AKh5V-HeRg4?&r>KIKF4w2OV3`Lg?On_EFjm#7MP zc5+)zJQGuOu17OO(t+^iN97~6sw!5Dr`!R-EIH`iYu#k4FJBjw(M)D1G$3s93JOh# z;w=yK_NL2FIR=qOL}a8m2Vg;cgA_xR>3vyoNrhdV`x267JewaMx6XT`P6P?4*`%f@ zgEK<3M~+<2 zw1(n5JK65w3fj3ZVOrDs(2>#-y+h7$R(H~c^gCh8;#5+Y5zEcPV+pZ(`4ih3W2_0x zJIoM!=fYGFU-HM$Z$f7WZriSfylDiz=3twTM!P%Q$XaKr$ud(mYg}kiV7&VD({IAB zrC$a%9f_-;*KJvQ|B?}@a2E`2K5CeC*x_h|ifd4~X{Khgpl+jf>!ITS zPDWp4Od{DOXC#jWh%}-Kk(9l6S&~g{p#)3T<(Z-WIvBR@x}>P&~qzduG_bB zIe(tT{l42{=S8hF>tK0^-Ui(T*_?#O*+aQ*;PtbE_NVk9fM=w))On3l;e6eCz zdx+ifkdcuQfWC0fR0`~I-tp*G`*zlIqG(u?+M=NA9D9C}AM-kE=j^=BJ9ue5hT*Z- zG~Zk9a|Q5IpZtTi`|+I#MZ(y-oZk#WD|b8Yd6e$q>1hX1wUWwx&Vs5xnC;;YT(;RV zjtR>a7hQpTkm3jKGJCDDUe9a>n&2MB_;tA(?=DTQdf>f$I3@;+;y0qVL>8Y7`*wxqUdx2FF0;b&T6lI4nU9M1x%7dG zfnBg8d3~C*aMzm#ju_ZT7Aojm-0sfc{gYAVK8w$KFlPD#o5Q z-Y)v77o)j~d<&dyCY3};$)%6{l#9vcUtDgIZ-HHpcF`|>JjMMkRyB8%|5igZIU=$hY*f* zw!C&lT6Mlch%s$G=Y_d}ero;dAC!AQ*58zSU_e-1T^%xjiW$oPygU(``#6&CmWzv* z3yfkI^CDtlSh;oU7SgB(O@OdKjTTQNweS8ZME5#iy@Xq%AF0gh($%vwx|~mA@y-IV z8fDtC90AE+Wa)+T-h?3zt0WPdQY?7#A8Ah9@jFIHv5eJz6^U{VY3A z9De6>X?77QlRdag;JzVtI3Xy#mX_vG;)OTA*^+Rg)t*jF*n01wT9?c3+1zSf(xN+2 zb*xam>IpFf)S%yInJJ-W1X(-CBci3Lr`~=C5AJ zkrJq$w^;mcaXNdF4vd6L_&3?FDbZYPcoQej+puGP=kBJu`lrs_BQD6NaXr3%Y}?#U z;nVAWvOawD+O>tF_w0T*G5tTtztSlgJEW15b7_SpR#RrzPU6_yTx9le=Rn!hss*RM z?P#&a|nR4mudfdcbQ;bQy*?Ix+4f{w)+c&ICLy5-B>Y!`8`i@L|#=O#Y8)jzqx#By&h-|rr&b7o0aVUn0DLE0UC4}lYQ4u^ZIq&+r{|@5 zQQ_+j@V}*{MXuQr4dFQ}fEv>WBM3EZS+SBb>YRg!}?1 z8)C4@+(b_951U0$P+Wchyu{b8cd9UkO=5l_EX}`^U5Gu>QdCt9SR6wryEOLUXOqWh zpW{%7h%I@^vVKGmW~t*zL@qIri5=Z8b|L=G)S{*G9%eMYwcBa$miirhT``j9YB>eu zD)Ly1vrE3$wx;MA#6#?tgG&2&UnJ#Y$#+odpDTrxL_(bHzptq3P@5!hfY2@JXGu7I zR_vK=k9%0}p$UD#0t4w)Y6nQje*#-GwKBjesciSd#oWOs^d2%UJ$*QAZi?ilTJla} zCTG#!d+wbkW<{Z5lezU71}VTkz^aYgZ*5h-vJ==X9bNvXsI76uagVyCRYVKnz3tguONJ@%xPxdjte*L23Z-P3ry__EXC}beCjwV|r!Xk;|78)gzy} zUTA(BFvW$9m8J?79>OS0LTWZ!<^!xoEolh1jSZ52s*>4cnYDGK4%KMNsHX^rx_vNAUhk9fT4gXLSS<{Y&ABLLMl 
zbmDXqBp5!He4s2Nv)cvOYB}d?;E<2cD`uD(z%wE%dv7sQi~24^`SJ6s4V{pXkhvf; zK4+&b7r4^%5za-M%jAFSzLdKUM4Hni4C1(`$(y0Za;2*j@#16NP@CKUkGaC?;M$*1 z@~;*HL%aJ%D?|tkmT$L$DUnO7=pNwYj$BMg@cICh^4P?tSNY`sZh7+e@lik#RZ-!! z=xfEm)gDrRWf`}h>NVBbLWsB;Bn^f-7S<&jeyBsQaQgw|U-+8u=hR;R{UtquTVJU| z?Uf}fCi2Vh_ly(Jq9d^6) zUSd|>6+t87T0_{2>i@BY#}rvCFv)Tw>`p@d=YRJyI9w7~`N)U5W(;D{nFloI~dse44n-SB*N&$wU%MSVbEVAmvtYZ1T(GT1@X)w^^jY%Ze0s5jyP9y4^20~?)E+x_L1H!i7_rNc z=gob%7mgm!&7#vrZs(It53@Gw82m2HA_Hi9kj5^z_iPqQNxG6#hMhAb0jNXJZKHln@IU0@ek495fp}+5i4gXp6i` zO^RSGI-m9LpO`dWL*kr0*=ba4mk=M1_|e^eh&|>u0BD*{4`Q~AoT$bFX=rG`x<&3C z0pvEgALg3BUl5upoxOQBkaSQ&$t^Em4VR_<+k$_LFC@=9yf0Q(P^3OA24t_tv9PE>Syj3!?`)}!PqM{N6BZ3=K zx6DSZSZf7)KisBhQlptPYzC`pmfdW}VIDrZ#33Xz0S_!hz{9#j@CNIqc@h(=!raSO zx9P(EC%@T@vYBt~JEfqYZNz%&Ao!(eB%mfFvl+R;XZW_DAQh+;?16^e8O9UV;lcsd z_)Uo-KVH-&y`oQ;wta8R%0SPZKKys7I`h`DTc@qaE1B#v^(k_6CbRZk#I^)wHatCZ z7x`tz?*?@~9o2L-V(`j|sT{5VIu@cmc#6aLDjTCMUn zIe_r3{-9}}_H(~3*>?+8$;I=#74Fu>8qv8Qe++TqzlONh>$5}L+vGcOK0E~{aGItW z0hL<%%`b1nRsI;>6t(+D6rJDD74~CeiD%j<{=}*?sfN!bhi?>?8#{v`I@ZGflv5M& zO(S}2v8`}r#aCjYnKco=SC|o<`n7$2Uk~9qOK>L#2$iQc5o*A zk;?Gn)kiEt)Y~y6cEGM5kJf%BDoAdA)l}+k)^_vk(KJxFE%ALWKH`F9BwP+=T@6aj zo`EGbXJ6qRRiB;Rn9uz&yFJ*U@QZ{!^ms;my%q_PJ3CHmhQxUhBjK*L*^zJ%f8)+n zfHR&8iV>C-;e+e&eE--ntFMR=`yJF~G9V4%?*FgHw&YY74=+0#o9KRI$x5fZxW3qz z@D$&6b?wHzv$4rKkT$x~q=w?Lx}__bpzQ==eKh&s)SNv>$m#y4p-nQDNZSJNjRv8g zO`AoRGz$w_l-td+v+OCnwLUl`O#f9^=@EM5#Y3_y7Dv z>IFIw$|1Ux%Wf)DA8)j{PquJ4G&RrDB~G1k&%`kU3M@@_(>7jPoEctl%PPLM235Nu zAG>M9%~54%!e_JCA1vYousnAk*jC5eSnT|~z0XtC46xIq1Dns*URJy#{5!5c)mxg!$Hk!)*=?i*1+Q~+=G8uKGeJ)R%^;y*KlJ(t3s}$^y`|jTfgHYbiNv?or?@~R$3Kxk${P`*m_J3>=ST=nEJO3GS!F}{s z$VJ?gtb;KEIxaQ5`6J_EZvRHe#oMttPxLAC_mPp2n35TE_x?d?SC=kF2x%zUl>Ob#>s&u1&_TDe(5EG}-dbfTlW| zXR$8sVfduSh@)^?TSaX<0Fofdj5BBSg!B#)#y@0+a}7lY?DOR*AJaL>ko3c_)}AxF zn(UW&YOd3X!`nN6F@{ei%!4%*9TY#ZiqB0Lxvk}iRZp$LjBoi zNtk{F2mR=M_jWerOw$XFuCO~s-0L6rBuzdVav-qgI1zp(tm*lQxtoO_BGFNry|w>O z=&=e%!R?&!j2us&K8@r>A8c{xw{xd{LJ3E`eu7O%@@UHKPWxo4D<2nV^49A&U9X=C z4Y#+fSJHUD4Pv}+Hv~s6kpGay|JDtWS5reqGD~7cx42B~_#95y+*#{E_C={IU9+8r z2~B^^Mk4gX_>rCNzcZ#4y#`7L(Gbtf4VIcvVAF%^^;!;{cN4-gUndC|Ex~+|zkf zbk(C824!5!wXcJl&(8`Wf$N0Ab`0ToSQsI=fMV#`8e15B5k@&S+^qp2h>T2sf6z1^ zJG)#y@!mp45Wm(Iz%XK{;d&WE-0Sqn-bmi*mg}KHwwlY`X->b;BRxr77QaB*>ssvl zy$ioN#V=pu%XBo@>8Q8fnGWR(cZ%bk;%SwcT<)YxD;o%VYm5sWQH(f!L}lew7HS%| zJQ^Wtcbehx+JKU8`-dAol{U1Rh^fC7yb{3u<&9q&Csp|Hhr)&l9B5nZcoG>H@Rbm| zEzD8=f+M->&P{>1sr+g;@!Ae{Kbvn78Xha)O(D)glgk;r9@7cQI(#$x0c9Zhq1v`S zks5P@{m0p^`r-}32Z3kt`Ap@^&Ej)jy7J)ECi4|G^52G2s{{{H?j1oPjH$vTWDv9Ynq|NYsv5FFT#E?k_Z zj-+w)Ev&w#QveM)*a*5(VMx%a9H@++9|Ku8XuzAt%W}LK@GcF+Lkr8!tYr{IP0tV; z8Chh;RZoYDvmbit>&mt+MDpDW@cC!hPWtwCLw?MSxzGnB1=#VgW!8=-q;U3<29j~1 z%>F;Ly>}p%d)z;Mrz8#SNJ6EoqOzi_R9042BpP-}X$YaMMRwVg9f^!6v=kZ1%qS|9 zmCUlgug`r`8s~X_&mX_@*Ez?!?(4cf@A(>FvxncmcTZj7=+UF#rfcA&c!3+WcURRY zPqp;WGwEmzMwb>_5g$+jnunUzoF#BuWR&%4hAhc8oZ|fARJa|>@hgj7EXChF^YohY zC>mrAy9N&o;lcidVG!1!xuf~27$^l{mcY!MhRFqH`1CUt!S&?j^XG5}gK`->6>4{g$Ic%knFep;tdu&@H44C>a-O8YA`Ga) z!@pe*UjfF$0I2O8tP!mf&I6G^bRun#qriK+emh!qSL~rtdbB&I^eyq_0#{$HOe_Y3`1McVey*K`TC^mdv zG5hwE^jv~oIZJD)pDZML!pQ5JuFY#8w>9**IQ6@zh*|U9Mg6(Ycxbp<{ToN6y4EVu zrjr$zPLdy4P8N#O*r^zxsk%fY71@A%*Hr@`aen^!l?Um&p+5;*$I6O|YKA%Mp6bk; zn=j5`$FeeatH7(JKX1&u=snQno!lSFQpd&1+nic+s0QVtV@Xb#01%C7vy-J9Q*koz z5<+(Xdgo4dcIl4qa|Avy%16rN6g!Q7L^QEKuZnKrHIW7_ZEehbu;FJ^Jq5 zqW9&u;#}?~B-BBC9a48#KVxXouDOOy^?~Dbt(omE0eO#&j%)+z!8wOQ+M|?5lCq@#G&GDzj%j5M>pB4gO<^r z1qB^+apG)6uUHyDtM&5tkFB-_$P3hf@FO3eGwOY~`|#`fuO@*NPE+_U_C2FLI0QA$ zIIUboXzJl&w_&Q06liTjAR-X3cZrz@7k~W-+$P3RZo;ffmNZ^M zC71Qs<$l+7<9+C5K$=MIF~q)48-H{ 
za_yvtS0XWe8*x}1)vS?h_v=01YcfL|A7NgI8;7(&dsmmu;u5hh$#~%wdj(ht6@Sf& z?BLBSDDV~*K^ac!gxJ!(Sxp>yr&O}t?T{c8q9bKsP>Tn5JnGr0oQ#ula&j1@LOD)f-`~C-@IdU{qy-+&;c|Lh=imK6BVIFB>65;rG_INZ}4tetlk*)e1I{okMHP{;_!c5_=W5L;d zhl}Ldw=vpyU@SLh6Ai?6SAA{!dE<0YQ;GP(F||#d=R2n^{jc9SJI@9_QjXZsVZ`L& z=SL{~r6SAkaoW%}`8haN%ZjylJeH;2eJVFU|b z;wO^1p_=3KsfNAYRK@|P7Me7TY<*tHv}}R`aDrp|0hki4m0DlPG$G_J)8JJl*0|LN z5*)z>i+4W0G)uXW#BFLsey@0?6#$rR+oq<~vyBJ4iLqee^hil{XZf$5Y4tp)^vPdm zPXFPlLwQ0IkVj4_ZGt8yL+Q}bM4 zw}*?y7(7Ja*A1b>k0k)=IItC+8F`i@2Nh?~e#D@GOc4M5*99%;e?Ib){5OGPlabo? zLQGLfDFeJ+=$Rh~Ikqk(A0)I-Cc=^E&dP8k83{fj_WP@{HigYrF!1@ zZJO{=ucVz#j0{C*3#n~LmcQlRT_ATD@$|l}-%j0sR=~~~Q zB2-Ss#2;ZZSmL;)ii+#`%(rjfG8Kw-20M?~)l9b}+KsV^HjS?W0|T)XX@8q2Ct3$d zNrco_(?J4&=foR^q^}3A{+2V695!!08~7Sf4aAS3*Dp5|X6$jO{~s>NGztO)KT_W)qI+nliPN)fjx35O;V3Mj~@I0K45AXDLJ zC3^@^L$t!N=pe13q9Vge_<7yw5JPzt84DVS{(rkP6k@BvsyncK`!&l0BOco!Vw8!gSOvQY zAQl+dWl{yH|4G_B6wgY}D|Dh5dr|Ge658nJI}8p;#*_9)NA@ zjT0zAW;QQ5R*fr;d+3c}o$1!C7pjNebvQ-M^Iw;4H_Zm58+2qL!VPgK8+7CKi2`2$ z;fe6SE#fW~&i6DQ{B=l{Us|(Ku^yNaKUm9Gvu!-lhDj*F!nDKBVAc{*g-=kmO3DX| z>{e557QRn<;LUZ&yZ}T4>BVo07;QlLoHrM)Jg8{OCR<|^x;_KSK^E}R{o%XokOuXS zlC02vdcsHQ%x3I^5EM$ZI~xz;JwWq<$BJ1fWCZk*j4%5O!Rx4Z&==}rXAcs_G2lMn z`~zkQ{{4<8c{gj%GTOAjf$`(Hsj~=I20(k_n1;dY4X?!j_CZFhcY10!qwuU>mDR*- zly3Y4tK>z@%tl=u2X4=LV7zWTEeqHX9qykMtIOFc6fuAYKz*zZNulOt@gngBavML4 zm!uQ&MyKFSyt4pUK~94cqzaNjD6HCQ7`uo7|fLjuUiPd5i8yg#i zBkj|5Xa6fgEV!0MZ(Ar**<-%>%7Ksv_T#^AVl?)_*iF$?vwv1Q+xRabFJ%QR7Si|S zT!Dce+PMzIdhL*AtesF-nubhIw)j0_dDWEmN0b3iH3*Xc7k*ikl3QSgJRTf5P8W#s8OhKOt&T`<4Hn6U2jL zhzFtf&jLfQV)O|EN3L2w0%occ$s9VgJ11accalqM<((g^a6IZT_=TjHbx4_w0lm26 zp?|awWroz|g7V;|3;)#mNj`scGVEF5<*W`w!wvWvWbe14QD`43KKZ$DMis?pWjO%| zM`Y%dmHF4KNl{DT%$+3LbvTEzDYS&v!j0dOe7W?ReXsG9L0F3(#Hhy4%d0d-ll!tE zf+kUpg?bH$N$_HdD0zow9J8lz-Hj)|g|<171wj(fyAN^lF#J69sPlzch@%ceI`Rp| z3rhS19z(~GAegdnL=^UZgb5agRr!mVdz9yUTA z@}x|}Z{6VX8!k3!7b8IB9rfE}*M*>Sd=wo7Cj7Ab=9a`l&Ue8vbQZw6p-&(pF_p*W zC*b%%(N^cq8q6#pbdzclA>YAswUzTmNQg9$ul6g&o}(W%`0M4IdT#eL2Q5 zX&C8n&SLaSdb->8ok?x}hFPco{V-nb&IxWi6gP&vHq* z0BqQ>Dj%S0i)gl>Y%*9!nH?=m>(|GNlCEzG=Am!*w*tSJb`bkB9V?=VJ@1*81L%C7 zPY9zJ)Iudzt+bAwuOk~{Inw|-WGbAr<7clTtD<@BvuWlELI-{H=+T|K8A>MrA#~6C z*^z|4`_~I(b;Y@DU0=FZuzTsN#NxH^hQ?jOEwU< z|J|7|M*9G;$>mBME!NC4uQp;LQ}!+Fv>;R&`GX-d9~GI>wVsLXOe(|>>UJZ&n|>b1_#$ERAJGwIvQe|?WV+J^i0?`u2Z zo<8{at!nAd`Ofl60AO_?L;f04i;x)DkG`jjUZeWdOGb+GrY&ypm-;PU>0U%p23<>B znvkSqCiL29f~(U3E+fOAyIccPm)7Ys;w~A;(oRS1LJI^b>`@; z!2!9QnsV?3%9fv1_dolEHx;vp1?V92NfD=^lMc+3DKyC!T7=@v`{LQ0S8fhP2lBg4 ze+&#Za81>5-)Pg2o4s5Z@ZL1rNivj))440*Tn>jV2@CCZ{KAGg8St5X@t&SCiAK;H zdwxg3+tN7uueO75iN{*72Z11G9icUKlpBO|+c}W4jN0*0rUf1C zvP|TB$-!c7g`{;aa|VTO;$&~y#XHif^^&<^DZkY>zZ|}P8;$+)Kugb^ShM_cCSp$i zGZ)jWeW=gftb^_}W#ageW~W9j`jWv5gA>i1+#{VFqZ*=(MxgybYYg=%^k_Cnw?;!H%xXt{m^lO>2>2PTZel*=NwNH_< z=ZRbc*>$0%Cst@TkNO>d_TrJ87q*I6iGNY)45%C0CR!P%ciZ7$w+~UW?Hwf)kRFJz zD*F$>_9Ng%_N8huQPI>#j|{AXr{aa4UQjy6PxJC=GBAC2Z(YaXBhl&sD2TDGOFrnvD$+90nvhnc z(SF4gYStxHw23*R6y_VM9&9y88pie!V3j)o z=WlF!g}SHQFrzkp2HokoqmwWHJipT}h~Ll?J6N#MfQ_%L2`Fp+(JW+)t`4ZHm<8Rg z_Ma{UMavs~sG)c1>+`ujdbBAs)Tg?UV9cKfkc_VpVEF|S`0jlWE*&9cn(UHbm{qfu zq}`)?w^;MGCm+eA7SYrArca-4d=#x?*VFN(Ni&C#>9u4CbCDfHU&`J2+XvBh^ydgV zB_40p2?@A8r-3r%`Y2UPhj#GJKMl-}TO!)DRDfxrC4q#H8Vw?iN=lxo$K5Qg%~$2A zNOU~xkZK<4f9;G~C=5Nh!l0V-fR~}B=AhC?PtNae_g~k$;WJrr5FG6iU+0a&?;hN5 zj~25ki}u7^hxkP}*)diuTjr-Ufci;7I1?H&mgUyFxo`HtcCjq-b)g2hkY|4jshjr} znN-YL(#^!|GCflqJ+TF9ikG40YeM#W z3|~=<#CBvRmkj#)_+ToUli@v3{tEcRM?EFVwzWs(U77~aG zdFV)M7E#PWxp_D1YECsrbi6?qPs@xwaSr{0bhyOOpz(XRYG9DNCSa`@pPg4@JX5zU znK*4>Ix2Ag(A%4~=8+?uCmrTDZO`GDLV(WI;?yr-&1&=pJh@2+r1fh_-o(D8?(pDR 
z%Fb_d#9^&C9vNO3G)d$xJNQ@MG!}Y)A7YOCQyMgYjr?n__|uK@s=)YE@t6jjAvlF7u7BV2g&<8?6FgR3r zd;fiaJ>WW4{9hj5W(Dm6l-1_4m{N>ujJvVqgoj~5+*+8J!^_r@^A09>uU?@OcD-_C z0CDj6DC$CN7(Kw{yp0_LAykE`j+W~1uhvWA5)#sM3-<5`%Vx-X+x%KOH)z9=ePE>^ zw4h9fpRw=sI~BHahI@d}z((J3lB%K#Qa;mBs54-qTDL}TDVEOb~%g=f1eF+8jf5gw+(h( z#ul|0=0PMhJ3KPrP)0L;yo4+l5rycEv#JfE(Xyd~G>LcWOyb{nM#6RITwvU4d1=HS zs9{0G4b5soRNlix=<#E@1LV~<717`w-#QqJS$}I-s2LtlA*@#0SK)u$(!iIlS3q{E zIHPXG2v!Go3bAB)0ggth58Lj6mGp`aqobpN)tN^Z;YegC9ntVs}8i6U-_Shix! z0WwE(-V0UYip0HC8z@y{W>|LydIQ?v~$ zL-tp@U*3E1VjCkn9?>qaOISTmhp!Lz^!6^p2nWD%>Nrq9T*b-~gZ(Y2tTCQTX!es7 z{|K?@wgXarSmo(+Z#*^-k~%XUL;Z$ak9m4Pab`jOLZl9_!HK5`7pd$6!{6&}L`O#L z#_j_}NcrGYi3rZR`Jp~1VJpS1K330==BWJ zNu8hi&cgvj-FVxE@N5WB5J5QBS;v%~JmQvV?VLD37z-vhT;w-f>22Pz1=UMdfH1aU zFxkR(@m1x5-B=mbA8@+cMwFe88rR{BaSVTvM}k44MuggxR=n{3$foZ!I#`DlipJS} zdiROXWMmAL=kpYb?b>W5*Pkuk=o;k+L0yZo@^VE3gGVQiU|k=ox?*%byNIP0jM*X7 z2RHJAJ$bx%RzBCmIZm^-l ziX3*Q5R1i$-BRc$el&7m%@Z(NWJT*Bt_A$m%bc7>?C2LRv_oPAlSj|tJl8YARjTaC zxjtYFh)V+WNKde;Ibg_PV~^8~Q81KVbh5WvvmZQBTp&+)M&G~BRQ-;%Vah=P;3Aes z@wIjP)898;3tqr##gvSUYAMS33u*WZi~jlxgOh=dfgnJ&gXEEEHhg`*mgN#pz7Q$L zb*s~71>W6!qjP9DUB9%7F#f;Q%@AO=)}8ry&Cd#*m=#k;Z^?;?>(;EX0`@b*?B_Zg z5r`clF8CBBh-VOS@Acq)ZKTC^kx}fz3FsfoO;@q^L=tm=Vc9OMt$$A)3x1j-9mU#V zpy@5-)#7z_rrVexykP-u?D)xWAGVedjY_iKw(a7;^|}uQ!~G1zQ9d`o9ZMhtHqZ-W zV2`vlN&m!&)PY%wpY+5-9s~YGiiuVg&cJ`fP|-%5kFFJ7Ya<L+Zm14Ae%0U@wR!<|RO8N;u6$B7D!p`jGSpFVTi(vrtDmUWx2H~mZ>JKWh0 zVTd;35Ysolo~LVv`Oh-HRLCt@L`)$PFSf$^-;=1j{EZ#Q zCx-o)Wtj=1eoJ8V+FqXduZU0XF9s(j$q(Zt!Yc;M)OH9<*r$&UE1Pr1t;--~`@qXg9d5$$swALq% zhMqS|`#ilkpp_Na2ea04+uc(oj;2`bU()M*d|r7^GU(s-*tOf&$(S{sqO0{l@g#Fq zi0ag|{6j3tih;%A$2AXNbC2RPDIIQuJ`!{h9`zl$&5rP7H-VdiL#eF^f?X7&dRfg|Sh9AmY!99#~=>%?NnR)1a8>urGHEi?zY-CD`wPn=5 z*U;6c@C9v7`E4V}s%tr&1+cqY#8G?%rnk3txX7`3uz7}r7||whXwF7&8rSGhyVkfe4IB(>xbYK?JYn4)!$PhndxA=8*2% zuPN4bQi`5G12ogHF2AFST^nF*z9LYh1cW%|kd}TWM$O@D2QYc?WOp@dJWPl5MjJ1P)W2eg z8HnvGIK3d4AL5u?EFf;M?)0K9f82pln}oPHgN$R%XgaWH!C39DIo{asG;!oQhE)yX zOdCKZQ0;@r$ji(P6w!%uwXrnrKfX9BNnoVGhq=jlWlqN-4FtWnYYAn;eIr@Cp1X*( zQTinv%)}P$4w%l4BX^zWCy^q)n4K1o z`)S}^c-wz{7eK(f+50H*XZ|R?n2bs?u%bpi(HYN85k~qhhR-r>`Z)FxkLQuQfzhad zpkUf;in+6ZT@FQm${*R}KSogh)d&5x;iugrT>k%*h$m#{zY%ETne)F|GTj=1@4t3P zQpK;FhCXpbg@@;cr5&u=$8nW%qt!}wNpWfd%O$p7Z8(V&4eNaeQG0Mod;eI4q@4Ed zL=8%mcCV)jL9eMugF-p*3rZykNwn0r2m*UMS6x&?d;=f~m>owqv%4@#={#UGNPG%V zKcXTY#NMLXY>ND7ZrKiSNTG{(Ln!g}0rB^4C-cOn%q@a?gg)CJ_ZUi!!^xN;XsfHQ z#ZquEe67uoo`j2d`@8E#hwEZL`{){asHDjP0b2CFu^ez?2FFw;GXYZkadUA&Z7n|r z?bi^rg0fm-?=Z1#*b~A$rS%N9$Yj%?& z`LrSob}N8KL;JnrHU>Vv<~LBB1O{$8-lC<*8T)r$kL5AhrDPR*X`y)#R2+;=O<#bR zXl$J8?CQxC@h+^N=>tSa6C(APPk~EphE13~SdqM!b4S16uKKOpf01jJUN~^;h($WL ze|C&s1=0Yj0sH{EFg*HJ&1fmuzj0-`Mc^aYr8=hCtacG%MkX7Ed)U5qzq03*Lm6=C zRPkGJb>`yrSe|VuT$g42;277Xq~6P?x3EtmmIti!ME9kg?IT=umeqvp=Gb zTq7vRA#p6)*}H`Q4yT!c{EqY636L$>1ofFziNj*+PtnV&?qE{@x<=0Epz~+~ehA^L zM>-?0Mn&u$G;1|1png1xipBC6HaIq+=x`LX11~x;qHY1dC`<4$FTH=9TC=BZL&J9C z+!LcMY1zY85c5s2lV?1R%?0uwp+OKYAGS|}rkmWum0B$ad|L9z#t>0%Woc|bUT$xW zTb4_%Q!S*3qy59Ho+nK%K;AU}!HpxVIKWQ}h_JjaORvR7pdgnmlVwFej8|S$4)r|X z%!I%cfVNVbqZXJI<4E`0Yot$|dOF)&UVbo9AHbWv;@s#JXyfYO=_((?Y z7a4WCO{jo6>77Ph8K7n9fa~gwc8aLTBOD6QG{fJ#eaj>L1sWbk^;DlE)9zm|lZl^h z*kYGdZO7Zh+r)s8SwKt9^MZu1$)=UOyfKhL!=xm2=khb*=k_B{yA|jGRgMR@Lpl!p!J28a-H0K2Z(7q45lBCcKp~YW#d{f#F@KtQI>54?(L_<0bI8VXJ9{#>w zwe&U~3(Y5K_uhjcsVHk2jt*h_PmKe z$x$kXl8s;}gRXB<4+S}3e)xde)iFBK!R()v(uEN3yL#ZW^k~)R z?<6AGpF#1bdtmZGm>psC(eXwa(8MuEprdKw=nXNI|%baVqSd4d%4kfB>Mr+< znBYknpz=VL_>`T%#f-iK2^ouZO9SRDHR-;%X!0B56LV^+wv3>uZ}8L9H^!Dow2CNYNBaDR z)1omxcUex)`XjggwT`4#Kh*6P*^{BtS`G;~M_`Wo-t&8nGHcnF`o5o!_fm$G_O(7J 
zB)C9HN9L`Djhou5*{w*%i6;yfbr$a1zkj2!BV*RSkjl7-v$YtLqwWQ$u$^=Uk9}y- zG8n~cJ*w`MK48;;i<`wNYz~B8#YjGXuC=yB4Ayjz_`hgFkH5HNKXuFsb*f(-cSRAJ zHK4dU-aUl5LJPRn6~kkhv*5;?k>Nosqc^I5#!vzXF)%tHVTHyS>KeCob|x7sLQ(?= zcC^kTcbDytLepuB6oLT3y=v8o)(R`2!&@S6i4aaII23p4+=YWZ{fD6Fg8oidbr0OG zlCBy$cYe$<8Xc(YT~&OS$Xj2%J+@aKA7iasu@o*&PCSUY_fNQC)(H)ok_Q+GtmJHV zQY1;N6pX-#Oc*{_iK&}G;dS+n-MiOxTF|nkFWDOAB|_wi$7(U`V6J0s+Rq@ar|0H0 z5szY|=EVum+6gmKPybDSf*9?G$rNly$onbP3yEy@k-sXwhr~OHxFdg8h`x%Rc>z~D zz#{|E5!rryU|12M;7X!0e?oY>lmAEuZuZYTO3P+ivD~7wI zk!=-yD67h;_sv_l#vhvDB8r*XqAdW857Om`=`$@eslC`dDzwf#a) zHXcwi;NxM3<1#B&z4Mrv1q}mzeHTTS=8i5-2k`;Jd>HBlZaA`BOw1O$TYG`Vva zjIy0TK=ruSGUKpP5mnUivszx*yXkOS)Bk;lX4}}0*AQ8(Tvtk|+GCGtz*;=S@iFC3 zC?)Y)fpqakV5p9pA&B_GcQOc75I~g5fVH~1!olhuu}_NfaE5)-34liPvjF!6;XN)s zzPwaR6k0LiapLgUQ@s2_(`K5Dr-Dh9vQ#?yz|C5)1%sT2o}L8V7EmXQMek|o-IezO$OhW!`qsr7)ZIPpyJ8M3 zN1R*8h3V2rCa9@gpE_N;$!eeWWsR>tF`lNlDFm30SfzbMSxv12@=|2ky;S_);E0_u z3kVcb;e38>E(^?(K--F+|8HPKsN$9UqN3DyXYW&4TVce_?Ki|nsKS+2=-}aPWID`G{cXr}`vqo*3)yy58WT7txRpB3uHu~QLq?}wZ zYpXIdxKjP*ed`P2p@B6v6zy`JKL-JpxZ1=@_xLzQZfy{WEh=VaSk7kDdhI77g6f%c z`Y5lz3IJjhS>v=Em<#+y^1kpa2Cq z(Ri<=+sY--kQL_Vd%Z#Fh?$Gt39G#=yZFH!+C8{y&P@q9hlH3>h^cv6BB1Qww-2d- z9ge8@p{iA*eQDY4WiejH!9a3Ce|QWnvVN5CMc{xO=S2)Oz$(HzX}B_bBt`0cWRsS# zR|E!h+Rc6d^IwAwntU~|X%#}KI62{&^bl#bb$Ue~N(BLVgJknJ#DP12@4=zEtlias zu8-Q*n-X#6o@~GfGNLfV6)CeM;(iGkEqty04EC9b?U-A@7hV^8-U8?T#v8rMv#!>y zJ0M+rv6IFpr`BJzN8Cbn9a*%`ow#5n1K$pb;Rhsg%ZWy479L~QZdEuBlK0@o{=J?F;TphoWQ(65B6p3GQ=v zdCa++SSC#bylglM7axsr7as$Jxd>P$mAk_-Daf+aPAh&zm0UHZDw+YL*DW)j(dGN` z)ETdH&P127hj_bIcO@i8yp+&epLZjB$YiM#bkel5i5HM$`mI|Z5i3g|e0=JBw)#;~ zVW5(IiZEq>AW3gYh=<3rhr7&zr5xd3q0frRX@%2)nt{)s8D_G$ySpF8mIBK0%8zyY zid#Hoi$;egXBzfqIl{ts)u)WO-q)YIMkeYv|xmV7Q$k*TYB_9tLMdmX?I0~ z`Mgr`85NiVHQ(vuG|?&`!Lzlx%gxkpEJN?f&TI%r(yfnQ+Bf6OgRyD;3#vgk2S-3B+vZaTc)#BxdW0J0+-PZ5KF)4fL zmtKh};x!v12iMF8(Bt&9kMLp}JNnDeNxvqrHcJK8r9)qH=~=Z~lUt~cNrH!MzWq&E zeyR97W%*QL_KuqY0S^J*2LZ${&MDwcM$)K8+;D~9MvH?7z2&fpQ~c@~IOLOXsu3K- zVxisyomZFjbiY~iqxJx>tE38m@8j3sm}30r$q3$i7b=I57Ts*mbC|=a&!X2CFK*TD z2mo>iJ8xq1{qX&pKDVkx1a-6A6>e;9V{U9L4P`2#m*?1ucw9UvK-QErFlof-L=qMr4GqSZWiYmghqL9qJ+jaP+rE7} zCX+~VM~);~nT+1vIhca5JKSa_?e*f>Z0Xgv35?byGQs_g5Qp!U4ldz4`p_)GXmt+tA@9)2bzl`~8J)QGwX1n)e8){I47e$rB^$T} zKSht5hp9r_aFdvruA^XtuyLtG)n2XmZrmP7Y~4a-iSd>9PrD=$ZmL~UBL5i)XNQi6 zT(!MuB9d-jQS*>_@7&LO(t(sTGADoidVd$r${l5&yDEM-99!f0@szKFQOoSJ4o6|+ zs?BZPk-4C1C6IaEH)=orI9G2jEr0^FeS8^9bsrjw=m#8R??&thEUqgc=%B%!#XmeN zihX>2TS6{4&NSaO5n@T)l0KP7%ig_r?_2ymP1&z7`3`pEoYJB!qR*9+vS0%7nZ}eG z|Nb?XqZfRJ1=52jRHMT&Tz&z`q?rujy~F2$VXQ7$^#~eOeYz*pCsa-}KKJ-{e%3he z_9t?PYE|GyV|&-z5UYYQa-M-ItO}5TNjG{rKTmwuVtE=L@31)B7(Ve}x7(@kh0MQ2 zPC#3A@Nr|`1Wxza+lH0k<+e?hnfCiF82i!1+2Q4KsdgCZn%ux)&5@dOg4z)`diO*+ z8Vb!8tnkx^S5hq$!uu5CU$ZvAV(|#T5&26>VD>X@XbH=Mv%fWyRAUE1XE#}Gn^qMW z7PgF^|3Go5EaZOgUEeJw``*)LaGzjI4s$YC1_M?%6hfu{<_=^yc_dTWNw2V6OKrYY?kDbsu6kHgsV6dF<{P zPy#s(riOOq!~(M{GEN*AXS8`DqdCk*-j^_D;vBX#%5<}gGe}?0%}qFcU01GsUIhfz z2~=h;a&q3C{Nzfy8E3a)g8N1(v&(^^%QkX#vi_X0*$35XgpE+~_ouf`BbM_P*i&do zpihy865e3hD$ExHFu)T&l+|wvuOy=#rGJc?&qx6NX);&*{1$Jt?!X7_l5oYR{*S(# z`QiWCmpiUhcb|_b9+ayF&@3A{^%MQ>jJUXh%1xsr$TU6=msf4`kY|50Ow%1xZG!z& z7UlD*ZLX{lM)kl^SqT~qWOw-_C|s$KJiY6;5fbtEpuW;L{^fH&Vsn7d2rWhkcqXY? 
zq7_d>B2$zt5kO4;)?WRH681Wg>P52_Abq%o@)q_Q8L5)%`MP9 z!FB-dTJJN1#JZ&oTs^UjXNn5@=m^xdHJ=$J?)S>9K9SdT!E`_9$hvZT8$7^^}a{uXnDTuwK`k zzAiVf=6H=&n##?!vbQ?m84xbf%NVU9zRqsR@oZ=J8OIJ4**3s;-eWdz@%}YTy39rcJoSxAZc|(^ui#)10DR8ifMBFQv~%aqo46$Skz_kN_nHSL z-bc-6+ef4Jmsr5>sWeyGMA0)HiNvIiMzo2I%86Uo3>0-z6LCS%vta%KObI{01LKHcKT<=04d_eQduUCJ!BYw?Fi0j#9og36B!gg zbHLVgqZ3oaw@kZy7)8c*3=FUU1Ej}Q?8`rYH4>(9o()V>B>felyR*k6L4T%MYUU%V z0((pfZTO0QnkW516Di@Sq&yCpl1|=#RB!$i3|lPrej9>RDO zTrGBBW~ZdXPMw9r54l#CxJcoVPI|mhW`}{j{Qx4XcSuMbmZ`|Khp}!07lKm%iZ~e( z3d*Hph~@`TGow39r)QwZ1pNGpIc>PZkyejM&odfLg*lx}w6TLmrN1_<{-3wwFZ#)N zK>0&if!d@~YlOdn%0F}GpM#3=dgJd=$ava8l}>rwIQ}W{AOE1(fZYS9JiB}GC*kZl zg5ftNUPqyT2~tm&pc}5KT3=SRON{^`C^GkDnyKZ>F0tHqs3+8R=S`n6qfxuSU!>JY z&;`@%nm=X_7s*s3oO-sb-f?p4ed*?}B3>T}`;Zd)ZZIzzN}2VC5P_;1`?iE@oRwjD zfcCddj=#pqP_Oh`xfU3_>6)R3tiZwaA~|=>&#MSgrZv*Xkwe;2e}?a1&dZK_=5Lk{ zUQ^cCSncqxm_PSTuisnlN#A1W6If199z)f^pQB-KS+iI25r#s$jtS>pvqa^5mqq-!G@w>>yk2#&l zOHN70sE1ojObi=K#9SbBFyGT9z7{dds4hw6sTt-ziyFmqtI%@;d<-^AHk1t^G3@2x z(Fs|u5QwaZ_LDXmtAwdDS=6DC0oAe04o5+VayP*!H-*r~j}u$yXIQ|Y!Nht1>R-iW zfmckE%8=OkBoQ~g-$3b3!2!Y8z6s2VkDop@K7jOT8k%r;bbhkga{Q=ZaeCfY(g;KV z1!lfmR`8Y>Y4Su1#3i8yMKgY4@14^C`A6OlRJN%#~7;F&|jGaAB{j^{Lx{ zu)SV=aJef;89R0FrCO&q`Wg*=WJWIRrI!+`+8s|?_}1yE^yEVkXQbag)cGwpIELyO z=B?4notr(>C@B4XaJN#8DZhr`2QuQz7L{+QZOe@O-(|&)h5wWlgR%1UZ_JiO7fm4) zP-2G)2Wi%DKxqAX7%LZmT?|newxj8r6Igk4KrDduvI=)wV83FQmoAwE=2-dmz->Q9zYDEjpT~>J;`Y0@c}# zCsoI;=-qKPybHu=2$ti=%(JdWa1-S(R07DU;J2HLdqUOA3(KM^1?r0az@4C*tx=G6D zDzJ`2?{Hh{%aU`oB9#U!0!YpyaGtauLN;OH8;t5teXN72ML(!U*f6e;kdVOPax))H z@&dxp6ge4mR|NV0b3dP)j2v^vObRFL#!!-1Q_83=dB`Z{N3js7>gzI^BE$1T-wX

$XiT%W|#W-h&*_A$Km$%c_o1`&=lphp@W${Kz#LJ#tltQRNbEQDvZ7 znV4MlOM{HnjdzRc-q+k}2=e&OJ(Gd1+9%&8M`q(V@(u=Wzr9a4c7Ne5GE4s+0g?`n zfO7gBue9%}JP=7MW);XshyJsLIXdB*>~rYT7bzau>|FB_)bzIf0x9J-f@{{4cX&F^ zJgsZhd*zIR+)uJQ{@%QX5nbho2fh=f8^(mu@YU2q?)Fzm6k*qkL2oMjPziqQ z(sE04@aj;cRs^G1G1#?{Ad8<}!jX#_x7l*+k|;gco&xYjd7%(?vx$~0V)+)t74-DChzFF@7%rX%e5BjRA|)h zj9^)*cmnHM(9=-pYm-PDik9}xwM`TrXDx}ZS1RtERyD z^3fMdtfQ~Z<;kVj|BUM2Gs$<&E*)*{h>JajOtPvMA*F|9E_Y|MC)FwmURbb7VvXno z;aFK>RNBFe&Ty+os#Vgos>xNHjI6g-CApaVp!+iI>@>F!nQ1GY-W!^>*hgjS9FMtE z<>eQc{4l;eZ|R)dQ@GA@*4*Q@CQ}B7AMQtU#F;IdBw2hm~D}GGuLa! zOlIBVR=eK~9}BYoQpyc9RC(FFr4Q56#L_vHq9P*76O2Y*`S5#79c`M~ZjSK2&!ljB zriVFuoN45$=_{fhOu;l3mai$Oc!=cD+r)D|Ewe6bn+n_jUvA0Ljmji!P$&T_zRNFC z+S?GjxOVBg2S?8Nj)aAUwe4k=T@<03{&N2mdiwq!KfHZ?zgFFM>B11{d8}1kR*>CH z$rVwRYI^;;9C?}^E^TEo^Q(l8Y>JydH<1Ujii9}pv3om!bMOHKh7yov=Kc^&|H)eD zj&28DoL5V|eKN7{feLzMf{`G<^L}>;y$6T)>gB}LyA-$^Te@v=yl3xcxnP^BBV%rC zrzMn$*rc5{S>F-k%52S9LV_HHQnQ?P^=((1_`|N0pVHT7jqidRwjyR^bbKFYf7xKK z)&P|q9*vzlbGv{~7xGQ0oDB$#F)FTxWxND#c4X7?kLr-5QZhY=-8_?RA{1BGi$Z?H z2@FAt`=FjV!(RcA4DVh(r>1bYaL?lZ*4JlRCu%&qZh?_wS%LSp^UK)dHfe4d*>VWD zosaXyUtM=~4O(w9oxgm^ylZUkx5f8U>W_sA@0mVr>S4~Q>C#E+(^r@o2|FvxCHhAl zKV@LR<{Vs#fe4I;f*@@jSGx)GF^@)|4@AVp0aMCHv!=RzyYbZ15{yht*sXvORf&Pv z#^Wu|UC)?GqXPAQaH5@6JQ{3_ZCkc@Yd;c=?0>L*T^aUX=Pz8yv(?b~>T$}^@!Z+7 zH$T|}YR@YPX3Xd@zI(v0s;Q~jct-K<%yyFOeeFaS4qU>qvI3^g$`8UqBJ#KF?)m%q zNAK1FGvdce^U}yd5J~JrB(_RJ(-wIMW&$sv=iTJKr{OeP;o`}ZsL2tNG-cMFp1dx1 z*yF+u7V*JbytX-(!9P&R4-XGlol60~7Itu5%9AEdg6uec$G+oE1I^s)=Up+14qf?f7`|8>c20vbmZ*4oIpHC>x@y7WxRXGV*Y^9{fT-&xYFIn=TwKd;GiKkV2Pq^9&9J|0kODt@XsK1j!NgIn9 zZCBs(hoR3Qb?;97WJweS&;UCL0q8@q@i}O3?pXNV6c={mEj9Tjud}_E_cJK}r|D$d zWvyRO9;eG#JYFzoX1&Zz8)xIN1>1_U%tsVjp=`5!`En$iT+xN8pV7EM{^z3)95rPs z1VA0yu;C<>;b7VlS%3k@H<$eg3&xVG;gcjFa2H3@I9YhtT)a?M+}t8lF-J#D(T?MT zT3e~Vy*ed7FYkkcZm6gYIZsMT(`U|{IcwHZ$O-(o=CO2cZ$-G`{JvR=g&KJbYcYGr z(CWH1o%;PZf$J!TCDd8h(odPZVa~Z)4n3X=9`^^q*Eva7G6>L7IC?IKZ zSH2XR6kTOr#6sDylxNmWy0eqmF3Hl~l`3YHbgy%_=A-A`nwl3lv&!f6X^IlYz=yBv z?%et1!-pflf~BTofO+rU+C(4o2B21#TOS7rS5Pnk@)5z(&cO>iQvowG3&6g(yRWY) zV)}w@UqKH_w!<7h)z8C2Hq})(K*&rLBm`^)cE-GqZkU|=>eVgmEp`M?mYKEfD4urn z-%tD1?vBf~c;2~v=PzCapVem}_Iqi+vd&uF+oCo@`F@@{pxYqpVvr39n6;wCIH-!=gqnD<+b;OnP++rm`FCu}y863Fz|=|r|Le*5&%CGW$ma(jTNE8Bj**m&L*Q74zDK|#FpGFv0Wn^gVQU+`&h zRg$9k3+CF5q=`~QSpOCH(pz3aE%$A!u?Ziy9Q!H(0XOOV7OXI#&r67VI)?&<+e2y1EEd0oP75s;UUtO$V|ZIu0cCtgBji`ym4BbI-At=%62$xG zJ1bGKLX@|YXi-CV81q{r;3Wpc*cWp+{Rqp>cHX;puU>zGC-?}+3G`E@Jb(G}wZF*K za*rkAg*rg3BgNoE^z`-uHDcA6t*@u^^(A^nIme6V&tGE|0ifl)?taH$@uL^}dI6vD za42K*F{Yr9EQ|lKje`~)v@g)k#~oh;DZ1DG4cFMjJ-0*`(1&JdsH?jN2ODZ>g|8;x zE0eOB_L5GboQ}_X_3FxXz+^17NEn()zFt0MJW+mA;-Q>Qv@DN}wjqoBJx}Ng8IIjt zciQ2P$QkJPaqmkcJ({? 
z)_G`3!;}g4P~_J=bmZmXQBYHRxq=MDLo>#LH2$6Z^EhQZ8K2eNJc;t+$+bzflm9#y zGn~Q~QueU@<6?@kWWG`;pZ2_AsQj zc*DOR6o1%%{65WeqKYpk>186+k>8tTd~YlnuAH~k@(AiJlPl{bv0ZPrVms|-3t{rh|P z$MbyRH>GIvnUJ4@KJ>vi4;7B2_7h{mz@~DEBCT-s`IuI4rbT&cQO6EGljoJ~(_PZnuJh!r{Z;fGmQ7 z*mhI3QzwM2-*4JFyVm{Qb8H)+D+7G%LlB3m4WPP*QCN%%@jP#6__p-2Rm|%zU(Bm- zyxrwjv4=-UNXToztdLl)#0CZgC^xsYwS|Uqsu+>JleHiH)uv6GpnLkdThQ>)512e` z*|J650*3&|``b5S#OLh;5?k0tA_H?Rr!~St<TKqm6>v#o$~ndAMbKe)=u|_xQoG(8^KlX6xaThAi0mx66BRt z_5roIvsl)i_6H2^f)&hHzz&W^sgA7%(Fxl)uf~(n0whG_o}Bluj!l)z^yuE?G_)Zi zMsEG_jz`_9di^)J5$Qez*e9HpSiI@v+Lc71aJ|B&5QEYV;Oj!11}>L?@0s`db!C1T zTOM|8=Fva1u$rLe{1lJd!4@5VxEy` zP*gGSc~+8VOW-MN#+dS7v_NB`Vzzfa3i(KfZW z7nZQSFNd)B@X%1MROkDisdC`Jet>+^irI4VvX_CX*Ccozk(4d+@`Edxw={!4hZevq zFT4i@zqvfHHVgR=n6|h!U`b}oiy?yp5EO=bLYnh0c%VsGV8t$R?2;9bx9(P-$}wq} zzi?p+-Xon@F|HNlp{*`pq3opmd9riP)~4HL9QibL@wwt+^*)|!oRc-9YCo+500AKr zYW5;Q0l^+ece+}|wnBUmU@7O1<+ykeEVGFQHJpR{TFS2uL3=Pa=PEPoR&62uka+Y3 zMxqNAFSbK(pVVbb7vlwBhp#qU8*=&cSDxDoJwV&@Jj112i+ta8D=y14k~N1B1B&Yp zT2U@}X@4qb6+O5C!fo$$tQEEuiANXy*BR5@yzsy<+K4U+E-Cz&tn8$PoeakUQB7PX z>Z|5(h;X-Z0{E<8jd{kj^q4uVY?Aig1#chOb?+3(d@=`e@vHU=VLSrZgqJAb>|_hE z2GUA;2_MZ=3EKP~owc=fpOswSHsPOSQgg6}Dk~;gC@p<`b`jM_U+qWVb3$x2YU=Cj zaURjBNlHs+1C%j#{pw{f)!mF&EFn_-W|GbIhSt_43l=(Zyyq~`{?gI$I{4l~W@dEb?g7)M zO}ljcdWgt@MR$ZO%Xs?MWP$Hm+7zpO2MHXf0`NoFk>A`^d~B|bW>~bG!|d6!Ka*Dt z&E2_8^X>~bR75k5PM*5F;Y99D-t~EQZtRj7aYHT~cB2s7G)s74VQT5tfC1I9A7{>- zY2ZVBlb^LU20h@wdG}vE;C$78^niH5sGsH@H?2{*tM3^Uq{2S}ZBbCedGo1Xq)*YXel0$gp7Y;>Bl>rO9|&q&CEdh`~e3FBDo5S8U&*0%nXz{ zPm~+}ss)O&pvSsoXs!V?{8@eGLt7o+seUfx_|(7Cu?cQTZ8mBuwo@{6e^)(EgL74a zBznQ*zH;rF@qrW+-YMp9=+qi0l%t`pD_n(3 zw0&Nf%`b^8A=W*nQY8wNPm||^XkNfHITOT*C+m1`=h^W(cuwiOJ1Xh;_zZs~|+$C~gQEep4A3TwGk7fxtV( ze2ws0jf^-rLVyh|ZJHtYNTwGNxIl^QS#RFFx$98+1-@e~WfoW~tu|u2cT)-w7=V^A zAHs^4uZcgFHo%}*F7zL{v_DMCbdB(m!>hep{1zV{0l}3w3an(%I|YW7n+dxo?$N%S z>0P?06E@j?WiugOwd(=vP##oKRgFAmYPu?cXsUMEc$=4mUw**+0xDNAJZsjpV|b-? 
zrUYDMN&ae7_DeTgTJnhyJ2?ahJ*ifmpsyHRMu0|#k;}6G@v5BE)#o&V#&xlzM=*j{ zK5o0fT@b=S1v2(S#9198r*lPDVD;)DC}Qw@`0zm^(PcCV2yea&tk<)Lx|EgR7SweM zlyyvNJEbxoJ=!T%qZNhF$KwbWD%{R>MjEqdr$YTsagRLPjEMCI!m3W=-n=e|?)LLo zFaf5V~7-U_JxdN9myg-V$1e)=|%O^If`hAh~=0)f`h32QKEb^4K%?aR5i&fD(F zc5*Dd`&?Marr{%M+=RD5`v}(%|EN3s{YR>F`5k&&dO@y`{84}IIA8F+&R1v~-7a1F z*4z6I=oFL4@NjigM6yMzce?xfZu1E_`H^w!>z2&J#Ct$Tsma|@*K6*gMce!mpyYy* zyeDH`Y0F|d3^L{6i&rQwKv@sP>gKN*cPPo|?o zL(MXO{_QFs>g$KOmAQhJQGu?O(6oOvLchI->xDFT?mXGwWwI{YRE|N}WhSID4)gxj z>HKkiBUPdQm$0&aHu|<&is{UH_McNh73b%P>U{SX`On(0NU2}Y10{%7K|&Q!UR{%Q zeHBS(`Fp<>+I$_~`OdQ8G%sn5|Jh>w)i==|>(43T*!enr;AdY+{f6eR64v&nCpzwJ`w`WQ1s;NH*^D`QlCVZ#lleXG)uSHiz8nIaD{qp7$ z&fzO1v1k4;=hfD$xBp>2dGch8wCW%a3#kw|D)F?w+rE(LZSa@NVSVGn)jxmdrB$Cn zX8|nDx;)k=siaWh)EjVAz4cVL*7jmfj%!2LQGDsrrP^8p6YWWPrw?%3?~9fkZ-~hp z;M%#xlOHgkS>LR-cDh$LWS=mT=`6vf(miDcObVEou-JNw$d}jFKQK@cbMlfHI3+MYTOtww^kF9lDTV@NFE1}dw_OGX+ZWwI?GIFq zw(y4lIMRk%LiK&FvPb|RGiZ?Q@iTi15mBq-cZubF-6HH-izWuTXgd)j$}upHc#bME z#s&4*&ig*5Db?qKSuAHyOt}vhn%dxW;jr+`oj)In-HbX~zYPxcn{ME@LR|2<3SapF z)lJ|5V6AxjIf_;D>)<1p3_P5#v1|Que*XN@Qq6+B2f^g3tW4V;wa`U(atg!55qIr$ zG4TfC!8DhZ6^TYmCmGA*rcM^v4@pT$A%OXBQ_YO23~Y(pmFBj+Iw5-`10qU%8L$I3 zwvF3W=(z4Vw+&{6Kp2759NJXi4;0AmcS`qJ1?ffDfBQ>Uo;G09>ftD1*Toe9WJL5O z>CjIw+f^=2=N0Yszu_~gs`&p(d+V^Qwl8WFML|?RrI9!SN_Tg6haepyDIti2gkqpH z(p}Oeq9BcgfOI!VhjdAE=ZkUlob&tcz0b!V=Q%2{_q+F6YtA{wm}B0*&sLlPB2Gle zqEl6gPcAgRGp*kMnE7AV&HAB-0Vd2FdK?%))vUG)Xt(OcTX|9K*VD&;EUfSQNgccQ z#ep37jsd|qU;;!PDmo$9+H1-nm{@pHAzTj@@!7e-gx>^nBGttH6I5`}wJ|X}8y*|m z!g_s+z-SgeI)r{$-~lgQ7%-NB=xMjr>9^eU6)YE<6eVq45jzD^jfPiEaq+~ zUtL=0X98d$K zr%P!joc>?vk?K`|jxAt=$jEr_JiE#MH{AYbh7_T7+PN4cbnGlFZ=qWoyj^D0xcr}e3FaQ3w?<|bYY3;5j zTCLE$SqzqsDO?t$zi$>KB~56XTmCSKu{;KK%Mc8NhEZp`pLOWPej=SGi-P8?yApI!(i#z!usr5&c=yn3* zWU!R-C?n^$(WvFUNmojz161|BWR6h&8`J#~**TrT^n;{r}JA5wy!r zkoy0+p+DYwb(8_j%a(4EI<^D60!ny9JI$SxXv6QwVLF>D6C`E`@iY{&K(4^^3fZ#$ z-al5K@y%sgUX7Gt^X_-V~jV9BAjhsiCv6|RxZad~+*vEE)zaK@-^R&jXjFL8$5 z0@F-b#b99yMw`OXFaynI@E!G5J6Hz57|IIr{G94na3?DWYqfdUoE9XPE(uM7{vOWM z5Es6vo>~k`cf z!L%%W7cidw8DKP^=moFR=NAb&^KVO@+ytUAm5h$gHwDsZH*gJ-i4ky%`m_Nouw`Lq zwtCi^8_%Pj{$9>;corwXXaB)aLBG5w4h}A^i<1+PfNKO|U{e?Yjr?q?=?O_SLkhH{ zmGw|z%g|7C3b%!<1ejSkySR9B`_gL{?(FS_&A*#iwMbCE_MMVFS%Q!JzA(%|&JPm& zbLi;cY)V5rSn9~Q;p7Y8C=@HeSe;yeNd0znlmq?*RxxONfGq;e(K#>Ax1ELCGCt(bGu&S?)xjD6FEwJd4Vq$Ob zM!{P(<36mO5CK5Y0PDPk>Ag00bAQEJh+gRXF*SgPU@87i@Vq>!0rFB*)Hu-O*b;pM z=4S0URDSN=~&!Fq8A~#h`S3 zk0hf{jQCu7EHE+ZlqP>W}j<&|ruB1GoXy*Vl8>pfcp*dJg=6f&TFfy@5=HhWFU1VKQ>^$Y;p!ahpkg zUqTiW$&v5L^HN4EJ9gW&U4|9osRyj@Hn$4@B$DZlK?z z$m&^eeTl9P{YMDF=SI3{^uOqa+&BD)_>q=I*{1(1E;PEtf-wgedoVFEv9aB6j$m#c zY5884dEML}}b1!wfa<4+GjCkerD|D+=N#03>miv`GYyS4r&ogiMY3C=MWN%%fM(#Gde z_2?9gPJ)spF$P{r@C9(fRDo7mF=j+zuAXx>4 z<)I?REk%ES9#S~YJwB;DP7Tt|HxdW%TNZw751gn%{tA}n=Q~)fiOONiI)(H;hy!WohW)o zi~(3bX6Aelk#+O^_17aBUW?c$6GO#QHABGvNJ! 
zL%@X~tsQ~W6nwqclfWC>5k^O}wK4MYMo+(Jfk9E9uL4YCN80nd z0FDYg8-g6umtO;U12b&FOX%|D%Zty7poFIZ$1>uS08&8(WCMP9_r6z9F-eSLPeRzq z2Ss}GBP-7(R8><8iHl=6qULyS*wsXt*8{>I*t3vifzMLH4Gkx#ORqgKifrifC|2ceX`bSiX|Df(cP6vaMtq|G+BqjcjgT%iH7ZJl=pcxU0 zelYQ0MPugXhH5Cxi3~W2N8t_s5=ZE4eE~ZlDbO$K<$~YI>QCIC{Ksl>@Dxn&@%1f( z5uf9TsZQ{fj{9S+S~MbDj-b2n;QNH2zgd)@FIP`VDdC3>0I47CXO#X&P4`o|mtpCr z1In!9Q`unp|GF8LE%6_qcp$6YT%25JNpHiC$bw|hFcO@hqCrp$ANUwKl$@GcIuhE$ zc>a(=7zE0y5T@Bpjmi{s-;}^mi-YM`qW}K3K=N34<4V=fKfwj zR0SXqLH$F`A;Gj&kCuIv`-{aYdCotXjr5CA6&FE=_iw4k|INR+LX-VxctP!_ewT=d z-f7>wgHW@ad>QDoxkcV4mps_Y^GK23!!v&!BtiJwe;iZk3Ay?tO2ug5h{yygt6)8 zZ|m!s8hN9T3DT5QqLmj$z2Tvw$EZ4I@|70EyHR9bcVXgzDNM(kTWkTP1R0qphd5)} zXfjXzPQbfNR3($ei?m7s!m-KEdxCUsY^`>4UrA@#lIFika1rS}?j+n*=r3_j`o+Nt zxnZS*kuNiKY(p^L36LjVE3nu@csO7A`sTjj*%K8di1a`P#-+{b>W1dXIN%L8x3?=M{dspPAQ$R{nYn-~K`G+BB@UP)k74y0 zLYnt5L&aq#(ll46xZ3+xFru3bWDyIdO3-D_8H%Xotl)d2I97^hNeUYW?P{CBv>^pT z7jPqBjENRpI=Z9jR(?vV^^HrKQ53m}ce2qgS+@TKOTOGarv;*dwD3D`+5X8x{(2W5 zoS9n?BOd7YAlRgAt^hGp%R|><=(7&5SP9`e-qO;NB4%VnwxT3fumrk)GbtEHi?BKc z2i3tsD_V8+G=w5<9t<5i$DxW-DvI}W1fNW2m%v3$Hx_QH~e63*5 z1vxOruvaOTgNr7Nv3kK)Z0H7tV(jhh&9`!4u->YEKc8XWY~Pu9etHRolQDH*280tJ z$Z>OZ1&D)+zSnTx4<|4$p&1dbXQ00vG4>raOUiqY=gqp=u?@Yd2V(&vafi>9JXG)r z%?-*}p2S?8-rwErp(ceEUgjp#E%7;9s$%X7mA0=-OShVWTCa*b3TKfW|D z8v(R4`Kgx|>d9NuvO#*q+Dw?)wICly?k_`!^OcGLs|njHt(Ezqi)LyBGSZhnqERA? z**?oX(Z>&-MB{b2B5pARpVK-f$v;Uz_|wZ>q98~Sd>rtzrGf~qToH)`LGAT6CE%&_ zgtO^VBee~-2o{$FOs343KyTbPtgI=n{g(L-kHTc?;OWJzP{Y2V3ygWt;SVwW)O3>$?kb4*mmOS&?W+24Ak*{Y}g~ z|D3SYREvIg5Zzi&ZleY4f-CnHvNrP&cbX3(#Ls}KXeqbCbRT|xexP2P-M^`bjf9=M za3Q%F+!_4jl1Ke(?}N-Z1&~0|eR)~gtIS5Awc=AYnaK)O+W>vfLujP`s>X*-+eL8I z>#$$i*mw#+O>s()fIasn*GLH_$$#ItWo}NLoWt4ecNVbj_z;^J-hMP?_a5g$p%~2l z0OgmEP!+(sk(7qJF1+;A)L^A+2N-Etc{#8#V0hit)z$RbWAQZrUNE3#aaO3CmxlJ3 z-TxOE=c6+K`jVU-cK_(Cu`>f4N3*DNfjA!SU%;*x69XfaMC+Z|N4r{#x_6^H*O3F` z$D#F$0*ZdVA($eI9g9yIdah~q#Y#g))=O>^#XOT6-_LpWt5*6qtf|_ql`xL)i2!T% z(vJcah&IHR#kC*jTf*sW)Q_iZk-=2w5C!^7f}va zHPoFB?lr2+y!(h$MoF5@jlfsO&ohVc@a)kyk7EtZMuIoQAO-TBg(*Bi4`xHISA1<~ zY<%hMT>`=AZPi(>duQ8h`x9%8prXlw6doFb;Pu1$1MHugj!sRT<+(AV9g)BO^>OT8W5$PWEl2{Rut=JM7zkz+>z`>|UwQyh4=9@#Oq@qWg^65?(0PzO2=lW*jqK*;2AzI8 zrz}ih&l(j_Tf7WEqM0u4z8 z^KF5ZGYX~wb!TjE!XJvt?woGoS6>l=N?`Ezz!eZ6OfDz}bL2BbzUx9WuI0#NY=Asd zr=4*xO?UX!F#GSa)`*HbAk)(iu_0QeeV&qb@~^o8 z+@t*qqqBk_p#yXcEGt0(rzEI04n96We-0IxVrB^f&uHgMm*cz`74|4<^+I2wDC*Yg zoD(%G*n~A3r|0F3qIC@Un7EZs_S|Q=tLk3na=t?>SFZf((*)#S8dK}j;+un?e}7iJ zdHRaW{x&pk;IuI~^-~4@W25gI2`}GO#;02aZ@p);Fi}P<`xB>PERT3JRFg*0Q1-n$ zr>=oDp4EwP=$QT-$_TA;Fk7>}FBR+tNJvN^GcgGTLFiSGQoIDq4;N==z<0hu`c_8U zB6k*aifdb2^jY{cE4%P~!&a1^UPQWC;j~6J#MUWh>5!qi>Bw;$y8oT@dx23#h`2X(W9upCWv<@Guo#JM;TKu)_@&H)XoV9A8h*7 zir>8pnSJpm9HK;kIHY3S03j={%by{8WrJut%HEEe@$+$=@Xg70_I{AcgvgwjQT4gYN^axbHT%1>@zku0jEc?1R4A#A zBAeSu8u}|v>>C5A2Tq!2-;1yX(W)wTPJIJt*E}ucMyV9)TibCFapLmA^^aLt*`cF> zo2@joa6JRmpP~=QE#{jHrWSpkX^3p~Mn(BZRb)6#y*+6%i7DaCe~}KK5jcM58#(g8 zPcIA~T$eceU08Yn9lfW?7}F3OAgkTu+z~*G>E}FhJ~HhJ*W!=-VWEOs=RqC0(m-II zR8$&mX+CLtR?%E*4Q3q#W8w2Tt-_LMrI{lxEfFm@y>mCX3r?L%@FntYX6gCu{Q@wf zGOM5C^yLP(-e^JUUF?R?jOBvtKV+{CN|##-F;LsEpzI0xB8a;Oc0}eF(kt04bL4>+ zb6AQ_|JPZYrEHeV-m|-40VD+AcqTTU?*TvsG%~7(%cui#%cg7$eFoJM=z)ge7@&K& zt67X?uuaX#7;ps^eR9gi_b!=D7JHyFXt$fZp+9M*Gv&g{E=ZGzxop1xDP<;)P!I2t z;6NLSXZcY*6Jp_$MN!I>hv02EQ^KZ6O@sL^*X>SvAx5ycN@+E-Es=EweSEzH!Otlz-^9(8zq^@kOt|D3^MoDmB z7$zeRPt`S*m70_-1KHEn_Dzp9Uylgwk zQBx`nB+$Us839(ZpJ3h$;JfLJNk2}_#R)YcKT6yEViVJy^oZ*lYPUAkBao)`fB%CDKn^}JnQLFrmDpAr{Fq(|LM zCwa_!3h;?ut0%RAHU)L{u(#P9`7nN9n_g)WUU&66D|-3JA&l~EZEjYZIgO_}q_7X7 
zL30K583a(7fP1p{GO=JX0SE`cmO#|l*chxSm+s#~9KiZ*04U!AIT)J<_2|nF&W?x+ zBOy_pAQs!=uGiUkd3mp2PXW*k{4jv6@ec^di)F~oKEUfA_|^r6`ryuOdt~X&P~B3B zT*cz_lIU+N?q>*Chu-s5N!#X1aQ2$()waoLVZcC2Ky~jW`1kj+0DBGEaS2dVBmE)j zMP#oi&`B({47er<^f8*vBJ_RWvHKr_-k+!I<>|P{NM}GiAp3t6{&l#R_1}WtpCNrg zLu413$L>CCN?!P`N6B%}3VHTt+ry7ExCh~c0K&S-l^>Xl(j-7KOm;mH5Ou*d9>C-~ zKj5^4)RRa5{edD?RX{3OFOds=gvUSr3P}kkki5VMu`f=()aY;c>bScf9Y4m+S;0CK*g2hMuA*6q5fV&<;YWJsf1c3#{U_hApgDN~}-~DnQj^rkA2G997G{{B# zD??8=srrdN39%#pO4Tz7egRraEsx;3PS>%2(+-^Ux+<-c$JMn^CIVk+5zGYa8?M&~!g?K2t5SIC@y~TDnai zL=V(-N>(3Z9;Ih8u^@hqIf3A5#v=m@Y5d~$)|Ah_!`&H-q=I^{1Yva3Fnd=J(?HZV6j?c(8IO5@E+6vp$KniY@vB}Hj_RA z>(b4u>$_+6#L|(Ygd@(MOAUK^Ai|4>&n?D|ad4n4f7M*bvHMk^ zzS-<)aY7gY(6LiSn;Z zj;WJdLj8G5yU!jlfOClj(Q?_%M_GsuIvChS*-j}!{PO`#!A(~+T+6)oVMF~?F3SJK zjl@{f>F#NQB4K;Ef$}%dWZs_zF$3_0ex1l0*dl$)bp1s%sp-v-Hlw`9$LxTa8?U7iFv4-{mhCn zo>;+O?}Gx<11^gpnQB9>!^K9%clPX80FNTq)LeCWIA-@Xhgp%2dAU6?zPm^6 zY^fB0J(30nN+Ke>dt%)8=S&^sNs5b2D-O5c6dZBgez2V={NzYqO^uCM&CINy^r+o* zYk@vn3R`K~@v{p!HGU|0Q3vFx#Ml!ue04ihH4g=bwCkMU63EN(=}w z&YgosDeSi4x7VZP@fHQ_qT{wi+}e1`S~e`_#1=Z?`A6-kwLO+#@LN;*#)x)7qu!8f zZGC!*2qXx@C0kbuGt}QtGuh#71lKq(=2JRNzqhPRh}mAk!r_P2KEb>EBAlq=Yzx;# za<9qSuSWFU;cYM0F-e+X{GfD8)ce(qjne6`lA@8`sQ?e6cNRS+4lSRbW|T^-bozLr zVdR2q-ZB3}e{XS=eT}E=AG-~tySv+{5eub|)egGttfZvO0!T1(Z>{Am4Pm;j3Y%o9 zTSal&>>hrj>fvwk4_!RGMQsL&(|hSb{xyh(@{DP%__m90x3rc_nhuw-OgWvznmTfjteytCPAI{NMbeKMn zx!B*Wo9s9*{xMspm|37=-tF7gC7+AidFL!WGcsPKe~edMzp$ip*K}&Ryl9C3+kG6X zHOH}7v*DdW<>WZS=*9CLwII;1rZ4KY8Yx-o&Ea*P2A9cU_x;$Z#jEVzt_vBn(0oo? zc*UlS)Sc?Xq(4`QSKaxQ%46n{&TiN7>WibspL_?5Yn(K&!AY_5_(*S^-&+{9&gv>A zgY~_;-uD(97`V|KtR?bQTh3EV-XhoGqO7b8R5|lJ7*zjBy&=JV2fCtn~{-QY_?lpKR@q|SY2{~ zWf)xx0fKR0bjw38Irrej3p25-OZlX03PHo_&m@U<&E+e8&fpSJ;%Lv-)>GM-AC>jY!Ef+?l0dr$To}J{`R!9 zlT&-+&8eRp;wEn4^wqZF=BZ}YP`+16vMrvOif9Xcw@SmOqvUf zXzTnqtp>9w?&XX4;N}2WU*NXyp4k|^ICe~qX0|ploqc1zxv$Uh=-}Bp|GEL{^<75( zdv}erI*X?$$~Ib2)iz`{u9}vL^>zspQ$sR}VQ7f2!CyG_Q z(WaPe(T#l5fPb#1t*^wihuf&T;bk}awRlCgtqg%{t~6bx?XRAM#_*SqPCaR?-2Lqx zGh-0%MShA8%J-g4XQwMZ+ifbbIJ~Hv{bHbCEcGmHj+=Qt5aT`$B==8e&^plK;<8OL zHIw*{Iw;4pm9u82F_iD^YK1GtlB1Y6gCd)s`eh;slP+QBMPj_^VFwME;EM8sJ({gp z*Zyv{8b}vzQOyO)=jZ>+m>yY7Sp1YqVIIcIU(kLKuCo|3iH(R-kOQt*xDO+^qwkA!W7R0)S}PGUfON6*hw|ZzP3HuK66WK;raK7 z6MxCwoQ?6_fkBUHfR~Alw-V+J{SQ3rx|Vu``HK8&O{LTM)*~DXra<}dNhTVS7d@JZ z98Zy0HWECg_44fuG4E(g1#)$q9hLjYQ{3ct!)hORNf0l_m(+i%WR-^_GDJORP-k{V z`Xxnu-q|IeZwZc}U7Z52qZa4UvoZ*^`D6=Jwx&{-Rqf{MN?#tOL`{)(i<{I$8Cblg zu-KjrfrKc0!D4EAk-OTv|3;%FgkJ~ixYVyb8;L|F$ZUC8B}1{im9r^ESS974eHyTY z-8OpMK<<(B)HQ%_6GO&cl8=5xXN6+*-DqrbZq@U8!A>ere@k#r9P_TU>aZ=Po_jD4n|m#y?3$J7TbOh;`!cn~F$t^Fd&uwqCbl3O1o6;i5B@|kjdGY}>xH)A>f=!#${OHl6qeE0kkr-84Va!Wm zQmRvA_ia^0Yj52qMt)22rf-au!=kL2{Yr|m5}-!@dx_&cB;fXbxIAw$H6pp?kYX_v zFy=xlKay5*)4i@VK-9~>J1e1qfXTqN$eKS*(Sx3a#aowqm!xjzaAjq((y{i5kM{6o z#U|c9UbJ1D<2C8v)RNE=yQ4~d-5u$#n!ED)%I(o_Hf7iYr09~$3)&oxRJ&m?)R*L> z`Q$rIPe;px)8rqrQ&HWr7clysw3c&!Ou4oYT+Y>6p-Vn`qs+f1FWWY@$%d9BBg1DQ zgmG_r3+LP2ipN7`ze0H1NU4WX+05F(M%RwrW7Nv_^(Qyu`CGn}bKhmqgEhYKp5bJX z6F+G3EGPfgAxZ1(FVZ7neX5-G_5uU$)tTwTDz{0aU0$R0c6-q7&wT`7_2sJqcL zCI$n{PQAPfQ*Vt!W?S~tCgO}MCHCs@##DkP`lN{U`h0zRj9Vrrq6F4zx%4F8N-~=T zBqkajv|$E%IYYk2c6{(CBJ4h-UB(ZoVw)ei;cQc|} zZr8}pk2!p;m=aFv<0ZSd0;PwDhzMNxwz_)jL|qdgPrK=65|I3+vW<-DmfTt zh)1<$cd_@B744?XVcHHm4ySC>U}T)L_gR)HAz;@h!&|{F9|BjI7J%3HdUM9we14HyEQb zYobi1eq>;QTdi;6N;Vg0QEIBkUvI+h?09K4wC!jtQgL91Il{=sd!15H1|$A(o~iDU z#`J5>!FuR$+1?B?0aA3>aj^?&!pnto|NJX=!jLAM$m7)Rn_lnHRP@akX z<`woGdl{&FeU^NQ>iYCo$VURz8}=k6$M$w6Z`V~~QWfX!P75XvXKVUsTW5R)N-9-; zAGmDUZ1uE*6=cBGU#ZWXmMXb9+ioS=Tjsy8q*%^R>>*eloAY6%Pf))c|q78bBHR!K=q;<@)Oy;Nm&#F3Ih 
zl46;k{}5~4hVAnGWmm?Lt8^X%Z`tpArxRu=e!CW%3;Q+mTinVhqM}EXi|wi(sMPW? zT998$?7d0DK^b8IgXE5{k}HLSZ?Biy&yJytgZ{Mi3+_ri~d8R9!;`G-Xrv!HXQdRcyVsB{>wNK~McjI#D z6CVi}CFoTR*K?~gaN>#yNk95<4{N0)D-u#7x6Q88233zfTK2j!Y*S-I5D_{^-B?;K zEe*V?OeCO7PDsBcN@BCONl+G=MB0+bQ?|MAaWxQ|`=X_1$Ixx`Kr#8l?-59n+)D`m z;z??1pdh4X1#xt);8nhjK1t36=u}K(cxwgnIz;)aDtIt;szpsY9dDxN`d<-RR}giV z!PKQ5u9M;gU?Ao9u-+5eKG*N8L6_lSGS66H$J;w177Jt!0+?p0GN&al3^)Ey0E?P5 zU?O)({>Lv?e9H3^Cu#ub;c<|t+T=+Ii#~HktOs7rV}Z*Hk4!G5Lfh8$YnAP-XC*L{ z{l4cJg|S$}8|)-$D|Pe4C2WgKTDz`B+HPc-Omvnr0|0}{xQNBBB-z#-PJ0Wq0OVb8 zm9s*AJ^6*Te;7ShzrIt~g(gp7wY-1ayhbycWuLn+>FUxDWu_0C=mu&?Y=vIOuq>pq zHkJHzyKa2aT;-w;z-`BQkt|%ja%Ew9x+g(c%E)N8&L=!8OA}+(zgG*MxyBrp@(65c zJ3D`{*MfOoekBd!zUfCPTGrh3D zLp-p|X0T^CUrt7Y1A~yY@Uevttw0S_;z5D!{sN0dcN^8iGezx*zihGx&gju# zDXb`6mh{JldUeE6KR{7%M9e1*4om2^`4q#wl4<#woN)!uA=ORfsKcj??kO3OJIZ{E zIT)s^>~9|@w>sj7t^AdyXw+N1zQaBVxI?G#64X^~_qOEbnEFXcj6Gf|urf7vKey## zoCLhPgf8$12iQfw| zHunQ#b<^JL?wJ;C6&qpUkJ8cst6SbR80Lp;4;Ig`++NK40FqQ9bx8pQ5?T%G*F2h< z+}^Gv>%pr>`=r6Kshx&3Dk7pa&eY=Z0*5P=m3Vp7g07AJF;~{vaRmx4dE4#~yyi^n zDCclq*WbO7b0q2O#6m&Q4Q;BXZC*3Y&PA;)9@CLy!WZyN=HyRj&p`D{;d86Vx_oZ# z6T}Xlv1dU;I@!A)Q6s@3A<=!X@Ngtte|Z*S}MbNbad#DH{QKM{;idlRB3Oc z&CzXt$#7ejSTE6SUu>OKyU4vOSJz7+cG*z8jc?QFCHh1{&I+sM2vg$1 zlNa%?I`-ZD+YWOtSXk7RXWrhiTrSDdtaL}d_<-yFoD>i8@qy{5?Il#bhfDNYuHab0 z=QTN%52ZSowP?9m9C?-01h*%N(-JZ{ytNv#))HG^F?Rnpf~Hm68ebXM7eZQLEUAFD zOL!q(^Bl9UD~frmxfjNvoFmpq1E1;Ym-+@}I<0rU)@_ljv(KN2uM$Y#L+wCM;zWBB z1t6OBE(e~a$}=3=BXN^}sKAGbpQp6@XGRAY`B7G{*1#n8hOGV0Dm5u7 z>7Lmv`z$OZD6ZGl*UQ_}&0-}jUvQ4aFotE4(=Uk%3JPwI6|G|c68UYr;#sdp*^4li zsTZ{tQIpuW;rur)IXFCoF7_SRREcLVHF4Tq*@^N^g5|l>iAd+Zm(E|Gu%Nn0n1 zjeX*bhpP#k^9Xw};<4Ay7o|VcJPhj_N32`66r~pQs};D>vI`rnbh$>+)w2?v6L zV7*%3Xbf6E=2WY_3JaD;!USbjB^1Gma7Fo$GzovDfW;1I=o|SxwdKJr_pq6-nD@F8 zgN6!bUwn`zJ;>6qNqibFRPG`W0v-*N6gg856O~~jMP6~8GIIU9FnjL37#dnkjUUqPN$d)2@97E*+Wy#hJ1P|0+gPybT!?RsIo$0KnQh{v)nov>b1Y|;`F zb9>=A#)_t%SAaeXUQm{tlo=jAes|s&(p!mUJZX3q;MDlzL5QqDw*GbaU?vVf!Wb-L zL9g6@xvWr4zlAyEG!AFT!vVnY*tX0?-ff&L`ms!->eg0)TH3PXqk3=c=rLgo%2V&J zu|14#vdo-iZsgBemKVUAp2lI&iD15aOUBmrFpi(p?v}nA_5zXk9C}ZKzi&+Nfs9}f zaupqdg+}id}sFoublBX`&NFU{~M)fmT8;kya(SLi`2#lOXhhsq9#LXOIO^* zd~md;BY4p0VB!bB$Zhjo`p_jX!O=WE-sCtHQbl-y3$KOS?|YFnaARL(tjzg%3P&P< zq-{mGj+i3y%&itU6vqz_j<;L+sWQL%Pg40*VW_*VZR3b&p`KA5Qh3AB6_L?u{OOV^ zJFCOQMGn?ifm%wJOfrb)o6bQMEI58fP0n$*JC6+DSX= z(dTQ4{L23=W`s_o>}o~)E=IxZ@EAYiavcBIKL}h5jQ&dEL&z!2*xtL~s&Y3m7jfY}$+xm?UJxim7p!AtwHFbs@2nRb zYJ8%8yJTtbVw;`zg`8{Yw`k<|?GIX8Ey1TFOZ_k|s@!oU9wce}jDc2onm4CkpZ4+o zTsyXQwJ`W=L(p?^AHA&X>=XeNw4NSP@H{QwnaCSZZbNd`Nahe;aaJj9t=*sys zVkRbHA3qj${VHBJR8~#`n)V~RAOBi7Pi@;*Idp04~o9-})y8QH7D zZADzh!j|iiXb!|7>^)ZEalU9TQ)S%iKTyfBQ~_xV4^F6XZcT)-{GIJ+nf^RWTTJoX zH#1~w7kQ7y$__~fna_-k_-2C7`*I#K($N`BH3Yd!?C)$)1i+sn1^5O2G6)&5j_0;7 ztWjQQ)LhU)L0QhxkC!YA#j1;63l*Jis*W=IJqG-9>*l0iJCPQM= z6ei*xHeqG$)WD>2FKLmVwosn~FIHeA=oJeic=@bnT$rlpUNahs=w?ln9aSsBab~%( z$Zhxjz1MCrkKwCRsmP$6eRJlmIOZ92zc-&w&yYoTm6Yx`=Cg4W`glpfyGu@{e7S4! 
literal 0
HcmV?d00001

diff --git a/docs/puml/onedrive_windows_authentication.png b/docs/puml/onedrive_windows_authentication.png
new file mode 100644
index 0000000000000000000000000000000000000000..d87ba7490cd1cb8986d059e52b67ab7e69dd53a0
GIT binary patch
literal 95227
zXjN4?_9wJSCTe6ogKjFL-0d`3 zYj&w@XATZ@@j0JUtq)!AGzz=t3YaQd=b+*eF%oc#^75IbrPQV8*nWJM((TLNB9TcD z;Un8eW+Rt>WmO_0gKH&k5aZoXuU8;#Jw?_N7S>{Dh)<eZDP&eBpP;gMx~_+P*bjl z$Hd%OmVShUWeG1YeA43$bu0L`_F|qJ=b^e}!pWQAqGL&3Hm?$tFk4BrI^|NR>qWpMrw+su?nnD^1SK$Zg8VXiaDFlq?rf33P&8 zuxELBxiSse5^E8wV`(>U0vQY8^wFbS4&UzZR?C~$nOZu)qh;cI92goZ;As8!Y3)&3 zTG}j_t?@$YMFOK042F|XUA3?J>KpxCc~?d5GNk~0lgL!xLE&+ zQ~t%MuVbF;`V3UHQ#L3?YnWIKAFH-Zcq1+_TZ+)clKP=H_=@T{T5TomEw% ze0+Sc#bO5^HO2iEF_F^c-Mc@wg2zb0r`>+W5&YaQN?%vsJ0t9No=%3b z9I&Jd?~z%c)9PD0kyM7RqvE&6{7Njz{kIQY@=_q~|Mk1>P{b0*AP$l|_;dGGID8YI zeqzC4uajOwUEOV+K)9oKx(B!Z1=KbR{Qa#tgPz8S$F#JV^6`aZPcgs`d^^3BYtYsD zbj2qcLf?SqAx*r{CYNb@LIkbb_ZzOaka;B^&aLMI|BHl#3yI&`e`59_-@g4v7Fp8U ce~jkc^O~?xwy_2+$X`g_lDnB9a_8ay0Y2KF)Bpeg literal 0 HcmV?d00001 From f773549b5a7859f8d7d19c8b17ea82fe8cdcae3e Mon Sep 17 00:00:00 2001 From: JC-comp <147694781+JC-comp@users.noreply.github.com> Date: Sun, 4 Feb 2024 03:05:25 +0800 Subject: [PATCH 047/305] Add error handling for version check (#2615) Resolves the issue where progress crashes when the GitHub API is unavailable --- src/util.d | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/util.d b/src/util.d index c75e62b1a..d35bb9627 100644 --- a/src/util.d +++ b/src/util.d @@ -671,10 +671,8 @@ JSONValue getLatestReleaseDetails() { githubLatest = content.parseJSON(); } catch (CurlException e) { addLogEntry("CurlException: Unable to query GitHub for latest release - " ~ e.msg, ["debug"]); - return parseJSON(`{"Error": "CurlException", "message": "` ~ e.msg ~ `"}`); } catch (JSONException e) { addLogEntry("JSONException: Unable to parse GitHub JSON response - " ~ e.msg, ["debug"]); - return parseJSON(`{"Error": "JSONException", "message": "` ~ e.msg ~ `"}`); } From 627726edd1ff6cd2e03c64f65c6511af3115e539 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 4 Feb 2024 06:49:27 +1100 Subject: [PATCH 048/305] Update sync.d * Fix regression introduced before 'alpha-5' somewhere --- src/sync.d | 52 +++++++++++++++++++++++++--------------------------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/src/sync.d b/src/sync.d index 3d5b8c248..281b4baac 100644 --- a/src/sync.d +++ b/src/sync.d @@ -950,12 +950,12 @@ class SyncEngine { } } - // Free up memory and items processed as it is pointless now having this data around - jsonItemsToProcess = []; - // Debug output - what was processed addLogEntry("Number of JSON items to process is: " ~ to!string(jsonItemsToProcess.length), ["debug"]); addLogEntry("Number of JSON items processed was: " ~ to!string(processedCount), ["debug"]); + + // Free up memory and items processed as it is pointless now having this data around + jsonItemsToProcess = []; } else { if (!appConfig.surpressLoggingOutput) { addLogEntry("No additional changes or items that can be applied were discovered while processing the data received from Microsoft OneDrive"); @@ -1173,7 +1173,7 @@ class SyncEngine { if (parentInDatabase) { // Calculate this items path newItemPath = computeItemPath(thisItemDriveId, thisItemParentId) ~ "/" ~ thisItemName; - addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]); + addLogEntry("JSON Item calculated full path is: " ~ newItemPath, ["debug"]); } else { // Parent not in the database // Is the parent a 'folder' from another user? ie - is this a 'shared folder' that has been shared with us? 
@@ -2006,7 +2006,7 @@ class SyncEngine { // Calculate this items path string newItemPath = computeItemPath(downloadDriveId, downloadParentId) ~ "/" ~ downloadItemName; - addLogEntry("New Item calculated full path is: " ~ newItemPath, ["debug"]); + addLogEntry("JSON Item calculated full path for download is: " ~ newItemPath, ["debug"]); // Is the item reported as Malware ? if (isMalware(onedriveJSONItem)){ @@ -2046,30 +2046,28 @@ class SyncEngine { addLogEntry("ERROR: onedriveJSONItem['file']['hashes'] is missing - unable to compare file hash after download", ["debug"]); } - // Is this a --download-only scenario? - if (appConfig.getValueBool("download_only")) { - if (exists(newItemPath)) { - // file exists locally already - Item databaseItem; - bool fileFoundInDB = false; - foreach (driveId; onlineDriveDetails.keys) { - if (itemDB.selectByPath(newItemPath, driveId, databaseItem)) { - fileFoundInDB = true; - break; - } + // Does the file already exist in the path locally? + if (exists(newItemPath)) { + // file exists locally already + Item databaseItem; + bool fileFoundInDB = false; + foreach (driveId; onlineDriveDetails.keys) { + if (itemDB.selectByPath(newItemPath, driveId, databaseItem)) { + fileFoundInDB = true; + break; } + } + + // Log the DB details + addLogEntry("File to download exists locally and this is the DB record: " ~ to!string(databaseItem), ["debug"]); + + // Does the DB (what we think is in sync) hash match the existing local file hash? + if (!testFileHash(newItemPath, databaseItem)) { + // local file is different to what we know to be true + addLogEntry("The local file to replace (" ~ newItemPath ~ ") has been modified locally since the last download. Renaming it to avoid potential local data loss."); - // Log the DB details - addLogEntry("File to download exists locally and this is the DB record: " ~ to!string(databaseItem), ["debug"]); - - // Does the DB (what we think is in sync) hash match the existing local file hash? - if (!testFileHash(newItemPath, databaseItem)) { - // local file is different to what we know to be true - addLogEntry("The local file to replace (" ~ newItemPath ~ ") has been modified locally since the last download. 
Renaming it to avoid potential local data loss."); - - // Perform the local safeBackup of the existing local file, passing in if we are performing a --dry-run or not - safeBackup(newItemPath, dryRun); - } + // Perform the local safeBackup of the existing local file, passing in if we are performing a --dry-run or not + safeBackup(newItemPath, dryRun); } } From d0214a86244169d98a3f2f6ef99c3fc7a5066f85 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 4 Feb 2024 07:51:37 +1100 Subject: [PATCH 049/305] Update sync.d * If the timestamp was corrected locally, and the files are now equal, save the data to the database to ensure this record is kept --- src/sync.d | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/sync.d b/src/sync.d index 281b4baac..c3e37fe6b 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1784,6 +1784,8 @@ class SyncEngine { if (localModifiedTime == itemModifiedTime) { // yes they are equal addLogEntry("File timestamps are equal, no further action required", ["verbose"]); // correct message as timestamps are equal + addLogEntry("Update/Insert local database with item details: " ~ to!string(newDatabaseItem), ["debug"]); + itemDB.upsert(newDatabaseItem); return; } } From 6d3b96bdc860613d059cbfdd039c22a824c0d103 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 4 Feb 2024 15:03:34 +1100 Subject: [PATCH 050/305] Update sync.d * Ensure that 'cachedOnlineDriveData' contains valid quota data if using --local-first * In a --local-first scenario, query the DB for unique driveId's rather than use 'cachedOnlineDriveData' which may not be fully populated --- src/sync.d | 123 ++++++++++++++++++++++------------------------------- 1 file changed, 52 insertions(+), 71 deletions(-) diff --git a/src/sync.d b/src/sync.d index c3e37fe6b..bc35bd93f 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1783,7 +1783,7 @@ class SyncEngine { // Are the timestamps equal? 
if (localModifiedTime == itemModifiedTime) { // yes they are equal - addLogEntry("File timestamps are equal, no further action required", ["verbose"]); // correct message as timestamps are equal + addLogEntry("File timestamps are equal, no further action required", ["debug"]); // correct message as timestamps are equal addLogEntry("Update/Insert local database with item details: " ~ to!string(newDatabaseItem), ["debug"]); itemDB.upsert(newDatabaseItem); return; @@ -2318,7 +2318,7 @@ class SyncEngine { addLogEntry("The source of the incorrect timestamp was OneDrive online - correcting timestamp online", ["verbose"]); if (!dryRun) { // Attempt to update the online date time stamp - uploadLastModifiedTime(item.driveId, item.id, localModifiedTime.toUTC(), item.eTag); + uploadLastModifiedTime(item.driveId, item.id, localModifiedTime, item.eTag); return false; } } else { @@ -2724,10 +2724,8 @@ class SyncEngine { if (singleDirectoryScope) { consistencyCheckDriveIdsArray ~= singleDirectoryScopeDriveId; } else { - foreach (driveId; onlineDriveDetails.keys) { - // For each key, add this to consistencyCheckDriveIdsArray - consistencyCheckDriveIdsArray ~= driveId; - } + // Query the DB for all unique DriveID's + consistencyCheckDriveIdsArray = itemDB.selectDistinctDriveIds(); } // Create a new DB blank item @@ -2737,6 +2735,9 @@ class SyncEngine { // Make the logging more accurate - we cant update driveId as this then breaks the below queries addLogEntry("Processing DB entries for this Drive ID: " ~ driveId, ["verbose"]); + // Freshen the cached quota details for this driveID + addOrUpdateOneDriveOnlineDetails(driveId); + // What OneDrive API query do we use? // - Are we running against a National Cloud Deployments that does not support /delta ? // National Cloud Deployments do not support /delta as a query @@ -2890,7 +2891,8 @@ class SyncEngine { if (localModifiedTime != itemModifiedTime) { // The modified dates are different - addLogEntry("The local item has a different modified time " ~ to!string(localModifiedTime) ~ " when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime), ["debug"]); + addLogEntry("Local file time discrepancy detected: " ~ localFilePath, ["verbose"]); + addLogEntry("This local file has a different modified time " ~ to!string(localModifiedTime) ~ " (UTC) when compared to " ~ itemSource ~ " modified time " ~ to!string(itemModifiedTime) ~ " (UTC)", ["debug"]); // Test the file hash if (!testFileHash(localFilePath, dbItem)) { @@ -3881,27 +3883,24 @@ class SyncEngine { // Query the OneDrive API using the provided driveId to get the latest quota details string[3][] getRemainingFreeSpaceOnline(string driveId) { - // Get the quota details for this driveId // Quota details are ONLY available for the main default driveId, as the OneDrive API does not provide quota details for shared folders JSONValue currentDriveQuota; - - // Assume that quota is being restricted, there is no quota available and zero space online as a default - bool quotaRestricted = true; + bool quotaRestricted = false; // Assume quota is not restricted unless "remaining" is missing bool quotaAvailable = false; ulong quotaRemainingOnline = 0; - + string[3][] result; + // Ensure that we have a valid driveId to query if (driveId.empty) { // No 'driveId' was provided, use the application default driveId = appConfig.defaultDriveId; } - + // Try and query the quota for the provided driveId try { // Create a new OneDrive API instance - OneDriveApi getCurrentDriveQuotaApiInstance; - 
getCurrentDriveQuotaApiInstance = new OneDriveApi(appConfig); + OneDriveApi getCurrentDriveQuotaApiInstance = new OneDriveApi(appConfig); getCurrentDriveQuotaApiInstance.initialise(); addLogEntry("Seeking available quota for this drive id: " ~ driveId, ["debug"]); currentDriveQuota = getCurrentDriveQuotaApiInstance.getDriveQuota(driveId); @@ -3911,10 +3910,15 @@ class SyncEngine { object.destroy(getCurrentDriveQuotaApiInstance); } catch (OneDriveException e) { addLogEntry("currentDriveQuota = onedrive.getDriveQuota(driveId) generated a OneDriveException", ["debug"]); + // If an exception occurs, it's unclear if quota is restricted, but quota details are not available + quotaRestricted = true; // Considering restricted due to failure to access + // Return result + result ~= [to!string(quotaRestricted), to!string(quotaAvailable), to!string(quotaRemainingOnline)]; + return result; } // Validate that currentDriveQuota is a JSON value - if (currentDriveQuota.type() == JSONType.object) { + if (currentDriveQuota.type() == JSONType.object && "quota" in currentDriveQuota) { // Response from API contains valid data // If 'personal' accounts, if driveId == defaultDriveId, then we will have data // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data @@ -3922,72 +3926,37 @@ class SyncEngine { // If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value addLogEntry("Quota Details: " ~ to!string(currentDriveQuota), ["debug"]); - // Was 'quota' returned in the JSON data? - if ("quota" in currentDriveQuota){ - if (driveId == appConfig.defaultDriveId) { - // We potentially have updated quota remaining details available - // However in some cases OneDrive Business configurations 'restrict' quota details thus is empty / blank / negative value / zero - if ("remaining" in currentDriveQuota["quota"]){ - // We have valid quota remaining details returned for the provided drive id - quotaRemainingOnline = currentDriveQuota["quota"]["remaining"].integer; - // Quota is not being restricted - quotaRestricted = false; - - if (quotaRemainingOnline <= 0) { - if (appConfig.accountType == "personal"){ - // zero space available - addLogEntry("ERROR: OneDrive account currently has zero space available. Please free up some space online or purchase additional space."); - quotaRemainingOnline = 0; - quotaAvailable = false; - } else { - // zero space available is being reported, maybe being restricted? - addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. 
Please fix by speaking to your OneDrive / Office 365 Administrator."); - quotaRemainingOnline = 0; - quotaAvailable = true; // technically unknown - quotaRestricted = true; - } - } - } - } else { - // quota details returned, but for a drive id that is not ours - if ("remaining" in currentDriveQuota["quota"]){ - // remaining is in the quota JSON response - if (currentDriveQuota["quota"]["remaining"].integer <= 0) { - // value returned is 0 or less than 0 - addLogEntry("OneDrive quota information is set at zero, as this is not our drive id, ignoring", ["verbose"]); - quotaRemainingOnline = 0; - quotaRestricted = true; - quotaAvailable = true; - } + auto quota = currentDriveQuota["quota"]; + if ("remaining" in quota) { + quotaRemainingOnline = quota["remaining"].integer; + quotaAvailable = quotaRemainingOnline > 0; + // If "remaining" is present but its value is <= 0, it's not restricted but exhausted + if (quotaRemainingOnline <= 0) { + if (appConfig.accountType == "personal") { + addLogEntry("ERROR: OneDrive account currently has zero space available. Please free up some space online or purchase additional capacity."); + } else { // Assuming 'business' or 'sharedLibrary' + addLogEntry("WARNING: OneDrive quota information is being restricted or providing a zero value. Please fix by speaking to your OneDrive / Office 365 Administrator."); } } } else { - // No quota details returned - if (driveId == appConfig.defaultDriveId) { - // no quota details returned for current drive id - addLogEntry("ERROR: OneDrive quota information is missing. Potentially your OneDrive account currently has zero space available. Please free up some space online or purchase additional space."); - quotaRemainingOnline = 0; - quotaRestricted = true; - quotaAvailable = true; - } else { - // quota details not available - addLogEntry("WARNING: OneDrive quota information is being restricted as this is not our drive id.", ["debug"]); - quotaRemainingOnline = 0; - quotaRestricted = true; - quotaAvailable = true; - } + // "remaining" not present, indicating restricted quota information + quotaRestricted = true; + addLogEntry("Quota information is restricted or not available for this drive.", ["verbose"]); } + } else { + // When valid quota details are not fetched + addLogEntry("Failed to fetch or interpret quota details from the API response.", ["verbose"]); + quotaRestricted = true; // Considering restricted due to failure to interpret } - + // What was the determined available quota? addLogEntry("Reported Available Online Quota for driveID '" ~ driveId ~ "': " ~ to!string(quotaRemainingOnline), ["debug"]); // Return result - string[3][] result; result ~= [to!string(quotaRestricted), to!string(quotaAvailable), to!string(quotaRemainingOnline)]; return result; } - + // Perform a filesystem walk to uncover new data to upload to OneDrive void scanLocalFilesystemPathForNewData(string path) { @@ -4869,11 +4838,23 @@ class SyncEngine { // Resolves: https://github.com/skilion/onedrive/issues/121 , https://github.com/skilion/onedrive/issues/294 , https://github.com/skilion/onedrive/issues/329 if (thisFileSize <= maxUploadFileSize) { // Is there enough free space on OneDrive as compared to when we started this thread, to safely upload the file to OneDrive? 
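// Aside - an illustrative sketch only (names such as QuotaState and interpretQuota
// are invented here, not taken from the client): the reworked
// getRemainingFreeSpaceOnline() above effectively reduces the /drive quota JSON
// to the three facts this free-space check needs - whether quota reporting is
// restricted, whether any space is available, and how many bytes remain.
import std.json;

struct QuotaState {
	bool restricted;  // quota details withheld by the service or query failed
	bool available;   // remaining > 0
	long remaining;   // bytes reported by the API, 0 when unknown
}

QuotaState interpretQuota(JSONValue driveQuota) {
	QuotaState state;
	if (driveQuota.type == JSONType.object && "quota" in driveQuota) {
		auto quota = driveQuota["quota"];
		if (quota.type == JSONType.object && "remaining" in quota) {
			// "remaining" present: not restricted, but possibly exhausted
			state.remaining = quota["remaining"].integer;
			state.available = state.remaining > 0;
			return state;
		}
	}
	state.restricted = true;
	return state;
}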
+ + // Make sure that parentItem.driveId is in our driveIDs array to use when checking if item is in database + // Keep the driveDetailsCache array with unique entries only + if (!canFindDriveId(parentItem.driveId, cachedOnlineDriveData)) { + // Add this driveId to the drive cache, which then also sets for the defaultDriveId: + // - quotaRestricted; + // - quotaAvailable; + // - quotaRemaining; + addOrUpdateOneDriveOnlineDetails(parentItem.driveId); + // Fetch the details from cachedOnlineDriveData + cachedOnlineDriveData = getDriveDetails(parentItem.driveId); + } + // Fetch the details from cachedOnlineDriveData // - cachedOnlineDriveData.quotaRestricted; // - cachedOnlineDriveData.quotaAvailable; // - cachedOnlineDriveData.quotaRemaining; - cachedOnlineDriveData = getDriveDetails(parentItem.driveId); remainingFreeSpaceOnline = cachedOnlineDriveData.quotaRemaining; // When we compare the space online to the total we are trying to upload - is there space online? From 1630ae37f3613302a1e54f090969d533af910f0b Mon Sep 17 00:00:00 2001 From: JC-comp <147694781+JC-comp@users.noreply.github.com> Date: Sun, 4 Feb 2024 12:34:45 +0800 Subject: [PATCH 051/305] Suppress processing dots in verbose mode (#2622) * Suppress processing dots in verbose mode * Fix coding style --- src/log.d | 13 +++++++++---- src/sync.d | 54 ++++++++++++++++++++++++++++++++---------------------- 2 files changed, 41 insertions(+), 26 deletions(-) diff --git a/src/log.d b/src/log.d index 822197ca5..79b6c2308 100644 --- a/src/log.d +++ b/src/log.d @@ -149,10 +149,15 @@ void addLogEntry(string message = "", string[] levels = ["info"]) { logBuffer.logThisMessage(message, levels); } -void addProcessingLogHeaderEntry(string message = "") { - addLogEntry(message, ["logFileOnly"]); - // Use the dots to show the application is 'doing something' - addLogEntry(message ~ " .", ["consoleOnlyNoNewLine"]); +void addProcessingLogHeaderEntry(string message, long verbosityCount) { + if (verbosityCount == 0) { + addLogEntry(message, ["logFileOnly"]); + // Use the dots to show the application is 'doing something' if verbosityCount == 0 + addLogEntry(message ~ " .", ["consoleOnlyNoNewLine"]); + } else { + // Fallback to normal logging if in verbose or above level + addLogEntry(message); + } } void addProcessingDotEntry() { diff --git a/src/sync.d b/src/sync.d index bc35bd93f..361b5c2dc 100644 --- a/src/sync.d +++ b/src/sync.d @@ -740,7 +740,7 @@ class SyncEngine { // Dynamic output for non-verbose and verbose run so that the user knows something is being retreived from the OneDrive API if (appConfig.verbosityCount == 0) { if (!appConfig.surpressLoggingOutput) { - addProcessingLogHeaderEntry("Fetching items from the OneDrive API for Drive ID: " ~ driveIdToQuery); + addProcessingLogHeaderEntry("Fetching items from the OneDrive API for Drive ID: " ~ driveIdToQuery, appConfig.verbosityCount); } } else { addLogEntry("Fetching /delta response from the OneDrive API for Drive ID: " ~ driveIdToQuery, ["verbose"]); @@ -912,13 +912,7 @@ class SyncEngine { // Dynamic output for a non-verbose run so that the user knows something is happening if (!appConfig.surpressLoggingOutput) { - // Logfile entry - addProcessingLogHeaderEntry("Processing " ~ to!string(jsonItemsToProcess.length) ~ " applicable changes and items received from Microsoft OneDrive"); - - if (appConfig.verbosityCount != 0) { - // Close out the console only processing line above, if we are doing verbose or above logging - addLogEntry("\n", ["consoleOnlyNoNewLine"]); - } + 
addProcessingLogHeaderEntry("Processing " ~ to!string(jsonItemsToProcess.length) ~ " applicable changes and items received from Microsoft OneDrive", appConfig.verbosityCount); } // For each batch, process the JSON items that need to be now processed. @@ -2716,7 +2710,7 @@ class SyncEngine { // Log what we are doing if (!appConfig.surpressLoggingOutput) { - addProcessingLogHeaderEntry("Performing a database consistency and integrity check on locally stored data"); + addProcessingLogHeaderEntry("Performing a database consistency and integrity check on locally stored data", appConfig.verbosityCount); } // What driveIDsArray do we use? If we are doing a --single-directory we need to use just the drive id associated with that operation @@ -2810,7 +2804,10 @@ class SyncEngine { } // Close out the '....' being printed to the console - addLogEntry("\n", ["consoleOnlyNoNewLine"]); + if (!appConfig.surpressLoggingOutput) { + if (appConfig.verbosityCount == 0) + addLogEntry("\n", ["consoleOnlyNoNewLine"]); + } // Are we doing a --download-only sync? if (!appConfig.getValueBool("download_only")) { @@ -2849,7 +2846,10 @@ class SyncEngine { // Log what we are doing addLogEntry("Processing " ~ logOutputPath, ["verbose"]); // Add a processing '.' - addProcessingDotEntry(); + if (!appConfig.surpressLoggingOutput) { + if (appConfig.verbosityCount == 0) + addProcessingDotEntry(); + } // Determine which action to take final switch (dbItem.type) { @@ -3981,9 +3981,9 @@ class SyncEngine { if (isDir(path)) { if (!appConfig.surpressLoggingOutput) { if (!cleanupLocalFiles) { - addProcessingLogHeaderEntry("Scanning the local file system '" ~ logPath ~ "' for new data to upload"); + addProcessingLogHeaderEntry("Scanning the local file system '" ~ logPath ~ "' for new data to upload", appConfig.verbosityCount); } else { - addProcessingLogHeaderEntry("Scanning the local file system '" ~ logPath ~ "' for data to cleanup"); + addProcessingLogHeaderEntry("Scanning the local file system '" ~ logPath ~ "' for data to cleanup", appConfig.verbosityCount); } } } @@ -3993,7 +3993,10 @@ class SyncEngine { // Perform the filesystem walk of this path, building an array of new items to upload scanPathForNewData(path); - addLogEntry("\n", ["consoleOnlyNoNewLine"]); + if (!appConfig.surpressLoggingOutput) { + if (appConfig.verbosityCount == 0) + addLogEntry("\n", ["consoleOnlyNoNewLine"]); + } // To finish off the processing items, this is needed to reflect this in the log addLogEntry("------------------------------------------------------------------", ["debug"]); @@ -4008,7 +4011,7 @@ class SyncEngine { // Are there any items to download post fetching the /delta data? if (!newLocalFilesToUploadToOneDrive.empty) { // There are elements to upload - addProcessingLogHeaderEntry("New items to upload to OneDrive: " ~ to!string(newLocalFilesToUploadToOneDrive.length)); + addProcessingLogHeaderEntry("New items to upload to OneDrive: " ~ to!string(newLocalFilesToUploadToOneDrive.length), appConfig.verbosityCount); // Reset totalDataToUpload totalDataToUpload = 0; @@ -4062,7 +4065,10 @@ class SyncEngine { // Scan this path for new data void scanPathForNewData(string path) { // Add a processing '.' 
- addProcessingDotEntry(); + if (!appConfig.surpressLoggingOutput) { + if (appConfig.verbosityCount == 0) + addProcessingDotEntry(); + } ulong maxPathLength; ulong pathWalkLength; @@ -4751,14 +4757,16 @@ class SyncEngine { foreach (chunk; newLocalFilesToUploadToOneDrive.chunks(batchSize)) { uploadNewLocalFileItemsInParallel(chunk); } - addLogEntry("\n", ["consoleOnlyNoNewLine"]); + if (appConfig.verbosityCount == 0) + addLogEntry("\n", ["consoleOnlyNoNewLine"]); } // Upload the file batches in parallel void uploadNewLocalFileItemsInParallel(string[] array) { foreach (i, fileToUpload; taskPool.parallel(array)) { // Add a processing '.' - addProcessingDotEntry(); + if (appConfig.verbosityCount == 0) + addProcessingDotEntry(); addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]); uploadNewFile(fileToUpload); addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]); @@ -6088,7 +6096,7 @@ class SyncEngine { // Dynamic output for a non-verbose run so that the user knows something is happening if (appConfig.verbosityCount == 0) { if (!appConfig.surpressLoggingOutput) { - addProcessingLogHeaderEntry("Fetching items from the OneDrive API for Drive ID: " ~ searchItem.driveId); + addProcessingLogHeaderEntry("Fetching items from the OneDrive API for Drive ID: " ~ searchItem.driveId, appConfig.verbosityCount); } } else { addLogEntry("Generating a /delta response from the OneDrive API for Drive ID: " ~ searchItem.driveId, ["verbose"]); @@ -7212,7 +7220,7 @@ class SyncEngine { deltaLink = itemDB.getDeltaLink(driveIdToQuery, itemIdToQuery); // Log what we are doing - addProcessingLogHeaderEntry("Querying the change status of Drive ID: " ~ driveIdToQuery); + addProcessingLogHeaderEntry("Querying the change status of Drive ID: " ~ driveIdToQuery, appConfig.verbosityCount); // Query the OenDrive API using the applicable details, following nextLink if applicable // Create a new API Instance for querying /delta and initialise it @@ -7222,7 +7230,8 @@ class SyncEngine { for (;;) { // Add a processing '.' - addProcessingDotEntry(); + if (appConfig.verbosityCount == 0) + addProcessingDotEntry(); // Get the /delta changes via the OneDrive API // getDeltaChangesByItemId has the re-try logic for transient errors @@ -7297,7 +7306,8 @@ class SyncEngine { else break; } // Needed after printing out '....' when fetching changes from OneDrive API - addLogEntry("\n", ["consoleOnlyNoNewLine"]); + if (appConfig.verbosityCount == 0) + addLogEntry("\n", ["consoleOnlyNoNewLine"]); // Are there any JSON items to process? if (count(jsonItemsArray) != 0) { From 83726ac4dedcf11138d8fb3170def357ae41aec9 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 11 Feb 2024 11:29:27 +1100 Subject: [PATCH 052/305] Refine Shared Folder Handling * Update Shared Folder Handling so when adding a SharePoint Library as a Shared Folder to a Business Account these are correctly handled. 
* Validate that DB matches in 'online first' vs 'local first' when using Shared Folders and SharePoint added libraries (in the case of Business Accounts) * Tested and revalided with OneDrive Business Account with 'sync_business_shared_items = "true"' * Tested and revalided with OneDrive Business Account with 'sync_business_shared_items = "false"' * Tested and revalided with OneDrive Personal Account --- src/sync.d | 326 ++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 236 insertions(+), 90 deletions(-) diff --git a/src/sync.d b/src/sync.d index 361b5c2dc..ea97737af 100644 --- a/src/sync.d +++ b/src/sync.d @@ -576,7 +576,7 @@ class SyncEngine { } } else { // Is this a Business Account with Sync Business Shared Items enabled? - if ((appConfig.accountType == "business") && ( appConfig.getValueBool("sync_business_shared_items"))) { + if ((appConfig.accountType == "business") && (appConfig.getValueBool("sync_business_shared_items"))) { // Business Account Shared Items Handling // - OneDrive Business Shared Folder @@ -1197,22 +1197,8 @@ class SyncEngine { if (hasSharedElement(onedriveJSONItem)) { // Has the Shared JSON structure addLogEntry("Personal Shared Item JSON object has the 'shared' JSON structure", ["debug"]); - - // Create a DB Tie Record for this parent object - addLogEntry("Creating a DB Tie for this Personal Shared Folder", ["debug"]); - - // DB Tie - Item parentItem; - parentItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str; - parentItem.id = onedriveJSONItem["parentReference"]["id"].str; - parentItem.name = "root"; - parentItem.type = ItemType.dir; - parentItem.mtime = remoteItem.mtime; - parentItem.parentId = null; - - // Add this DB Tie parent record to the local database - addLogEntry("Insert local database with remoteItem parent details: " ~ to!string(parentItem), ["debug"]); - itemDB.upsert(parentItem); + // Create a 'root' DB Tie Record for this JSON object + createDatabaseRootTieRecordForOnlineSharedFolder(onedriveJSONItem); } // Ensure that this item has no parent @@ -1226,21 +1212,8 @@ class SyncEngine { addLogEntry("Handling a Business or SharePoint Shared Item JSON object", ["debug"]); if (appConfig.accountType == "business") { - // Create a DB Tie Record for this parent object - addLogEntry("Creating a DB Tie for this Business Shared Folder", ["debug"]); - - // DB Tie - Item parentItem; - parentItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str; - parentItem.id = onedriveJSONItem["parentReference"]["id"].str; - parentItem.name = "root"; - parentItem.type = ItemType.dir; - parentItem.mtime = remoteItem.mtime; - parentItem.parentId = null; - - // Add this DB Tie parent record to the local database - addLogEntry("Insert local database with remoteItem parent details: " ~ to!string(parentItem), ["debug"]); - itemDB.upsert(parentItem); + // Create a 'root' DB Tie Record for this JSON object + createDatabaseRootTieRecordForOnlineSharedFolder(onedriveJSONItem); // Ensure that this item has no parent addLogEntry("Setting remoteItem.parentId to be null", ["debug"]); @@ -1278,8 +1251,7 @@ class SyncEngine { itemDB.upsert(remoteItem); } else { // Sharepoint account type - addLogEntry("Handling a SharePoint Shared Item JSON object - NOT IMPLEMENTED ........ ", ["debug"]); - + addLogEntry("Handling a SharePoint Shared Item JSON object - NOT IMPLEMENTED YET ........ ", ["info"]); } } } @@ -4228,6 +4200,7 @@ class SyncEngine { if (canFind(businessSharedFoldersOnlineToSkip, path)) { // This path was skipped - why? 
addLogEntry("Skipping item '" ~ path ~ "' due to this path matching an existing online Business Shared Folder name", ["info", "notify"]); + addLogEntry("To sync this Business Shared Folder, consider enabling 'sync_business_shared_folders' within your application configuration.", ["info"]); skipFolderTraverse = true; } } @@ -4389,6 +4362,9 @@ class SyncEngine { // Step 2: Query for the path online if not found in the local database if (!parentPathFoundInDB) { // parent path not found in database + + addLogEntry("PARENT NOT FOUND IN DATABASE - QUERY ONLINE", ["info"]); + try { addLogEntry("Attempting to query OneDrive Online for this parent path as path not found in local database: " ~ parentPath, ["debug"]); onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetails(parentPath); @@ -4449,33 +4425,17 @@ class SyncEngine { addLogEntry("parentItem details: " ~ to!string(parentItem), ["debug"]); // Depending on the data within parentItem, will depend on what method we are using to search - // In a --local-first scenario, a Shared Folder will be 'remote' so we need to check the remote parent id, rather than parentItem details + // A Shared Folder will be 'remote' so we need to check the remote parent id, rather than parentItem details Item queryItem; - if ((appConfig.getValueBool("local_first")) && (parentItem.type == ItemType.remote)) { - // We are --local-first scenario and this folder is a potential shared object - addLogEntry("--localfirst & parentItem is a remote item object", ["debug"]); - + if (parentItem.type == ItemType.remote) { + // This folder is a potential shared object + addLogEntry("ParentItem is a remote item object", ["debug"]); + // Need to create the DB Tie for this shared object to ensure this exists in the database + createDatabaseTieRecordForOnlineSharedFolder(parentItem); + // Update the queryItem values queryItem.driveId = parentItem.remoteDriveId; queryItem.id = parentItem.remoteId; - - // Need to create the DB Tie for this object - addLogEntry("Creating a DB Tie for this Shared Folder", ["debug"]); - // New DB Tie Item to bind the 'remote' path to our parent path - Item tieDBItem; - // Set the name - tieDBItem.name = parentItem.name; - // Set the correct item type - tieDBItem.type = ItemType.dir; - // Set the right elements using the 'remote' of the parent as the 'actual' for this DB Tie - tieDBItem.driveId = parentItem.remoteDriveId; - tieDBItem.id = parentItem.remoteId; - // Set the correct mtime - tieDBItem.mtime = parentItem.mtime; - // Add tie DB record to the local database - addLogEntry("Adding DB Tie record to database: " ~ to!string(tieDBItem), ["debug"]); - itemDB.upsert(tieDBItem); - } else { // Use parent item for the query item addLogEntry("Standard Query, use parentItem", ["debug"]); @@ -4567,14 +4527,14 @@ class SyncEngine { string requiredDriveId; string requiredParentItemId; - // Is this a Personal Account and is the item a Remote Object (Shared Folder) ? - if ((appConfig.accountType == "personal") && (parentItem.type == ItemType.remote)) { + // Is the item a Remote Object (Shared Folder) ? + if (parentItem.type == ItemType.remote) { // Yes .. 
Shared Folder addLogEntry("parentItem data: " ~ to!string(parentItem), ["debug"]); requiredDriveId = parentItem.remoteDriveId; requiredParentItemId = parentItem.remoteId; } else { - // Not a personal account + Shared Folder + // Not a Shared Folder requiredDriveId = parentItem.driveId; requiredParentItemId = parentItem.id; } @@ -4675,22 +4635,37 @@ class SyncEngine { if (onlinePathData["name"].str == baseName(thisNewPathToCreate)) { // OneDrive 'name' matches local path name if (appConfig.accountType == "business") { - // We are a business account, this existing online folder, could be a Shared Online Folder and is the 'Add shortcut to My files' item + // We are a business account, this existing online folder, could be a Shared Online Folder could be a 'Add shortcut to My files' item addLogEntry("onlinePathData: " ~ to!string(onlinePathData), ["debug"]); + // Is this a remote folder if (isItemRemote(onlinePathData)) { // The folder is a remote item ... we do not want to create this ... - addLogEntry("Remote Existing Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'", ["debug"]); - addLogEntry("We need to skip this path: " ~ thisNewPathToCreate, ["debug"]); + addLogEntry("Existing Remote Online Folder is most likely a OneDrive Shared Business Folder Link added by 'Add shortcut to My files'", ["debug"]); - // Add this path to businessSharedFoldersOnlineToSkip - businessSharedFoldersOnlineToSkip ~= [thisNewPathToCreate]; - // no save to database, no online create - // Shutdown API instance - createDirectoryOnlineOneDriveApiInstance.shutdown(); - // Free object and memory - object.destroy(createDirectoryOnlineOneDriveApiInstance); - return; + // Is Shared Business Folder Syncing enabled ? + if (!appConfig.getValueBool("sync_business_shared_items")) { + // Shared Business Folder Syncing is NOT enabled + addLogEntry("We need to skip this path: " ~ thisNewPathToCreate, ["debug"]); + // Add this path to businessSharedFoldersOnlineToSkip + businessSharedFoldersOnlineToSkip ~= [thisNewPathToCreate]; + // no save to database, no online create + // Shutdown API instance + createDirectoryOnlineOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(createDirectoryOnlineOneDriveApiInstance); + return; + } else { + // As the 'onlinePathData' is potentially missing the actual correct parent folder id in the 'remoteItem' JSON response, we have to perform a further query to get the correct answer + // Failure to do this, means the 'root' DB Tie Record has a different parent reference id to that what this folder's parent reference id actually is + JSONValue sharedFolderParentPathData; + string remoteDriveId = onlinePathData["remoteItem"]["parentReference"]["driveId"].str; + string remoteItemId = onlinePathData["remoteItem"]["id"].str; + sharedFolderParentPathData = createDirectoryOnlineOneDriveApiInstance.getPathDetailsById(remoteDriveId, remoteItemId); + + // A 'root' DB Tie Record needed for this folder using the correct parent data + createDatabaseRootTieRecordForOnlineSharedFolder(sharedFolderParentPathData); + } } } @@ -4824,7 +4799,7 @@ class SyncEngine { // If the parent path was found in the DB, to ensure we are uploading the the right location 'parentItem.driveId' must not be empty if ((parentPathFoundInDB) && (parentItem.driveId.empty)) { // switch to using defaultDriveId - addLogEntry("parentItem.driveId is empty - using defaultDriveId for upload API calls"); + addLogEntry("parentItem.driveId is empty - using defaultDriveId 
for upload API calls", ["debug"]); parentItem.driveId = appConfig.defaultDriveId; } @@ -4929,11 +4904,24 @@ class SyncEngine { // even though some file systems (such as a POSIX-compliant file systems that Linux use) may consider them as different. // Note that NTFS supports POSIX semantics for case sensitivity but this is not the default behavior, OneDrive does not use this. - // In order to upload this file - this query HAS to respond as a 404 - Not Found + // In order to upload this file - this query HAS to respond with a '404 - Not Found' so that the upload is triggered // Does this 'file' already exist on OneDrive? try { - fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); + if (parentItem.driveId == appConfig.defaultDriveId) { + // getPathDetailsByDriveId is only reliable when the driveId is our driveId + fileDetailsFromOneDrive = checkFileOneDriveApiInstance.getPathDetailsByDriveId(parentItem.driveId, fileToUpload); + } else { + // We need to curate a response by listing the children of this parentItem.driveId and parentItem.id , without traversing directories + // So that IF the file is on a Shared Folder, it can be found, and, if it exists, checked correctly + fileDetailsFromOneDrive = searchDriveItemForFile(parentItem.driveId, parentItem.id, fileToUpload); + // Was the file found? + if (fileDetailsFromOneDrive.type() != JSONType.object) { + // No .... + throw new OneDriveException(404, "Name not found via searchDriveItemForFile"); + } + } + // Portable Operating System Interface (POSIX) testing of JSON response from OneDrive API if (hasName(fileDetailsFromOneDrive)) { performPosixTest(baseName(fileToUpload), fileDetailsFromOneDrive["name"].str); @@ -6537,24 +6525,23 @@ class SyncEngine { // Is this JSON a remote object addLogEntry("Testing if this is a remote Shared Folder", ["debug"]); if (isItemRemote(getPathDetailsAPIResponse)) { - // Remote Directory .. need a DB Tie Item - addLogEntry("Creating a DB Tie for this Shared Folder", ["debug"]); - // New DB Tie Item to bind the 'remote' path to our parent path - Item tieDBItem; + // Remote Directory .. need a DB Tie Record + createDatabaseTieRecordForOnlineSharedFolder(parentDetails); + + // Temp DB Item to bind the 'remote' path to our parent path + Item tempDBItem; // Set the name - tieDBItem.name = parentDetails.name; + tempDBItem.name = parentDetails.name; // Set the correct item type - tieDBItem.type = ItemType.dir; + tempDBItem.type = ItemType.dir; // Set the right elements using the 'remote' of the parent as the 'actual' for this DB Tie - tieDBItem.driveId = parentDetails.remoteDriveId; - tieDBItem.id = parentDetails.remoteId; + tempDBItem.driveId = parentDetails.remoteDriveId; + tempDBItem.id = parentDetails.remoteId; // Set the correct mtime - tieDBItem.mtime = parentDetails.mtime; - // Add tie DB record to the local database - addLogEntry("Adding DB Tie record to database: " ~ to!string(tieDBItem), ["debug"]); - itemDB.upsert(tieDBItem); - // Update parentDetails to use the DB Tie record - parentDetails = tieDBItem; + tempDBItem.mtime = parentDetails.mtime; + + // Update parentDetails to use this temp record + parentDetails = tempDBItem; } } catch (OneDriveException exception) { if (exception.httpStatusCode == 404) { @@ -6961,8 +6948,9 @@ class SyncEngine { // What account type is this? 
if (appConfig.accountType != "personal") { // Not a personal account, thus the integrity failure is most likely due to SharePoint - addLogEntry("CAUTION: Microsoft OneDrive when using SharePoint as a backend enhances files after you upload them, which means this file may now have technical differences from your local copy, resulting in a data integrity issue.", ["verbose"]); - addLogEntry("See: https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details", ["verbose"]); + addLogEntry("CAUTION: When you upload files to Microsoft OneDrive that uses SharePoint as its backend, Microsoft OneDrive will alter your files post upload.", ["verbose"]); + addLogEntry("CAUTION: This will lead to technical differences between the version stored online and your local original file, potentially causing issues with the accuracy or consistency of your data.", ["verbose"]); + addLogEntry("CAUTION: Please read https://github.com/OneDrive/onedrive-api-docs/issues/935 for further details.", ["verbose"]); } // How can this be disabled? addLogEntry("To disable the integrity checking of uploaded files use --disable-upload-validation"); @@ -7847,6 +7835,96 @@ class SyncEngine { } } + // Search a given Drive ID, Item ID and filename to see if this exists in the location specified + JSONValue searchDriveItemForFile(string parentItemDriveId, string parentItemId, string fileToUpload) { + + JSONValue onedriveJSONItem; + string searchName = baseName(fileToUpload); + JSONValue thisLevelChildren; + + string nextLink; + + // Create a new API Instance for this thread and initialise it + OneDriveApi checkFileOneDriveApiInstance; + checkFileOneDriveApiInstance = new OneDriveApi(appConfig); + checkFileOneDriveApiInstance.initialise(); + + for (;;) { + // query top level children + try { + thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink); + } catch (OneDriveException exception) { + // OneDrive threw an error + addLogEntry("------------------------------------------------------------------", ["debug"]); + addLogEntry("Query Error: thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink)", ["debug"]); + addLogEntry("driveId: " ~ parentItemDriveId, ["debug"]); + addLogEntry("idToQuery: " ~ parentItemId, ["debug"]); + addLogEntry("nextLink: " ~ nextLink, ["debug"]); + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. 
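// Aside - a hypothetical sketch, not the client's handleOneDriveThrottleRequest():
// honouring HTTP 429 means reading the Retry-After response header and sleeping
// for the advised number of seconds before reissuing the request. The fallback
// delay below is an assumption for illustration only.
import core.thread : Thread;
import core.time : dur;
import std.conv : to;

void waitForRetryAfter(string retryAfterHeaderValue, uint fallbackSeconds = 120) {
	uint delaySeconds = fallbackSeconds;
	try {
		// Retry-After is usually delay-seconds; it may also be an HTTP-date,
		// in which case this simple sketch just keeps the fallback value.
		delaySeconds = to!uint(retryAfterHeaderValue);
	} catch (Exception e) {
		// keep fallbackSeconds
	}
	Thread.sleep(dur!"seconds"(delaySeconds));
}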
+ handleOneDriveThrottleRequest(checkFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry thisLevelChildren = checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink)", ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to query OneDrive top level drive children on OneDrive - retrying applicable request in 30 seconds"); + addLogEntry("checkFileOneDriveApiInstance.listChildren(parentItemDriveId, parentItemId, nextLink) previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. + addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + searchDriveItemForFile(parentItemDriveId, parentItemId, fileToUpload); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, thisFunctionName); + } + } + + // process thisLevelChildren response + foreach (child; thisLevelChildren["value"].array) { + // Only looking at files + if ((child["name"].str == searchName) && (("file" in child) != null)) { + // Found the matching file, return its JSON representation + // Operations in this thread are done / complete + checkFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(checkFileOneDriveApiInstance); + // Return child + return child; + } + } + + // If a collection exceeds the default page size (200 items), the @odata.nextLink property is returned in the response + // to indicate more items are available and provide the request URL for the next page of items. 
+ if ("@odata.nextLink" in thisLevelChildren) { + // Update nextLink to next changeSet bundle + addLogEntry("Setting nextLink to (@odata.nextLink): " ~ nextLink, ["debug"]); + nextLink = thisLevelChildren["@odata.nextLink"].str; + } else break; + } + + // Operations in this thread are done / complete + checkFileOneDriveApiInstance.shutdown(); + // Free object and memory + object.destroy(checkFileOneDriveApiInstance); + // return an empty JSON item + return onedriveJSONItem; + } + // Update 'onlineDriveDetails' with the latest data about this drive void updateDriveDetailsCache(string driveId, bool quotaRestricted, bool quotaAvailable, ulong localFileSize) { @@ -7886,4 +7964,72 @@ class SyncEngine { addOrUpdateOneDriveOnlineDetails(driveId); } } + + // Create a 'root' DB Tie Record for a Shared Folder from the JSON data + void createDatabaseRootTieRecordForOnlineSharedFolder(JSONValue onedriveJSONItem) { + // Creating|Updating a DB Tie + addLogEntry("Creating|Updating a 'root' DB Tie Record for this Shared Folder: " ~ onedriveJSONItem["name"].str, ["debug"]); + addLogEntry("Raw JSON for 'root' DB Tie Record: " ~ to!string(onedriveJSONItem), ["debug"]); + + // New DB Tie Item to detail the 'root' of the Shared Folder + Item tieDBItem; + tieDBItem.name = "root"; + + // Get the right parentReference details + if (isItemRemote(onedriveJSONItem)) { + tieDBItem.driveId = onedriveJSONItem["remoteItem"]["parentReference"]["driveId"].str; + tieDBItem.id = onedriveJSONItem["remoteItem"]["id"].str; + } else { + if (onedriveJSONItem["name"].str != "root") { + tieDBItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str; + tieDBItem.id = onedriveJSONItem["parentReference"]["id"].str; + } else { + tieDBItem.driveId = onedriveJSONItem["parentReference"]["driveId"].str; + tieDBItem.id = onedriveJSONItem["id"].str; + } + } + + tieDBItem.type = ItemType.dir; + tieDBItem.mtime = SysTime.fromISOExtString(onedriveJSONItem["fileSystemInfo"]["lastModifiedDateTime"].str); + tieDBItem.parentId = null; + + // Add this DB Tie parent record to the local database + addLogEntry("Creating|Updating into local database a 'root' DB Tie record: " ~ to!string(tieDBItem), ["debug"]); + itemDB.upsert(tieDBItem); + } + + // Create a DB Tie Record for a Shared Folder + void createDatabaseTieRecordForOnlineSharedFolder(Item parentItem) { + // Creating|Updating a DB Tie + addLogEntry("Creating|Updating a DB Tie Record for this Shared Folder: " ~ parentItem.name, ["debug"]); + addLogEntry("Parent Item Record: " ~ to!string(parentItem), ["debug"]); + + // New DB Tie Item to bind the 'remote' path to our parent path + Item tieDBItem; + tieDBItem.name = parentItem.name; + tieDBItem.driveId = parentItem.remoteDriveId; + tieDBItem.id = parentItem.remoteId; + tieDBItem.type = ItemType.dir; + tieDBItem.mtime = parentItem.mtime; + + // What account type is this as this determines what 'tieDBItem.parentId' should be set to + // There is a difference in the JSON responses between 'personal' and 'business' account types for Shared Folders + // Essentially an API inconsistency + if (appConfig.accountType == "personal") { + // Set tieDBItem.parentId to null + tieDBItem.parentId = null; + } else { + // The tieDBItem.parentId needs to be the correct driveId id reference + // Query the DB + Item[] rootDriveItems; + Item dbRecord; + rootDriveItems = itemDB.selectByDriveId(parentItem.remoteDriveId); + dbRecord = rootDriveItems[0]; + tieDBItem.parentId = dbRecord.id; + } + + // Add tie DB record to the local database + 
addLogEntry("Creating|Updating into local database a DB Tie record: " ~ to!string(tieDBItem), ["debug"]); + itemDB.upsert(tieDBItem); + } } \ No newline at end of file From d2a78be39616e12038593622a468f79710eecc04 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Sun, 11 Feb 2024 17:46:07 +1100 Subject: [PATCH 053/305] Resolve that upload session are not canceled with resync option * Resolve that upload session are not canceled with resync option --- src/main.d | 17 +++++++++++------ src/sync.d | 16 ++++++++++++++++ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/src/main.d b/src/main.d index a85299787..fe0185209 100644 --- a/src/main.d +++ b/src/main.d @@ -629,12 +629,17 @@ int main(string[] cliArgs) { string localPath = "."; string remotePath = "/"; - // Check if there are interrupted upload session(s) - if (syncEngineInstance.checkForInterruptedSessionUploads) { - // Need to re-process the session upload files to resume the failed session uploads - addLogEntry("There are interrupted session uploads that need to be resumed ..."); - // Process the session upload files - syncEngineInstance.processForInterruptedSessionUploads(); + if (!appConfig.getValueBool("resync")) { + // Check if there are interrupted upload session(s) + if (syncEngineInstance.checkForInterruptedSessionUploads) { + // Need to re-process the session upload files to resume the failed session uploads + addLogEntry("There are interrupted session uploads that need to be resumed ..."); + // Process the session upload files + syncEngineInstance.processForInterruptedSessionUploads(); + } + } else { + // Clean up any upload session files due to --resync being used + syncEngineInstance.clearInterruptedSessionUploads(); } // Are we doing a single directory operation (--single-directory) ? diff --git a/src/sync.d b/src/sync.d index ea97737af..efea6b98b 100644 --- a/src/sync.d +++ b/src/sync.d @@ -7588,6 +7588,22 @@ class SyncEngine { return interruptedUploads; } + // Clear any session_upload.* files + void clearInterruptedSessionUploads() { + // Scan the filesystem for the files we are interested in, build up interruptedUploadsSessionFiles array + foreach (sessionFile; dirEntries(appConfig.configDirName, "session_upload.*", SpanMode.shallow)) { + // calculate the full path + string tempPath = buildNormalizedPath(buildPath(appConfig.configDirName, sessionFile)); + JSONValue sessionFileData = readText(tempPath).parseJSON(); + addLogEntry("Removing interrupted session upload file due to --resync for: " ~ sessionFileData["localPath"].str, ["info"]); + + // Process removal + if (!dryRun) { + safeRemove(tempPath); + } + } + } + // Process interrupted 'session_upload' files void processForInterruptedSessionUploads() { // For each upload_session file that has been found, process the data to ensure it is still valid From 1961c0cb6773c3338c2eaca8c4876bbcc8068f30 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Mon, 12 Feb 2024 06:41:36 +1100 Subject: [PATCH 054/305] Update docker.md * Update documentation regarding where to install Docker from --- docs/docker.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/docker.md b/docs/docker.md index 7c904b092..07d89820f 100644 --- a/docs/docker.md +++ b/docs/docker.md @@ -37,7 +37,9 @@ Additionally there are specific version release tags for each release. Refer to ## Configuration Steps ### 1. Install 'docker' on your platform -Install 'docker' as per your distribution platform's instructions if not already installed. 
+Install 'docker' as per your distribution platform's instructions if not already installed as per the instructions on https://docs.docker.com/engine/install/ + +**Note:** If you are using Ubuntu, do not install Docker from your distribution platform's repositories. You must install Docker from Docker provided packages. ### 2. Configure 'docker' to allow non-privileged users to run Docker commands Read https://docs.docker.com/engine/install/linux-postinstall/ to configure the 'docker' user group with your user account to allow your non 'root' user to run 'docker' commands. From 1be74fc982f90c8f81ea9006eac622842024e5a3 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Mon, 12 Feb 2024 09:45:15 +1100 Subject: [PATCH 055/305] Resolve Issue #2625 * Local files should be safely backed up when the item is not in sync locally to prevent data loss when they are deleted online --- src/sync.d | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/src/sync.d b/src/sync.d index efea6b98b..b0d8490fb 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1114,9 +1114,21 @@ class SyncEngine { // Change is to delete an item addLogEntry("Handing a OneDrive Deleted Item", ["debug"]); if (existingDBEntry) { - // Flag to delete - addLogEntry("Flagging to delete item locally: " ~ to!string(onedriveJSONItem), ["debug"]); - idsToDelete ~= [thisItemDriveId, thisItemId]; + // Is the item to delete locally actually in sync with OneDrive currently? + // What is the source of this item data? + string itemSource = "online"; + + // Compute this deleted items path based on the database entries + string localPathToDelete = computeItemPath(existingDatabaseItem.driveId, existingDatabaseItem.parentId) ~ "/" ~ existingDatabaseItem.name; + + if (isItemSynced(existingDatabaseItem, localPathToDelete, itemSource)) { + // Flag to delete + addLogEntry("Flagging to delete item locally: " ~ to!string(onedriveJSONItem), ["debug"]); + idsToDelete ~= [thisItemDriveId, thisItemId]; + } else { + // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not + safeBackup(localPathToDelete, dryRun); + } } else { // Flag to ignore addLogEntry("Flagging item to skip: " ~ to!string(onedriveJSONItem), ["debug"]); @@ -2540,7 +2552,7 @@ class SyncEngine { } // Add to pathFakeDeletedArray - // We dont want to try and upload this item again, so we need to track this object + // We dont want to try and upload this item again, so we need to track this objects removal if (dryRun) { // We need to add './' here so that it can be correctly searched to ensure it is not uploaded string pathToAdd = "./" ~ path; @@ -4362,9 +4374,6 @@ class SyncEngine { // Step 2: Query for the path online if not found in the local database if (!parentPathFoundInDB) { // parent path not found in database - - addLogEntry("PARENT NOT FOUND IN DATABASE - QUERY ONLINE", ["info"]); - try { addLogEntry("Attempting to query OneDrive Online for this parent path as path not found in local database: " ~ parentPath, ["debug"]); onlinePathData = createDirectoryOnlineOneDriveApiInstance.getPathDetails(parentPath); From c7bfd0da10bd4ce72523c84b1d508946b2897c06 Mon Sep 17 00:00:00 2001 From: JC-comp <147694781+JC-comp@users.noreply.github.com> Date: Mon, 12 Feb 2024 13:33:44 +0800 Subject: [PATCH 056/305] Implement Blocking Logger Mechanism (#2627) * Replace polling logger * Backward compatibility with ldc v1.20.1 --- src/log.d | 18 ++++++++++++++---- src/main.d | 3 +-- 2 files changed, 15 insertions(+), 6 
deletions(-) diff --git a/src/log.d b/src/log.d index 79b6c2308..d41b6e296 100644 --- a/src/log.d +++ b/src/log.d @@ -7,6 +7,7 @@ import std.file; import std.datetime; import std.concurrency; import std.typecons; +import core.sync.condition; import core.sync.mutex; import core.thread; import std.format; @@ -26,6 +27,7 @@ class LogBuffer { private: string[3][] buffer; Mutex bufferLock; + Condition condReady; string logFilePath; bool writeToFile; bool verboseLogging; @@ -38,6 +40,7 @@ class LogBuffer { this(bool verboseLogging, bool debugLogging) { // Initialise the mutex bufferLock = new Mutex(); + condReady = new Condition(bufferLock); // Initialise other items this.logFilePath = logFilePath; this.writeToFile = writeToFile; @@ -50,11 +53,14 @@ class LogBuffer { flushThread.start(); } - ~this() { - isRunning = false; + void shutdown() { + synchronized(bufferLock) { + isRunning = false; + condReady.notify(); + } flushThread.join(); flush(); - } + } shared void logThisMessage(string message, string[] levels = ["info"]) { // Generate the timestamp for this log entry @@ -86,6 +92,7 @@ class LogBuffer { } } } + (cast()condReady).notify(); } } @@ -99,14 +106,17 @@ class LogBuffer { private void flushBuffer() { while (isRunning) { - Thread.sleep(dur!("msecs")(200)); flush(); } + stdout.flush(); } private void flush() { string[3][] messages; synchronized(bufferLock) { + while (buffer.empty && isRunning) { + condReady.wait(); + } messages = buffer; buffer.length = 0; } diff --git a/src/main.d b/src/main.d index fe0185209..32bed8c27 100644 --- a/src/main.d +++ b/src/main.d @@ -1150,9 +1150,8 @@ void performStandardExitProcess(string scopeCaller = null) { } else { addLogEntry("Application exit", ["debug"]); addLogEntry("#######################################################################################################################################", ["logFileOnly"]); - // Sleep to allow any final logging output to be printed - this is needed as we are using buffered logging output - Thread.sleep(dur!("msecs")(500)); // Destroy the shared logging buffer + (cast() logBuffer).shutdown(); object.destroy(logBuffer); } } From 7621bbab657ff850f9f4eedeee16c9a754b16e24 Mon Sep 17 00:00:00 2001 From: abraunegg Date: Mon, 12 Feb 2024 16:45:55 +1100 Subject: [PATCH 057/305] Update log.d Fix indentation --- src/log.d | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/log.d b/src/log.d index d41b6e296..b1354349e 100644 --- a/src/log.d +++ b/src/log.d @@ -25,15 +25,15 @@ shared MonoTime lastInsertedTime; class LogBuffer { private: - string[3][] buffer; - Mutex bufferLock; + string[3][] buffer; + Mutex bufferLock; Condition condReady; - string logFilePath; - bool writeToFile; - bool verboseLogging; - bool debugLogging; - Thread flushThread; - bool isRunning; + string logFilePath; + bool writeToFile; + bool verboseLogging; + bool debugLogging; + Thread flushThread; + bool isRunning; bool sendGUINotification; public: From a92221bb50d54c952d8df05ffc28e9c05e620840 Mon Sep 17 00:00:00 2001 From: JC-comp <147694781+JC-comp@users.noreply.github.com> Date: Mon, 12 Feb 2024 14:12:20 +0800 Subject: [PATCH 058/305] Add support for batched local monitor processing (#2609) * Fix file upload fallback for all scenario * Add support for batched monitor * Add recursive match * Adjust logging output * Add error handling --- src/main.d | 11 +- src/monitor.d | 374 +++++++++++++++++++++++++++++++++----------------- src/sync.d | 364 +++++++++++++++++++++++++----------------------- 3 files changed, 
443 insertions(+), 306 deletions(-) diff --git a/src/main.d b/src/main.d index 32bed8c27..67f36a656 100644 --- a/src/main.d +++ b/src/main.d @@ -748,16 +748,11 @@ int main(string[] cliArgs) { }; // Delegated function for when inotify detects a local file has been changed - filesystemMonitor.onFileChanged = delegate(string path) { + filesystemMonitor.onFileChanged = delegate(string[] changedLocalFilesToUploadToOneDrive) { // Handle a potentially locally changed file // Logging for this event moved to handleLocalFileTrigger() due to threading and false triggers from scanLocalFilesystemPathForNewData() above - try { - syncEngineInstance.handleLocalFileTrigger(path); - } catch (CurlException e) { - addLogEntry("Offline, cannot upload changed item: " ~ path, ["verbose"]); - } catch(Exception e) { - addLogEntry("Cannot upload file changes/creation: " ~ e.msg, ["info", "notify"]); - } + addLogEntry("[M] Total number of local file changed: " ~ to!string(changedLocalFilesToUploadToOneDrive.length)); + syncEngineInstance.handleLocalFileTrigger(changedLocalFilesToUploadToOneDrive); }; // Delegated function for when inotify detects a delete event diff --git a/src/monitor.d b/src/monitor.d index d046829bd..694771e69 100644 --- a/src/monitor.d +++ b/src/monitor.d @@ -8,6 +8,7 @@ import core.sys.linux.sys.inotify; import core.sys.posix.poll; import core.sys.posix.unistd; import core.sys.posix.sys.select; +import core.thread; import core.time; import std.algorithm; import std.concurrency; @@ -135,7 +136,6 @@ shared class MonitorBackgroundWorker { } } - void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid) { try { @@ -146,6 +146,96 @@ void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid) worker.shutdown(); } +enum ActionType { + moved, + deleted, + changed, + createDir +} + +struct Action { + ActionType type; + bool skipped; + string src; + string dst; +} + +struct ActionHolder { + Action[] actions; + ulong[string] srcMap; + + void append(ActionType type, string src, string dst=null) { + ulong[] pendingTargets; + switch (type) { + case ActionType.changed: + if (src in srcMap && actions[srcMap[src]].type == ActionType.changed) { + // skip duplicate operations + return; + } + break; + case ActionType.createDir: + break; + case ActionType.deleted: + if (src in srcMap) { + ulong pendingTarget = srcMap[src]; + // Skip operations require reading local file that is gone + switch (actions[pendingTarget].type) { + case ActionType.changed: + case ActionType.createDir: + actions[srcMap[src]].skipped = true; + srcMap.remove(src); + break; + default: + break; + } + } + break; + case ActionType.moved: + for(int i = 0; i < actions.length; i++) { + // Only match for latest operation + if (actions[i].src in srcMap) { + switch (actions[i].type) { + case ActionType.changed: + case ActionType.createDir: + // check if the source is the prefix of the target + string prefix = src ~ "/"; + string target = actions[i].src; + if (prefix[0] != '.') + prefix = "./" ~ prefix; + if (target[0] != '.') + target = "./" ~ target; + string comm = commonPrefix(prefix, target); + if (src == actions[i].src || comm.length == prefix.length) { + // Hold operations require reading local file that is moved after the target is moved online + pendingTargets ~= i; + actions[i].skipped = true; + srcMap.remove(actions[i].src); + if (comm.length == target.length) + actions[i].src = dst; + else + actions[i].src = dst ~ target[comm.length - 1 .. 
target.length]; + } + break; + default: + break; + } + } + } + break; + default: + break; + } + actions ~= Action(type, false, src, dst); + srcMap[src] = actions.length - 1; + + foreach (pendingTarget; pendingTargets) { + actions ~= actions[pendingTarget]; + actions[$-1].skipped = false; + srcMap[actions[$-1].src] = actions.length - 1; + } + } +} + final class Monitor { // Class variables ApplicationConfig appConfig; @@ -171,12 +261,14 @@ final class Monitor { // Configure function delegates void delegate(string path) onDirCreated; - void delegate(string path) onFileChanged; + void delegate(string[] path) onFileChanged; void delegate(string path) onDelete; void delegate(string from, string to) onMove; // List of paths that were moved, not deleted bool[string] movedNotDeleted; + + ActionHolder actionHolder; // Configure the class varaible to consume the application configuration including selective sync this(ApplicationConfig appConfig, ClientSideFiltering selectiveSync) { @@ -371,138 +463,145 @@ final class Monitor { }; while (true) { - int ret = poll(&fds, 1, 0); - if (ret == -1) throw new MonitorException("poll failed"); - else if (ret == 0) break; // no events available - - size_t length = read(worker.fd, buffer.ptr, buffer.length); - if (length == -1) throw new MonitorException("read failed"); - - int i = 0; - while (i < length) { - inotify_event *event = cast(inotify_event*) &buffer[i]; - string path; - string evalPath; - - // inotify event debug - addLogEntry("inotify event wd: " ~ to!string(event.wd), ["debug"]); - addLogEntry("inotify event mask: " ~ to!string(event.mask), ["debug"]); - addLogEntry("inotify event cookie: " ~ to!string(event.cookie), ["debug"]); - addLogEntry("inotify event len: " ~ to!string(event.len), ["debug"]); - addLogEntry("inotify event name: " ~ to!string(event.name), ["debug"]); - - // inotify event handling - if (event.mask & IN_ACCESS) addLogEntry("inotify event flag: IN_ACCESS", ["debug"]); - if (event.mask & IN_MODIFY) addLogEntry("inotify event flag: IN_MODIFY", ["debug"]); - if (event.mask & IN_ATTRIB) addLogEntry("inotify event flag: IN_ATTRIB", ["debug"]); - if (event.mask & IN_CLOSE_WRITE) addLogEntry("inotify event flag: IN_CLOSE_WRITE", ["debug"]); - if (event.mask & IN_CLOSE_NOWRITE) addLogEntry("inotify event flag: IN_CLOSE_NOWRITE", ["debug"]); - if (event.mask & IN_MOVED_FROM) addLogEntry("inotify event flag: IN_MOVED_FROM", ["debug"]); - if (event.mask & IN_MOVED_TO) addLogEntry("inotify event flag: IN_MOVED_TO", ["debug"]); - if (event.mask & IN_CREATE) addLogEntry("inotify event flag: IN_CREATE", ["debug"]); - if (event.mask & IN_DELETE) addLogEntry("inotify event flag: IN_DELETE", ["debug"]); - if (event.mask & IN_DELETE_SELF) addLogEntry("inotify event flag: IN_DELETE_SELF", ["debug"]); - if (event.mask & IN_MOVE_SELF) addLogEntry("inotify event flag: IN_MOVE_SELF", ["debug"]); - if (event.mask & IN_UNMOUNT) addLogEntry("inotify event flag: IN_UNMOUNT", ["debug"]); - if (event.mask & IN_Q_OVERFLOW) addLogEntry("inotify event flag: IN_Q_OVERFLOW", ["debug"]); - if (event.mask & IN_IGNORED) addLogEntry("inotify event flag: IN_IGNORED", ["debug"]); - if (event.mask & IN_CLOSE) addLogEntry("inotify event flag: IN_CLOSE", ["debug"]); - if (event.mask & IN_MOVE) addLogEntry("inotify event flag: IN_MOVE", ["debug"]); - if (event.mask & IN_ONLYDIR) addLogEntry("inotify event flag: IN_ONLYDIR", ["debug"]); - if (event.mask & IN_DONT_FOLLOW) addLogEntry("inotify event flag: IN_DONT_FOLLOW", ["debug"]); - if (event.mask & IN_EXCL_UNLINK) 
addLogEntry("inotify event flag: IN_EXCL_UNLINK", ["debug"]); - if (event.mask & IN_MASK_ADD) addLogEntry("inotify event flag: IN_MASK_ADD", ["debug"]); - if (event.mask & IN_ISDIR) addLogEntry("inotify event flag: IN_ISDIR", ["debug"]); - if (event.mask & IN_ONESHOT) addLogEntry("inotify event flag: IN_ONESHOT", ["debug"]); - if (event.mask & IN_ALL_EVENTS) addLogEntry("inotify event flag: IN_ALL_EVENTS", ["debug"]); - - // skip events that need to be ignored - if (event.mask & IN_IGNORED) { - // forget the directory associated to the watch descriptor - wdToDirName.remove(event.wd); - goto skip; - } else if (event.mask & IN_Q_OVERFLOW) { - throw new MonitorException("inotify overflow, inotify events will be missing"); - } - - // if the event is not to be ignored, obtain path - path = getPath(event); - // configure the skip_dir & skip skip_file comparison item - evalPath = path.strip('.'); - - // Skip events that should be excluded based on application configuration - // We cant use isDir or isFile as this information is missing from the inotify event itself - // Thus this causes a segfault when attempting to query this - https://github.com/abraunegg/onedrive/issues/995 - - // Based on the 'type' of event & object type (directory or file) check that path against the 'right' user exclusions - // Directory events should only be compared against skip_dir and file events should only be compared against skip_file - if (event.mask & IN_ISDIR) { - // The event in question contains IN_ISDIR event mask, thus highly likely this is an event on a directory - // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched - if (selectiveSync.isDirNameExcluded(evalPath)) { - // The path to evaluate matches a path that the user has configured to skip + bool hasNotification = false; + while (true) { + int ret = poll(&fds, 1, 0); + if (ret == -1) throw new MonitorException("poll failed"); + else if (ret == 0) break; // no events available + hasNotification = true; + size_t length = read(worker.fd, buffer.ptr, buffer.length); + if (length == -1) throw new MonitorException("read failed"); + + int i = 0; + while (i < length) { + inotify_event *event = cast(inotify_event*) &buffer[i]; + string path; + string evalPath; + + // inotify event debug + addLogEntry("inotify event wd: " ~ to!string(event.wd), ["debug"]); + addLogEntry("inotify event mask: " ~ to!string(event.mask), ["debug"]); + addLogEntry("inotify event cookie: " ~ to!string(event.cookie), ["debug"]); + addLogEntry("inotify event len: " ~ to!string(event.len), ["debug"]); + addLogEntry("inotify event name: " ~ to!string(event.name), ["debug"]); + + // inotify event handling + if (event.mask & IN_ACCESS) addLogEntry("inotify event flag: IN_ACCESS", ["debug"]); + if (event.mask & IN_MODIFY) addLogEntry("inotify event flag: IN_MODIFY", ["debug"]); + if (event.mask & IN_ATTRIB) addLogEntry("inotify event flag: IN_ATTRIB", ["debug"]); + if (event.mask & IN_CLOSE_WRITE) addLogEntry("inotify event flag: IN_CLOSE_WRITE", ["debug"]); + if (event.mask & IN_CLOSE_NOWRITE) addLogEntry("inotify event flag: IN_CLOSE_NOWRITE", ["debug"]); + if (event.mask & IN_MOVED_FROM) addLogEntry("inotify event flag: IN_MOVED_FROM", ["debug"]); + if (event.mask & IN_MOVED_TO) addLogEntry("inotify event flag: IN_MOVED_TO", ["debug"]); + if (event.mask & IN_CREATE) addLogEntry("inotify event flag: IN_CREATE", ["debug"]); + if (event.mask & IN_DELETE) addLogEntry("inotify event flag: IN_DELETE", ["debug"]); + if (event.mask & 
IN_DELETE_SELF) addLogEntry("inotify event flag: IN_DELETE_SELF", ["debug"]); + if (event.mask & IN_MOVE_SELF) addLogEntry("inotify event flag: IN_MOVE_SELF", ["debug"]); + if (event.mask & IN_UNMOUNT) addLogEntry("inotify event flag: IN_UNMOUNT", ["debug"]); + if (event.mask & IN_Q_OVERFLOW) addLogEntry("inotify event flag: IN_Q_OVERFLOW", ["debug"]); + if (event.mask & IN_IGNORED) addLogEntry("inotify event flag: IN_IGNORED", ["debug"]); + if (event.mask & IN_CLOSE) addLogEntry("inotify event flag: IN_CLOSE", ["debug"]); + if (event.mask & IN_MOVE) addLogEntry("inotify event flag: IN_MOVE", ["debug"]); + if (event.mask & IN_ONLYDIR) addLogEntry("inotify event flag: IN_ONLYDIR", ["debug"]); + if (event.mask & IN_DONT_FOLLOW) addLogEntry("inotify event flag: IN_DONT_FOLLOW", ["debug"]); + if (event.mask & IN_EXCL_UNLINK) addLogEntry("inotify event flag: IN_EXCL_UNLINK", ["debug"]); + if (event.mask & IN_MASK_ADD) addLogEntry("inotify event flag: IN_MASK_ADD", ["debug"]); + if (event.mask & IN_ISDIR) addLogEntry("inotify event flag: IN_ISDIR", ["debug"]); + if (event.mask & IN_ONESHOT) addLogEntry("inotify event flag: IN_ONESHOT", ["debug"]); + if (event.mask & IN_ALL_EVENTS) addLogEntry("inotify event flag: IN_ALL_EVENTS", ["debug"]); + + // skip events that need to be ignored + if (event.mask & IN_IGNORED) { + // forget the directory associated to the watch descriptor + wdToDirName.remove(event.wd); goto skip; + } else if (event.mask & IN_Q_OVERFLOW) { + throw new MonitorException("inotify overflow, inotify events will be missing"); } - } else { - // The event in question missing the IN_ISDIR event mask, thus highly likely this is an event on a file - // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched - if (selectiveSync.isFileNameExcluded(evalPath)) { - // The path to evaluate matches a file that the user has configured to skip + + // if the event is not to be ignored, obtain path + path = getPath(event); + // configure the skip_dir & skip skip_file comparison item + evalPath = path.strip('.'); + + // Skip events that should be excluded based on application configuration + // We cant use isDir or isFile as this information is missing from the inotify event itself + // Thus this causes a segfault when attempting to query this - https://github.com/abraunegg/onedrive/issues/995 + + // Based on the 'type' of event & object type (directory or file) check that path against the 'right' user exclusions + // Directory events should only be compared against skip_dir and file events should only be compared against skip_file + if (event.mask & IN_ISDIR) { + // The event in question contains IN_ISDIR event mask, thus highly likely this is an event on a directory + // This due to if the user has specified in skip_dir an exclusive path: '/path' - that is what must be matched + if (selectiveSync.isDirNameExcluded(evalPath)) { + // The path to evaluate matches a path that the user has configured to skip + goto skip; + } + } else { + // The event in question missing the IN_ISDIR event mask, thus highly likely this is an event on a file + // This due to if the user has specified in skip_file an exclusive path: '/path/file' - that is what must be matched + if (selectiveSync.isFileNameExcluded(evalPath)) { + // The path to evaluate matches a file that the user has configured to skip + goto skip; + } + } + + // is the path, excluded via sync_list + if (selectiveSync.isPathExcludedViaSyncList(path)) { + // The path to evaluate matches a 
directory or file that the user has configured not to include in the sync goto skip; } - } - - // is the path, excluded via sync_list - if (selectiveSync.isPathExcludedViaSyncList(path)) { - // The path to evaluate matches a directory or file that the user has configured not to include in the sync - goto skip; - } - - // handle the inotify events - if (event.mask & IN_MOVED_FROM) { - addLogEntry("event IN_MOVED_FROM: " ~ path, ["debug"]); - cookieToPath[event.cookie] = path; - movedNotDeleted[path] = true; // Mark as moved, not deleted - } else if (event.mask & IN_MOVED_TO) { - addLogEntry("event IN_MOVED_TO: " ~ path, ["debug"]); - if (event.mask & IN_ISDIR) addRecursive(path); - auto from = event.cookie in cookieToPath; - if (from) { - cookieToPath.remove(event.cookie); - if (useCallbacks) onMove(*from, path); - movedNotDeleted.remove(*from); // Clear moved status - } else { - // Handle file moved in from outside + + // handle the inotify events + if (event.mask & IN_MOVED_FROM) { + addLogEntry("event IN_MOVED_FROM: " ~ path, ["debug"]); + cookieToPath[event.cookie] = path; + movedNotDeleted[path] = true; // Mark as moved, not deleted + } else if (event.mask & IN_MOVED_TO) { + addLogEntry("event IN_MOVED_TO: " ~ path, ["debug"]); + if (event.mask & IN_ISDIR) addRecursive(path); + auto from = event.cookie in cookieToPath; + if (from) { + cookieToPath.remove(event.cookie); + if (useCallbacks) actionHolder.append(ActionType.moved, *from, path); + movedNotDeleted.remove(*from); // Clear moved status + } else { + // Handle file moved in from outside + if (event.mask & IN_ISDIR) { + if (useCallbacks) actionHolder.append(ActionType.createDir, path); + } else { + if (useCallbacks) actionHolder.append(ActionType.changed, path); + } + } + } else if (event.mask & IN_CREATE) { + addLogEntry("event IN_CREATE: " ~ path, ["debug"]); if (event.mask & IN_ISDIR) { - if (useCallbacks) onDirCreated(path); + addRecursive(path); + if (useCallbacks) actionHolder.append(ActionType.createDir, path); + } + } else if (event.mask & IN_DELETE) { + if (path in movedNotDeleted) { + movedNotDeleted.remove(path); // Ignore delete for moved files } else { - if (useCallbacks) onFileChanged(path); + addLogEntry("event IN_DELETE: " ~ path, ["debug"]); + if (useCallbacks) actionHolder.append(ActionType.deleted, path); } - } - } else if (event.mask & IN_CREATE) { - addLogEntry("event IN_CREATE: " ~ path, ["debug"]); - if (event.mask & IN_ISDIR) { - addRecursive(path); - if (useCallbacks) onDirCreated(path); - } - } else if (event.mask & IN_DELETE) { - if (path in movedNotDeleted) { - movedNotDeleted.remove(path); // Ignore delete for moved files + } else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) { + addLogEntry("event IN_CLOSE_WRITE and not IN_ISDIR: " ~ path, ["debug"]); + if (useCallbacks) actionHolder.append(ActionType.changed, path); } else { - addLogEntry("event IN_DELETE: " ~ path, ["debug"]); - if (useCallbacks) onDelete(path); + addLogEntry("event unhandled: " ~ path, ["debug"]); + assert(0); } - } else if ((event.mask & IN_CLOSE_WRITE) && !(event.mask & IN_ISDIR)) { - addLogEntry("event IN_CLOSE_WRITE and not IN_ISDIR: " ~ path, ["debug"]); - if (useCallbacks) onFileChanged(path); - } else { - addLogEntry("event unhandled: " ~ path, ["debug"]); - assert(0); - } - skip: - i += inotify_event.sizeof + event.len; + skip: + i += inotify_event.sizeof + event.len; + } + Thread.sleep(dur!"seconds"(1)); } + if (!hasNotification) break; + processChanges(); + // Assume that the items moved outside the 
watched directory have been deleted foreach (cookie, path; cookieToPath) { addLogEntry("Deleting cookie|watch (post loop): " ~ path, ["debug"]); @@ -515,6 +614,35 @@ final class Monitor { } } + private void processChanges() { + string[] changes; + + foreach(action; actionHolder.actions) { + if (action.skipped) + continue; + switch (action.type) { + case ActionType.changed: + changes ~= action.src; + break; + case ActionType.deleted: + onDelete(action.src); + break; + case ActionType.createDir: + onDirCreated(action.src); + break; + case ActionType.moved: + onMove(action.src, action.dst); + break; + default: + break; + } + } + if (!changes.empty) + onFileChanged(changes); + + object.destroy(actionHolder); + } + Tid watch() { initialised = true; return spawn(&startMonitorJob, worker, thisTid); diff --git a/src/sync.d b/src/sync.d index b0d8490fb..7bdf296e4 100644 --- a/src/sync.d +++ b/src/sync.d @@ -3486,154 +3486,157 @@ class SyncEngine { // For each batch of files to upload, upload the changed data to OneDrive foreach (chunk; databaseItemsWhereContentHasChanged.chunks(batchSize)) { - uploadChangedLocalFileToOneDrive(chunk); + processChangedLocalItemsToUploadInParallel(chunk); + } + } + + // Upload the changed file batches in parallel + void processChangedLocalItemsToUploadInParallel(string[3][] array) { + foreach (i, localItemDetails; taskPool.parallel(array)) { + addLogEntry("Upload Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]); + uploadChangedLocalFileToOneDrive(localItemDetails); + addLogEntry("Upload Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]); } } // Upload changed local files to OneDrive in parallel - void uploadChangedLocalFileToOneDrive(string[3][] array) { - - foreach (i, localItemDetails; taskPool.parallel(array)) { + void uploadChangedLocalFileToOneDrive(string[3] localItemDetails) { - addLogEntry("Thread " ~ to!string(i) ~ " Starting: " ~ to!string(Clock.currTime()), ["debug"]); + // These are the details of the item we need to upload + string changedItemParentId = localItemDetails[0]; + string changedItemId = localItemDetails[1]; + string localFilePath = localItemDetails[2]; - // These are the details of the item we need to upload - string changedItemParentId = localItemDetails[0]; - string changedItemId = localItemDetails[1]; - string localFilePath = localItemDetails[2]; - - // How much space is remaining on OneDrive - ulong remainingFreeSpace; - // Did the upload fail? - bool uploadFailed = false; - // Did we skip due to exceeding maximum allowed size? - bool skippedMaxSize = false; - // Did we skip to an exception error? - bool skippedExceptionError = false; - - // Unfortunatly, we cant store an array of Item's ... 
so we have to re-query the DB again - unavoidable extra processing here - // This is because the Item[] has no other functions to allow is to parallel process those elements, so we have to use a string array as input to this function - Item dbItem; - itemDB.selectById(changedItemParentId, changedItemId, dbItem); + addLogEntry("uploadChangedLocalFileToOneDrive: " ~ localFilePath, ["debug"]); - // Fetch the details from cachedOnlineDriveData - // - cachedOnlineDriveData.quotaRestricted; - // - cachedOnlineDriveData.quotaAvailable; - // - cachedOnlineDriveData.quotaRemaining; - driveDetailsCache cachedOnlineDriveData; - cachedOnlineDriveData = getDriveDetails(dbItem.driveId); - remainingFreeSpace = cachedOnlineDriveData.quotaRemaining; - - // Get the file size from the actual file - ulong thisFileSizeLocal = getSize(localFilePath); - // Get the file size from the DB data - ulong thisFileSizeFromDB; - if (!dbItem.size.empty) { - thisFileSizeFromDB = to!ulong(dbItem.size); - } else { - thisFileSizeFromDB = 0; - } - - // 'remainingFreeSpace' online includes the current file online - // We need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value - ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal; - - // Based on what we know, for this thread - can we safely upload this modified local file? - addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpace), ["debug"]); - addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]); - JSONValue uploadResponse; - - bool spaceAvailableOnline = false; - // If 'personal' accounts, if driveId == defaultDriveId, then we will have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused - // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - cachedOnlineDriveData.quotaRestricted will be set as true - // If 'business' accounts, if driveId == defaultDriveId, then we will potentially have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused - // If 'business' accounts, if driveId != defaultDriveId, then we will potentially have quota data, but it most likely will be a 0 value - cachedOnlineDriveData.quotaRestricted will be set as true - - // Is there quota available for the given drive where we are uploading to? - if (cachedOnlineDriveData.quotaAvailable) { - // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload? - if (calculatedSpaceOnlinePostUpload > 0) { - // Based on this thread action, we beleive that there is space available online to upload - proceed - spaceAvailableOnline = true; - } - } - - // Is quota being restricted? - if (cachedOnlineDriveData.quotaRestricted) { - // Space available online is being restricted - so we have no way to really know if there is space available online + // How much space is remaining on OneDrive + ulong remainingFreeSpace; + // Did the upload fail? + bool uploadFailed = false; + // Did we skip due to exceeding maximum allowed size? + bool skippedMaxSize = false; + // Did we skip to an exception error? + bool skippedExceptionError = false; + + // Unfortunatly, we cant store an array of Item's ... 
so we have to re-query the DB again - unavoidable extra processing here + // This is because the Item[] has no other functions to allow is to parallel process those elements, so we have to use a string array as input to this function + Item dbItem; + itemDB.selectById(changedItemParentId, changedItemId, dbItem); + + // Fetch the details from cachedOnlineDriveData + // - cachedOnlineDriveData.quotaRestricted; + // - cachedOnlineDriveData.quotaAvailable; + // - cachedOnlineDriveData.quotaRemaining; + driveDetailsCache cachedOnlineDriveData; + cachedOnlineDriveData = getDriveDetails(dbItem.driveId); + remainingFreeSpace = cachedOnlineDriveData.quotaRemaining; + + // Get the file size from the actual file + ulong thisFileSizeLocal = getSize(localFilePath); + // Get the file size from the DB data + ulong thisFileSizeFromDB; + if (!dbItem.size.empty) { + thisFileSizeFromDB = to!ulong(dbItem.size); + } else { + thisFileSizeFromDB = 0; + } + + // 'remainingFreeSpace' online includes the current file online + // We need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value + ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal; + + // Based on what we know, for this thread - can we safely upload this modified local file? + addLogEntry("This Thread Estimated Free Space Online: " ~ to!string(remainingFreeSpace), ["debug"]); + addLogEntry("This Thread Calculated Free Space Online Post Upload: " ~ to!string(calculatedSpaceOnlinePostUpload), ["debug"]); + JSONValue uploadResponse; + + bool spaceAvailableOnline = false; + // If 'personal' accounts, if driveId == defaultDriveId, then we will have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused + // If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - cachedOnlineDriveData.quotaRestricted will be set as true + // If 'business' accounts, if driveId == defaultDriveId, then we will potentially have quota data - cachedOnlineDriveData.quotaRemaining will be updated so it can be reused + // If 'business' accounts, if driveId != defaultDriveId, then we will potentially have quota data, but it most likely will be a 0 value - cachedOnlineDriveData.quotaRestricted will be set as true + + // Is there quota available for the given drive where we are uploading to? + if (cachedOnlineDriveData.quotaAvailable) { + // Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload? + if (calculatedSpaceOnlinePostUpload > 0) { + // Based on this thread action, we beleive that there is space available online to upload - proceed spaceAvailableOnline = true; } + } + + // Is quota being restricted? + if (cachedOnlineDriveData.quotaRestricted) { + // Space available online is being restricted - so we have no way to really know if there is space available online + spaceAvailableOnline = true; + } + + // Do we have space available or is space available being restricted (so we make the blind assumption that there is space available) + if (spaceAvailableOnline) { + // Does this file exceed the maximum file size to upload to OneDrive? 
+ if (thisFileSizeLocal <= maxUploadFileSize) { + // Attempt to upload the modified file + // Error handling is in performModifiedFileUpload(), and the JSON that is responded with - will either be null or a valid JSON object containing the upload result + uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); - // Do we have space available or is space available being restricted (so we make the blind assumption that there is space available) - if (spaceAvailableOnline) { - // Does this file exceed the maximum file size to upload to OneDrive? - if (thisFileSizeLocal <= maxUploadFileSize) { - // Attempt to upload the modified file - // Error handling is in performModifiedFileUpload(), and the JSON that is responded with - will either be null or a valid JSON object containing the upload result - uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); - - // Evaluate the returned JSON uploadResponse - // If there was an error uploading the file, uploadResponse should be empty and invalid - if (uploadResponse.type() != JSONType.object) { - uploadFailed = true; - skippedExceptionError = true; - } - - } else { - // Skip file - too large + // Evaluate the returned JSON uploadResponse + // If there was an error uploading the file, uploadResponse should be empty and invalid + if (uploadResponse.type() != JSONType.object) { uploadFailed = true; - skippedMaxSize = true; + skippedExceptionError = true; } + } else { - // Cant upload this file - no space available + // Skip file - too large uploadFailed = true; + skippedMaxSize = true; + } + } else { + // Cant upload this file - no space available + uploadFailed = true; + } + + // Did the upload fail? + if (uploadFailed) { + // Upload failed .. why? + // No space available online + if (!spaceAvailableOnline) { + addLogEntry("Skipping uploading modified file " ~ localFilePath ~ " due to insufficient free space available on Microsoft OneDrive", ["info", "notify"]); } + // File exceeds max allowed size + if (skippedMaxSize) { + addLogEntry("Skipping uploading this modified file as it exceeds the maximum size allowed by OneDrive: " ~ localFilePath, ["info", "notify"]); + } + // Generic message + if (skippedExceptionError) { + // normal failure message if API or exception error generated + addLogEntry("Uploading modified file " ~ localFilePath ~ " ... failed!", ["info", "notify"]); + } + } else { + // Upload was successful + addLogEntry("Uploading modified file " ~ localFilePath ~ " ... done.", ["info", "notify"]); - // Did the upload fail? - if (uploadFailed) { - // Upload failed .. why? - // No space available online - if (!spaceAvailableOnline) { - addLogEntry("Skipping uploading modified file " ~ localFilePath ~ " due to insufficient free space available on Microsoft OneDrive", ["info", "notify"]); - } - // File exceeds max allowed size - if (skippedMaxSize) { - addLogEntry("Skipping uploading this modified file as it exceeds the maximum size allowed by OneDrive: " ~ localFilePath, ["info", "notify"]); - } - // Generic message - if (skippedExceptionError) { - // normal failure message if API or exception error generated - addLogEntry("Uploading modified file " ~ localFilePath ~ " ... failed!", ["info", "notify"]); - } - } else { - // Upload was successful - addLogEntry("Uploading modified file " ~ localFilePath ~ " ... 
done.", ["info", "notify"]); - - // Save JSON item in database - saveItem(uploadResponse); - - // Update the 'cachedOnlineDriveData' record for this 'dbItem.driveId' so that this is tracked as accuratly as possible for other threads - updateDriveDetailsCache(dbItem.driveId, cachedOnlineDriveData.quotaRestricted, cachedOnlineDriveData.quotaAvailable, thisFileSizeLocal); + // Save JSON item in database + saveItem(uploadResponse); + + // Update the 'cachedOnlineDriveData' record for this 'dbItem.driveId' so that this is tracked as accuratly as possible for other threads + updateDriveDetailsCache(dbItem.driveId, cachedOnlineDriveData.quotaRestricted, cachedOnlineDriveData.quotaAvailable, thisFileSizeLocal); + + // Check the integrity of the uploaded modified file if not in a --dry-run scenario + if (!dryRun) { + // Perform the integrity of the uploaded modified file + performUploadIntegrityValidationChecks(uploadResponse, localFilePath, thisFileSizeLocal); - // Check the integrity of the uploaded modified file if not in a --dry-run scenario - if (!dryRun) { - // Perform the integrity of the uploaded modified file - performUploadIntegrityValidationChecks(uploadResponse, localFilePath, thisFileSizeLocal); - - // Update the date / time of the file online to match the local item - // Get the local file last modified time - SysTime localModifiedTime = timeLastModified(localFilePath).toUTC(); - localModifiedTime.fracSecs = Duration.zero; - // Get the latest eTag, and use that - string etagFromUploadResponse = uploadResponse["eTag"].str; - // Attempt to update the online date time stamp based on our local data - uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime, etagFromUploadResponse); - } + // Update the date / time of the file online to match the local item + // Get the local file last modified time + SysTime localModifiedTime = timeLastModified(localFilePath).toUTC(); + localModifiedTime.fracSecs = Duration.zero; + // Get the latest eTag, and use that + string etagFromUploadResponse = uploadResponse["eTag"].str; + // Attempt to update the online date time stamp based on our local data + uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime, etagFromUploadResponse); } - - addLogEntry("Thread " ~ to!string(i) ~ " Finished: " ~ to!string(Clock.currTime()), ["debug"]); - - } // end of 'foreach (i, localItemDetails; array.enumerate)' + } } // Perform the upload of a locally modified file to OneDrive @@ -3943,10 +3946,17 @@ class SyncEngine { // Perform a filesystem walk to uncover new data to upload to OneDrive void scanLocalFilesystemPathForNewData(string path) { - // Cleanup array memory before we start adding files newLocalFilesToUploadToOneDrive = []; + // Perform a filesystem walk to uncover new data + scanLocalFilesystemPathForNewDataToUpload(path); + + // Upload new data that has been identified + processNewLocalItemsToUpload(); + } + + void scanLocalFilesystemPathForNewDataToUpload(string path) { // To improve logging output for this function, what is the 'logical path' we are scanning for file & folder differences? 
string logPath; if (path == ".") { @@ -3977,9 +3987,11 @@ class SyncEngine { // Perform the filesystem walk of this path, building an array of new items to upload scanPathForNewData(path); - if (!appConfig.surpressLoggingOutput) { - if (appConfig.verbosityCount == 0) - addLogEntry("\n", ["consoleOnlyNoNewLine"]); + if (isDir(path)) { + if (!appConfig.surpressLoggingOutput) { + if (appConfig.verbosityCount == 0) + addLogEntry("\n", ["consoleOnlyNoNewLine"]); + } } // To finish off the processing items, this is needed to reflect this in the log @@ -3990,7 +4002,10 @@ class SyncEngine { auto elapsedTime = finishTime - startTime; addLogEntry("Elapsed Time Filesystem Walk: " ~ to!string(elapsedTime), ["debug"]); - + } + + // Perform a filesystem walk to uncover new data to upload to OneDrive + void processNewLocalItemsToUpload() { // Upload new data that has been identified // Are there any items to download post fetching the /delta data? if (!newLocalFilesToUploadToOneDrive.empty) { @@ -4036,22 +4051,16 @@ class SyncEngine { // Cleanup array memory after uploading all files newLocalFilesToUploadToOneDrive = []; } - - if (!databaseItemsWhereContentHasChanged.empty) { - // There are changed local files that were in the DB to upload - addLogEntry("Changed local items to upload to OneDrive: " ~ to!string(databaseItemsWhereContentHasChanged.length)); - processChangedLocalItemsToUpload(); - // Cleanup array memory - databaseItemsWhereContentHasChanged = []; - } } // Scan this path for new data void scanPathForNewData(string path) { // Add a processing '.' - if (!appConfig.surpressLoggingOutput) { - if (appConfig.verbosityCount == 0) - addProcessingDotEntry(); + if (isDir(path)) { + if (!appConfig.surpressLoggingOutput) { + if (appConfig.verbosityCount == 0) + addProcessingDotEntry(); + } } ulong maxPathLength; @@ -4271,37 +4280,42 @@ class SyncEngine { } // Handle a single file inotify trigger when using --monitor - void handleLocalFileTrigger(string localFilePath) { + void handleLocalFileTrigger(string[] changedLocalFilesToUploadToOneDrive) { // Is this path a new file or an existing one? // Normally we would use pathFoundInDatabase() to calculate, but we need 'databaseItem' as well if the item is in the database - Item databaseItem; - bool fileFoundInDB = false; - string[3][] modifiedItemToUpload; - - foreach (driveId; onlineDriveDetails.keys) { - if (itemDB.selectByPath(localFilePath, driveId, databaseItem)) { - fileFoundInDB = true; - break; - } - } - - // Was the file found in the database? - if (!fileFoundInDB) { - // This is a new file as it is not in the database - // Log that the file has been added locally - addLogEntry("[M] New local file added: " ~ localFilePath, ["verbose"]); - scanLocalFilesystemPathForNewData(localFilePath); - } else { - // This is a potentially modified file, needs to be handled as such. Is the item truly modified? 
- if (!testFileHash(localFilePath, databaseItem)) { - // The local file failed the hash comparison test - there is a data difference - // Log that the file has changed locally - addLogEntry("[M] Local file changed: " ~ localFilePath, ["verbose"]); - // Add the modified item to the array to upload - modifiedItemToUpload ~= [databaseItem.driveId, databaseItem.id, localFilePath]; - uploadChangedLocalFileToOneDrive(modifiedItemToUpload); + foreach (localFilePath; changedLocalFilesToUploadToOneDrive) { + try { + Item databaseItem; + bool fileFoundInDB = false; + + foreach (driveId; onlineDriveDetails.keys) { + if (itemDB.selectByPath(localFilePath, driveId, databaseItem)) { + fileFoundInDB = true; + break; + } + } + + // Was the file found in the database? + if (!fileFoundInDB) { + // This is a new file as it is not in the database + // Log that the file has been added locally + addLogEntry("[M] New local file added: " ~ localFilePath, ["verbose"]); + scanLocalFilesystemPathForNewDataToUpload(localFilePath); + } else { + // This is a potentially modified file, needs to be handled as such. Is the item truly modified? + if (!testFileHash(localFilePath, databaseItem)) { + // The local file failed the hash comparison test - there is a data difference + // Log that the file has changed locally + addLogEntry("[M] Local file changed: " ~ localFilePath, ["verbose"]); + // Add the modified item to the array to upload + uploadChangedLocalFileToOneDrive([databaseItem.driveId, databaseItem.id, localFilePath]); + } + } + } catch(Exception e) { + addLogEntry("Cannot upload file changes/creation: " ~ e.msg, ["info", "notify"]); } } + processNewLocalItemsToUpload(); } // Query the database to determine if this path is within the existing database @@ -4959,10 +4973,10 @@ class SyncEngine { string changedItemParentId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; string changedItemId = fileDetailsFromOneDrive["id"].str; addLogEntry("Skipping uploading this file as moving it to upload as a modified file (online item already exists): " ~ fileToUpload); - databaseItemsWhereContentHasChanged ~= [changedItemParentId, changedItemId, fileToUpload]; // In order for the processing of the local item as a 'changed' item, unfortunatly we need to save the online data to the local DB saveItem(fileDetailsFromOneDrive); + uploadChangedLocalFileToOneDrive([changedItemParentId, changedItemId, fileToUpload]); } } catch (OneDriveException exception) { // If we get a 404 .. the file is not online .. this is what we want .. 
file does not exist online @@ -6811,7 +6825,7 @@ class SyncEngine { if (!itemDB.selectByPath(oldPath, appConfig.defaultDriveId, oldItem)) { // The old path|item is not synced with the database, upload as a new file addLogEntry("Moved local item was not in-sync with local databse - uploading as new item"); - uploadNewFile(newPath); + scanLocalFilesystemPathForNewData(newPath); return; } From b4c10e6eee69524abfb94ff8e78bc2072b3f5582 Mon Sep 17 00:00:00 2001 From: JC-comp <147694781+JC-comp@users.noreply.github.com> Date: Wed, 14 Feb 2024 02:02:48 +0800 Subject: [PATCH 059/305] Shutdown Inotify Monitor Gracefully (#2628) * Adjust pipline * Shutdown monitor * Backward compatible for ldc v1.20.1 * Fix shutdown process * Update logging output and logging levels --------- Co-authored-by: abraunegg --- src/main.d | 8 ++-- src/monitor.d | 105 ++++++++++++++++++++++++++++---------------------- 2 files changed, 63 insertions(+), 50 deletions(-) diff --git a/src/main.d b/src/main.d index 67f36a656..0e1b0700c 100644 --- a/src/main.d +++ b/src/main.d @@ -727,7 +727,6 @@ int main(string[] cliArgs) { } // Configure the monitor class - Tid workerTid; filesystemMonitor = new Monitor(appConfig, selectiveSync); // Delegated function for when inotify detects a new local directory has been created @@ -803,7 +802,6 @@ int main(string[] cliArgs) { try { addLogEntry("Initialising filesystem inotify monitoring ..."); filesystemMonitor.initialise(); - workerTid = filesystemMonitor.watch(); addLogEntry("Performing initial syncronisation to ensure consistent local state ..."); } catch (MonitorException e) { // monitor class initialisation failed @@ -1000,9 +998,7 @@ int main(string[] cliArgs) { if(filesystemMonitor.initialised) { // If local monitor is on // start the worker and wait for event - if(!filesystemMonitor.isWorking()) { - workerTid.send(1); - } + filesystemMonitor.send(true); } if(webhookEnabled) { @@ -1143,6 +1139,8 @@ void performStandardExitProcess(string scopeCaller = null) { selectiveSync = null; syncEngineInstance = null; } else { + addLogEntry("Waiting for all internal threads to complete before exiting application", ["verbose"]); + thread_joinAll(); addLogEntry("Application exit", ["debug"]); addLogEntry("#######################################################################################################################################", ["logFileOnly"]); // Destroy the shared logging buffer diff --git a/src/monitor.d b/src/monitor.d index 694771e69..1368df7e2 100644 --- a/src/monitor.d +++ b/src/monitor.d @@ -15,6 +15,7 @@ import std.concurrency; import std.exception; import std.file; import std.path; +import std.process; import std.regex; import std.stdio; import std.string; @@ -35,19 +36,24 @@ class MonitorException: ErrnoException { } } -shared class MonitorBackgroundWorker { +class MonitorBackgroundWorker { // inotify file descriptor int fd; - private bool working; + Pipe p; + bool isAlive; - void initialise() { + this() { + isAlive = true; + p = pipe(); + } + + shared void initialise() { fd = inotify_init(); - working = false; if (fd < 0) throw new MonitorException("inotify_init failed"); } // Add this path to be monitored - private int addInotifyWatch(string pathname) { + shared int addInotifyWatch(string pathname) { int wd = inotify_add_watch(fd, toStringz(pathname), mask); if (wd < 0) { if (errno() == ENOSPC) { @@ -78,60 +84,58 @@ shared class MonitorBackgroundWorker { return wd; } - int remove(int wd) { + shared int removeInotifyWatch(int wd) { return inotify_rm_watch(fd, wd); } - bool 
isWorking() { - return working; - } - - void watch(Tid callerTid) { + shared void watch(Tid callerTid) { // On failure, send -1 to caller int res; // wait for the caller to be ready - int isAlive = receiveOnly!int(); + receiveOnly!int(); while (isAlive) { fd_set fds; FD_ZERO (&fds); FD_SET(fd, &fds); + // Listen for messages from the caller + FD_SET((cast()p).readEnd.fileno, &fds); - working = true; res = select(FD_SETSIZE, &fds, null, null, null); if(res == -1) { if(errno() == EINTR) { // Received an interrupt signal but no events are available - // try update work staus and directly watch again - receiveTimeout(dur!"seconds"(1), (int msg) { - isAlive = msg; - }); + // directly watch again } else { // Error occurred, tell caller to terminate. - callCaller(callerTid, -1); - working = false; + callerTid.send(-1); break; } } else { // Wake up caller - callCaller(callerTid, 1); - // Wait for the caller to be ready - isAlive = receiveOnly!int(); + callerTid.send(1); + + // wait for the caller to be ready + if (isAlive) + isAlive = receiveOnly!bool(); } } } - void callCaller(Tid callerTid, int msg) { - working = false; - callerTid.send(msg); + shared void interrupt() { + isAlive = false; + (cast()p).writeEnd.writeln("done"); + (cast()p).writeEnd.flush(); } - void shutdown() { + shared void shutdown() { + isAlive = false; if (fd > 0) { close(fd); fd = 0; + (cast()p).close(); } } } @@ -142,8 +146,8 @@ void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid) worker.watch(callerTid); } catch (OwnerTerminated error) { // caller is terminated + worker.shutdown(); } - worker.shutdown(); } enum ActionType { @@ -249,6 +253,8 @@ final class Monitor { bool check_nosync = false; // check if initialised bool initialised = false; + // Worker Tid + Tid workerTid; // Configure Private Class Variables shared(MonitorBackgroundWorker) worker; @@ -287,7 +293,7 @@ final class Monitor { assert(onDirCreated && onFileChanged && onDelete && onMove); if (!buffer) buffer = new void[4096]; - worker = new shared(MonitorBackgroundWorker); + worker = cast(shared) new MonitorBackgroundWorker; worker.initialise(); // from which point do we start watching for changes? 
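Editor's note (an illustration, not part of the patch): the reworked `MonitorBackgroundWorker` above no longer blocks solely on the inotify descriptor. Its `watch()` loop now passes both the inotify fd and the read end of a `std.process` pipe to `select()`, so `interrupt()` can wake a worker that is blocked in `select()` simply by writing to that pipe. The sketch below reduces that self-pipe wake-up pattern to its essentials; the names `WakeableWatcher` and `watchFd` are invented for the example, and the thread coordination is simplified compared to the real code.

```d
import core.sys.posix.sys.select;
import core.thread;
import core.time : dur;
import std.algorithm.comparison : max;
import std.process : pipe, Pipe;
import std.stdio;

class WakeableWatcher {
    private int watchFd;    // descriptor we actually care about (the inotify fd in the real code)
    private Pipe wakePipe;  // self-pipe used purely to break select() out of its block
    private bool running = true;

    this(int fdToWatch) {
        watchFd = fdToWatch;
        wakePipe = pipe();
    }

    // Runs on the background thread: block until either descriptor becomes readable
    void loop() {
        while (running) {
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(watchFd, &fds);
            FD_SET(wakePipe.readEnd.fileno, &fds);
            int nfds = max(watchFd, wakePipe.readEnd.fileno) + 1;
            if (select(nfds, &fds, null, null, null) <= 0) continue;
            if (FD_ISSET(wakePipe.readEnd.fileno, &fds)) break; // main thread asked us to stop
            // ... otherwise read and process events from watchFd here ...
        }
        writeln("watcher: exiting cleanly");
    }

    // Called from the main thread: flip the flag, then poke the pipe so select() returns
    // (the real patch additionally coordinates via message passing; omitted here for brevity)
    void shutdown() {
        running = false;
        wakePipe.writeEnd.writeln("done");
        wakePipe.writeEnd.flush();
    }
}

void main() {
    auto watcher = new WakeableWatcher(0); // stdin as a harmless stand-in for an inotify fd
    auto worker = new Thread(&watcher.loop);
    worker.start();
    Thread.sleep(dur!"seconds"(1));
    watcher.shutdown();
    worker.join(); // returns promptly; no timeouts or signals required
}
```

Waking the worker this way is what helps the `thread_joinAll()` call added to `performStandardExitProcess()` earlier in this patch return promptly, rather than the application waiting on a thread that is still parked inside `select()`.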
@@ -300,13 +306,28 @@ final class Monitor { monitorPath = "."; } addRecursive(monitorPath); + + // Start monitoring + workerTid = spawn(&startMonitorJob, worker, thisTid); + + initialised = true; + } + + // Communication with worker + void send(bool isAlive) { + workerTid.send(isAlive); } // Shutdown the monitor class void shutdown() { if(!initialised) return; - worker.shutdown(); + initialised = false; + // Release all resources + removeAll(); + // Notify the worker that the monitor has been shutdown + worker.interrupt(); + send(false); wdToDirName = null; } @@ -419,9 +440,16 @@ final class Monitor { } // Remove a watch descriptor + private void removeAll() { + string[int] copy = wdToDirName.dup; + foreach (wd, path; copy) { + remove(wd); + } + } + private void remove(int wd) { assert(wd in wdToDirName); - int ret = worker.remove(wd); + int ret = worker.removeInotifyWatch(wd); if (ret < 0) throw new MonitorException("inotify_rm_watch failed"); addLogEntry("Monitored directory removed: " ~ to!string(wdToDirName[wd]), ["verbose"]); wdToDirName.remove(wd); @@ -432,7 +460,7 @@ final class Monitor { path ~= "/"; foreach (wd, dirname; wdToDirName) { if (dirname.startsWith(path)) { - int ret = worker.remove(wd); + int ret = worker.removeInotifyWatch(wd); if (ret < 0) throw new MonitorException("inotify_rm_watch failed"); wdToDirName.remove(wd); addLogEntry("Monitored directory removed: " ~ dirname, ["verbose"]); @@ -448,10 +476,6 @@ final class Monitor { return path; } - shared(MonitorBackgroundWorker) getWorker() { - return worker; - } - // Update void update(bool useCallbacks = true) { if(!initialised) @@ -613,7 +637,7 @@ final class Monitor { addLogEntry("inotify events flushed", ["debug"]); } } - + private void processChanges() { string[] changes; @@ -642,13 +666,4 @@ final class Monitor { object.destroy(actionHolder); } - - Tid watch() { - initialised = true; - return spawn(&startMonitorJob, worker, thisTid); - } - - bool isWorking() { - return worker.isWorking(); - } } From 6134a6f84f6c45af97a8570c79ca6c6c9eea727f Mon Sep 17 00:00:00 2001 From: abraunegg Date: Mon, 19 Feb 2024 05:24:34 +1100 Subject: [PATCH 060/305] Update install.md * Add CentOS details --- docs/install.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/install.md b/docs/install.md index f5338122d..a2509cd7d 100644 --- a/docs/install.md +++ b/docs/install.md @@ -12,6 +12,8 @@ Distribution packages may be of an older release when compared to the latest rel |---------------------------------|------------------------------------------------------------------------------|:---------------:|:----:|:------:|:-----:|:-------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Alpine Linux | [onedrive](https://pkgs.alpinelinux.org/packages?name=onedrive&branch=edge) |Alpine Linux Edge package|❌|✔|❌|✔ | | | Arch Linux

Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |AUR package|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)

**Note:** You must first install 'base-devel' as this is a prerequisite for using the AUR

**Note:** If asked regarding a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'

**Note:** System must have at least 1GB of memory & 1GB swap space +| CentOS 8 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |CentOS 8 package||✔||✔| **Note:** You must install the EPEL Repository first | +| CentOS 9 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |CentOS 9 package||✔||✔| **Note:** You must install the EPEL Repository first | | Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |Debian 11 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories

It is recommended for Debian 11 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | | Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |Debian 12 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories

It is recommended that for Debian 12 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | | Debian Sid | [onedrive](https://packages.debian.org/sid/onedrive) |Debian Sid package|✔|✔|✔|✔| | @@ -102,7 +104,7 @@ For notifications the following is also necessary: sudo yum install libnotify-devel ``` -### Dependencies: Fedora > Version 18 / CentOS 8.x / RHEL 8.x / RHEL 9.x +### Dependencies: Fedora > Version 18 / CentOS 8.x / CentOS 9.x/ RHEL 8.x / RHEL 9.x ```text sudo dnf groupinstall 'Development Tools' sudo dnf install libcurl-devel sqlite-devel From fd5f5e06b91b1516b2ed14a2e0062a8568e2b99e Mon Sep 17 00:00:00 2001 From: abraunegg Date: Mon, 19 Feb 2024 06:12:14 +1100 Subject: [PATCH 061/305] Update install.md * Add missing X's --- docs/install.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/install.md b/docs/install.md index a2509cd7d..dd68c8b5b 100644 --- a/docs/install.md +++ b/docs/install.md @@ -12,8 +12,8 @@ Distribution packages may be of an older release when compared to the latest rel |---------------------------------|------------------------------------------------------------------------------|:---------------:|:----:|:------:|:-----:|:-------:|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Alpine Linux | [onedrive](https://pkgs.alpinelinux.org/packages?name=onedrive&branch=edge) |Alpine Linux Edge package|❌|✔|❌|✔ | | | Arch Linux

Manjaro Linux | [onedrive-abraunegg](https://aur.archlinux.org/packages/onedrive-abraunegg/) |AUR package|✔|✔|✔|✔ | Install via: `pamac build onedrive-abraunegg` from the Arch Linux User Repository (AUR)

**Note:** You must first install 'base-devel' as this is a prerequisite for using the AUR

**Note:** If asked regarding a provider for 'd-runtime' and 'd-compiler', select 'liblphobos' and 'ldc'

**Note:** System must have at least 1GB of memory & 1GB swap space -| CentOS 8 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |CentOS 8 package||✔||✔| **Note:** You must install the EPEL Repository first | -| CentOS 9 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |CentOS 9 package||✔||✔| **Note:** You must install the EPEL Repository first | +| CentOS 8 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |CentOS 8 package|❌|✔|❌|✔| **Note:** You must install the EPEL Repository first | +| CentOS 9 | [onedrive](https://koji.fedoraproject.org/koji/packageinfo?packageID=26044) |CentOS 9 package|❌|✔|❌|✔| **Note:** You must install the EPEL Repository first | | Debian 11 | [onedrive](https://packages.debian.org/bullseye/source/onedrive) |Debian 11 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories

It is recommended for Debian 11 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | | Debian 12 | [onedrive](https://packages.debian.org/bookworm/source/onedrive) |Debian 12 package|✔|✔|✔|✔| **Note:** Do not install from Debian Package Repositories

It is recommended that for Debian 12 that you install from OpenSuSE Build Service using the Debian Package Install [Instructions](ubuntu-package-install.md) | | Debian Sid | [onedrive](https://packages.debian.org/sid/onedrive) |Debian Sid package|✔|✔|✔|✔| | From fecec1ba72ee83e02e50fc60224c6c924ff6347c Mon Sep 17 00:00:00 2001 From: abraunegg Date: Mon, 19 Feb 2024 10:29:49 +1100 Subject: [PATCH 062/305] Handle #2626 | Case 2-1 and Case 2-2 items * Specifically resolve #2626 | Case 2-1 and Case 2-2 items --- src/sync.d | 231 +++++++++++++++++++++++++++++++++++------------------ src/util.d | 5 +- 2 files changed, 157 insertions(+), 79 deletions(-) diff --git a/src/sync.d b/src/sync.d index 7bdf296e4..46a63d2c7 100644 --- a/src/sync.d +++ b/src/sync.d @@ -1127,7 +1127,9 @@ class SyncEngine { idsToDelete ~= [thisItemDriveId, thisItemId]; } else { // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not - safeBackup(localPathToDelete, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(localPathToDelete, dryRun, renamedPath); } } else { // Flag to ignore @@ -1737,7 +1739,9 @@ class SyncEngine { addLogEntry("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); } else { // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not - safeBackup(newItemPath, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(newItemPath, dryRun, renamedPath); } } } else { @@ -1754,7 +1758,9 @@ class SyncEngine { addLogEntry("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: " ~ newItemPath, ["info", "notify"]); } else { // local data protection is configured, safeBackup the local file, passing in if we are performing a --dry-run or not - safeBackup(newItemPath, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(newItemPath, dryRun, renamedPath); } } @@ -1847,13 +1853,17 @@ class SyncEngine { // The destination item is different addLogEntry("The destination is occupied with a different item, renaming the conflicting file...", ["verbose"]); // Backup this item, passing in if we are performing a --dry-run or not - safeBackup(changedItemPath, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(changedItemPath, dryRun, renamedPath); } } else { // The to be overwritten item is not already in the itemdb, so it should saved to avoid data loss addLogEntry("The destination is occupied by an existing un-synced file, renaming the conflicting file...", ["verbose"]); // Backup this item, passing in if we are performing a --dry-run or not - safeBackup(changedItemPath, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(changedItemPath, dryRun, renamedPath); } } @@ -2047,7 +2057,9 @@ class SyncEngine { addLogEntry("The local file to replace (" ~ newItemPath ~ ") has been modified locally since the last download. 
Renaming it to avoid potential local data loss."); // Perform the local safeBackup of the existing local file, passing in if we are performing a --dry-run or not - safeBackup(newItemPath, dryRun); + // In case the renamed path is needed + string renamedPath; + safeBackup(newItemPath, dryRun, renamedPath); } } @@ -3610,7 +3622,11 @@ class SyncEngine { // Generic message if (skippedExceptionError) { // normal failure message if API or exception error generated - addLogEntry("Uploading modified file " ~ localFilePath ~ " ... failed!", ["info", "notify"]); + // If Issue #2626 | Case 2-1 is triggered, the file we tried to upload was renamed, then uploaded as a new name + if (exists(localFilePath)) { + // Issue #2626 | Case 2-1 was not triggered, file still exists on local filesystem + addLogEntry("Uploading modified file " ~ localFilePath ~ " ... failed!", ["info", "notify"]); + } } } else { // Upload was successful @@ -3647,21 +3663,116 @@ class SyncEngine { uploadFileOneDriveApiInstance = new OneDriveApi(appConfig); uploadFileOneDriveApiInstance.initialise(); + // Configure JSONValue variables we use for a session upload + JSONValue currentOnlineData; + JSONValue uploadSessionData; + string currentETag; + // Is this a dry-run scenario? if (!dryRun) { // Do we use simpleUpload or create an upload session? bool useSimpleUpload = false; - //if ((appConfig.accountType == "personal") && (thisFileSizeLocal <= sessionThresholdFileSize)) { + // Try and get the absolute latest object details from online + try { + currentOnlineData = uploadFileOneDriveApiInstance.getPathDetailsById(dbItem.driveId, dbItem.id); + } catch (OneDriveException exception) { + + string thisFunctionName = getFunctionName!({}); + // HTTP request returned status code 408,429,503,504 + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // Handle the 429 + if (exception.httpStatusCode == 429) { + // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. + handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); + addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); + } + // re-try the specific changes queries + if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { + // 408 - Request Time Out + // 503 - Service Unavailable + // 504 - Gateway Timeout + // Transient error - try again in 30 seconds + auto errorArray = splitLines(exception.msg); + addLogEntry(to!string(errorArray[0]) ~ " when attempting to obtain latest file details from OneDrive - retrying applicable request in 30 seconds"); + addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); + + // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
+ addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); + Thread.sleep(dur!"seconds"(30)); + } + // re-try original request - retried for 429, 503, 504 - but loop back calling this function + addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); + performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); + } else { + // Default operation if not 408,429,503,504 errors + // display what the error is + displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); + } + } + + // Was a valid JSON response provided? + if (currentOnlineData.type() == JSONType.object) { + // Does the response contain an eTag? + if (hasETag(currentOnlineData)) { + // Use the value returned from online + currentETag = currentOnlineData["eTag"].str; + } else { + // Use the database value + currentETag = dbItem.eTag; + } + } else { + // no valid JSON response + currentETag = dbItem.eTag; + } + // What upload method should be used? if (thisFileSizeLocal <= sessionThresholdFileSize) { useSimpleUpload = true; } + // If the filesize is greater than zero , and we have valid 'latest' online data is the online file matching what we think is in the database? + if ((thisFileSizeLocal > 0) && (currentOnlineData.type() == JSONType.object)) { + // Issue #2626 | Case 2-1 + // If the 'online' file is newer, this will be overwritten with the file from the local filesystem - potentially consituting online data loss + Item onlineFile = makeItem(currentOnlineData); + + // Which file is technically newer? The local file or the remote file? + SysTime localModifiedTime = timeLastModified(localFilePath).toUTC(); + SysTime onlineModifiedTime = onlineFile.mtime; + + // Reduce time resolution to seconds before comparing + localModifiedTime.fracSecs = Duration.zero; + onlineModifiedTime.fracSecs = Duration.zero; + + // Which file is newer? If local is newer, it will be uploaded as a modified file in the correct manner + if (localModifiedTime < onlineModifiedTime) { + // Online File is actually newer than the locally modified file + addLogEntry("currentOnlineData: " ~ to!string(currentOnlineData), ["debug"]); + addLogEntry("onlineFile: " ~ to!string(onlineFile), ["debug"]); + addLogEntry("database item: " ~ to!string(dbItem), ["debug"]); + addLogEntry("Skipping uploading this item as a locally modified file, will upload as a new file (online file already exists and is newer): " ~ localFilePath); + + // Online is newer, rename local, then upload the renamed file + // We need to know the renamed path so we can upload it + string renamedPath; + // Rename the local path + safeBackup(localFilePath, dryRun, renamedPath); + // Upload renamed local file as a new file + uploadNewFile(renamedPath); + + // Process the database entry removal for the original file. In a --dry-run scenario, this is being done against a DB copy. 
+ // This is done so we can download the newer online file + itemDB.deleteById(dbItem.driveId, dbItem.id); + + // This file is now uploaded, return from here, but this will trigger a response that the upload failed (technically for the original filename it did, but we renamed it, then uploaded it + return uploadResponse; + } + } + // We can only upload zero size files via simpleFileUpload regardless of account type // Reference: https://github.com/OneDrive/onedrive-api-docs/issues/53 // Additionally, all files where file size is < 4MB should be uploaded by simpleUploadReplace - everything else should use a session to upload the modified file - if ((thisFileSizeLocal == 0) || (useSimpleUpload)) { // Must use Simple Upload to replace the file online try { @@ -3705,70 +3816,11 @@ class SyncEngine { displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } else { - // Configure JSONValue variables we use for a session upload - JSONValue currentOnlineData; - JSONValue uploadSessionData; - string currentETag; - // As this is a unique thread, the sessionFilePath for where we save the data needs to be unique // The best way to do this is generate a 10 digit alphanumeric string, and use this as the file extention string threadUploadSessionFilePath = appConfig.uploadSessionFilePath ~ "." ~ generateAlphanumericString(); - // Get the absolute latest object details from online - try { - currentOnlineData = uploadFileOneDriveApiInstance.getPathDetailsByDriveId(dbItem.driveId, localFilePath); - } catch (OneDriveException exception) { - - string thisFunctionName = getFunctionName!({}); - // HTTP request returned status code 408,429,503,504 - if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 429) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { - // Handle the 429 - if (exception.httpStatusCode == 429) { - // HTTP request returned status code 429 (Too Many Requests). We need to leverage the response Retry-After HTTP header to ensure minimum delay until the throttle is removed. - handleOneDriveThrottleRequest(uploadFileOneDriveApiInstance); - addLogEntry("Retrying original request that generated the OneDrive HTTP 429 Response Code (Too Many Requests) - attempting to retry " ~ thisFunctionName, ["debug"]); - } - // re-try the specific changes queries - if ((exception.httpStatusCode == 408) || (exception.httpStatusCode == 503) || (exception.httpStatusCode == 504)) { - // 408 - Request Time Out - // 503 - Service Unavailable - // 504 - Gateway Timeout - // Transient error - try again in 30 seconds - auto errorArray = splitLines(exception.msg); - addLogEntry(to!string(errorArray[0]) ~ " when attempting to obtain latest file details from OneDrive - retrying applicable request in 30 seconds"); - addLogEntry(thisFunctionName ~ " previously threw an error - retrying", ["debug"]); - - // The server, while acting as a proxy, did not receive a timely response from the upstream server it needed to access in attempting to complete the request. 
- addLogEntry("Thread sleeping for 30 seconds as the server did not receive a timely response from the upstream server it needed to access in attempting to complete the request", ["debug"]); - Thread.sleep(dur!"seconds"(30)); - } - // re-try original request - retried for 429, 503, 504 - but loop back calling this function - addLogEntry("Retrying Function: " ~ thisFunctionName, ["debug"]); - performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal); - } else { - // Default operation if not 408,429,503,504 errors - // display what the error is - displayOneDriveErrorMessage(exception.msg, getFunctionName!({})); - } - - } - - // Was a valid JSON response provided? - if (currentOnlineData.type() == JSONType.object) { - // Does the response contain an eTag? - if (hasETag(currentOnlineData)) { - // Use the value returned from online - currentETag = currentOnlineData["eTag"].str; - } else { - // Use the database value - currentETag = dbItem.eTag; - } - } else { - // no valid JSON response - currentETag = dbItem.eTag; - } - - // Create the Upload Session + // Create the upload session try { uploadSessionData = createSessionFileUpload(uploadFileOneDriveApiInstance, localFilePath, dbItem.driveId, dbItem.parentId, baseName(localFilePath), currentETag, threadUploadSessionFilePath); } catch (OneDriveException exception) { @@ -3810,7 +3862,7 @@ class SyncEngine { displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } - // Perform the Upload using the session + // Perform the upload using the session that has been created try { uploadResponse = performSessionFileUpload(uploadFileOneDriveApiInstance, thisFileSizeLocal, uploadSessionData, threadUploadSessionFilePath); } catch (OneDriveException exception) { @@ -3851,7 +3903,6 @@ class SyncEngine { displayFileSystemErrorMessage(e.msg, getFunctionName!({})); } } - } else { // We are in a --dry-run scenario uploadResponse = createFakeResponse(localFilePath); @@ -4816,7 +4867,6 @@ class SyncEngine { parentPathFoundInDB = true; } } - } // If the parent path was found in the DB, to ensure we are uploading the the right location 'parentItem.driveId' must not be empty @@ -4968,15 +5018,42 @@ class SyncEngine { // The local file we are attempting to upload as a new file is different to the existing file online addLogEntry("Triggering newfile upload target already exists edge case, where the online item does not match what we are trying to upload", ["debug"]); - // If the 'online' file is newer, this will be overwritten with the file from the local filesystem - consituting online data loss + // Issue #2626 | Case 2-2 (resync) + + // If the 'online' file is newer, this will be overwritten with the file from the local filesystem - potentially consituting online data loss // The file 'version history' online will have to be used to 'recover' the prior online file - string changedItemParentId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; + string changedItemParentDriveId = fileDetailsFromOneDrive["parentReference"]["driveId"].str; string changedItemId = fileDetailsFromOneDrive["id"].str; - addLogEntry("Skipping uploading this file as moving it to upload as a modified file (online item already exists): " ~ fileToUpload); + addLogEntry("Skipping uploading this item as a new file, will upload as a modified file (online file already exists): " ~ fileToUpload); - // In order for the processing of the local item as a 'changed' item, unfortunatly we need to save the online data to the local DB + // In order for the processing of the local item 
as a 'changed' item, unfortunatly we need to save the online data of the existing online file to the local DB saveItem(fileDetailsFromOneDrive); - uploadChangedLocalFileToOneDrive([changedItemParentId, changedItemId, fileToUpload]); + + // Which file is technically newer? The local file or the remote file? + Item onlineFile = makeItem(fileDetailsFromOneDrive); + SysTime localModifiedTime = timeLastModified(fileToUpload).toUTC(); + SysTime onlineModifiedTime = onlineFile.mtime; + + // Reduce time resolution to seconds before comparing + localModifiedTime.fracSecs = Duration.zero; + onlineModifiedTime.fracSecs = Duration.zero; + + // Which file is newer? + if (localModifiedTime >= onlineModifiedTime) { + // Upload the locally modified file as-is, as it is newer + uploadChangedLocalFileToOneDrive([changedItemParentDriveId, changedItemId, fileToUpload]); + } else { + // Online is newer, rename local, then upload the renamed file + // We need to know the renamed path so we can upload it + string renamedPath; + // Rename the local path + safeBackup(fileToUpload, dryRun, renamedPath); + // Upload renamed local file as a new file + uploadNewFile(renamedPath); + // Process the database entry removal for the original file. In a --dry-run scenario, this is being done against a DB copy. + // This is done so we can download the newer online file + itemDB.deleteById(changedItemParentDriveId, changedItemId); + } } } catch (OneDriveException exception) { // If we get a 404 .. the file is not online .. this is what we want .. file does not exist online diff --git a/src/util.d b/src/util.d index d35bb9627..56d1d7e7e 100644 --- a/src/util.d +++ b/src/util.d @@ -48,7 +48,7 @@ static this() { } // Creates a safe backup of the given item, and only performs the function if not in a --dry-run scenario -void safeBackup(const(char)[] path, bool dryRun) { +void safeBackup(const(char)[] path, bool dryRun, out string renamedPath) { auto ext = extension(path); auto newPath = path.chomp(ext) ~ "-" ~ deviceName; int n = 2; @@ -87,7 +87,8 @@ void safeBackup(const(char)[] path, bool dryRun) { // // Use rename() as Linux is POSIX compliant, we have an atomic operation where at no point in time the 'to' is missing. 
try { - rename(path, newPath); + rename(path, newPath); + renamedPath = to!string(newPath); } catch (Exception e) { // Handle exceptions, e.g., log error addLogEntry("Renaming of local file failed for " ~ to!string(path) ~ ": " ~ e.msg, ["error"]); From 03386c14993ce6cca41f0171133ad66bf0155e92 Mon Sep 17 00:00:00 2001 From: JC-comp <147694781+JC-comp@users.noreply.github.com> Date: Wed, 21 Feb 2024 02:13:26 +0800 Subject: [PATCH 063/305] Seperate OneDriveWebhook from OnedriveAPI instance (#2607) * Separate OneDrive webhook from OneDriveAPI * Ensure compatibility with the monitor mode --- Makefile.in | 1 + src/arsd/cgi.d | 9 +- src/main.d | 52 +++--- src/onedrive.d | 451 +++++-------------------------------------------- src/webhook.d | 339 +++++++++++++++++++++++++++++++++++++ 5 files changed, 419 insertions(+), 433 deletions(-) create mode 100644 src/webhook.d diff --git a/Makefile.in b/Makefile.in index 3cab7aeb8..8bc177526 100644 --- a/Makefile.in +++ b/Makefile.in @@ -73,6 +73,7 @@ SOURCES = \ src/qxor.d \ src/curlEngine.d \ src/onedrive.d \ + src/webhook.d \ src/sync.d \ src/itemdb.d \ src/sqlite.d \ diff --git a/src/arsd/cgi.d b/src/arsd/cgi.d index 79f5feaad..d9a3e6bdf 100644 --- a/src/arsd/cgi.d +++ b/src/arsd/cgi.d @@ -683,6 +683,7 @@ enum long defaultMaxContentLength = 5_000_000; public import std.string; public import std.stdio; public import std.conv; +import std.concurrency; import std.uri; import std.uni; import std.algorithm.comparison; @@ -3910,14 +3911,16 @@ struct RequestServer { If you want the forking worker process server, you do need to compile with the embedded_httpd_processes config though. +/ - void serveEmbeddedHttp(alias fun, CustomCgi = Cgi, long maxContentLength = defaultMaxContentLength)(ThisFor!fun _this) { + shared void serveEmbeddedHttp(alias fun, T, CustomCgi = Cgi, long maxContentLength = defaultMaxContentLength)(shared T _this) { globalStopFlag = false; static if(__traits(isStaticFunction, fun)) - alias funToUse = fun; + void funToUse(CustomCgi cgi) { + fun(_this, cgi); + } else void funToUse(CustomCgi cgi) { static if(__VERSION__ > 2097) - __traits(child, _this, fun)(cgi); + __traits(child, _inst_this, fun)(_inst_this, cgi); else static assert(0, "Not implemented in your compiler version!"); } auto manager = new ListeningConnectionManager(listeningHost, listeningPort, &doThreadHttpConnection!(CustomCgi, funToUse), null, useFork, numberOfThreads); diff --git a/src/main.d b/src/main.d index 0e1b0700c..019f2e758 100644 --- a/src/main.d +++ b/src/main.d @@ -31,13 +31,14 @@ import syncEngine; import itemdb; import clientSideFiltering; import monitor; +import webhook; // What other constant variables do we require? 
const int EXIT_RESYNC_REQUIRED = 126; // Class objects ApplicationConfig appConfig; -OneDriveApi oneDriveApiInstance; +OneDriveWebhook oneDriveWebhook; SyncEngine syncEngineInstance; ItemDatabase itemDB; ClientSideFiltering selectiveSync; @@ -411,13 +412,16 @@ int main(string[] cliArgs) { // Initialise the OneDrive API addLogEntry("Attempting to initialise the OneDrive API ...", ["verbose"]); - oneDriveApiInstance = new OneDriveApi(appConfig); + OneDriveApi oneDriveApiInstance = new OneDriveApi(appConfig); appConfig.apiWasInitialised = oneDriveApiInstance.initialise(); if (appConfig.apiWasInitialised) { addLogEntry("The OneDrive API was initialised successfully", ["verbose"]); // Flag that we were able to initalise the API in the application config oneDriveApiInstance.debugOutputConfiguredAPIItems(); + + oneDriveApiInstance.shutdown(); + object.destroy(oneDriveApiInstance); // Need to configure the itemDB and syncEngineInstance for 'sync' and 'non-sync' operations addLogEntry("Opening the item database ...", ["verbose"]); @@ -845,15 +849,16 @@ int main(string[] cliArgs) { addLogEntry("ERROR: The following inotify error was generated: " ~ e.msg); } } - - // Webhook Notification reset to false for this loop - notificationReceived = false; // Check for notifications pushed from Microsoft to the webhook if (webhookEnabled) { // Create a subscription on the first run, or renew the subscription // on subsequent runs when it is about to expire. - oneDriveApiInstance.createOrRenewSubscription(); + if (oneDriveWebhook is null) { + oneDriveWebhook = new OneDriveWebhook(thisTid, appConfig); + oneDriveWebhook.serve(); + } else + oneDriveWebhook.createOrRenewSubscription(); } // Get the current time this loop is starting @@ -996,19 +1001,21 @@ int main(string[] cliArgs) { if(filesystemMonitor.initialised || webhookEnabled) { if(filesystemMonitor.initialised) { - // If local monitor is on + // If local monitor is on and is waiting (previous event was not from webhook) // start the worker and wait for event - filesystemMonitor.send(true); + if (!notificationReceived) + filesystemMonitor.send(true); } if(webhookEnabled) { // if onedrive webhook is enabled // update sleep time based on renew interval - Duration nextWebhookCheckDuration = oneDriveApiInstance.getNextExpirationCheckDuration(); + Duration nextWebhookCheckDuration = oneDriveWebhook.getNextExpirationCheckDuration(); if (nextWebhookCheckDuration < sleepTime) { sleepTime = nextWebhookCheckDuration; addLogEntry("Update sleeping time to " ~ to!string(sleepTime), ["debug"]); } + // Webhook Notification reset to false for this loop notificationReceived = false; } @@ -1034,17 +1041,17 @@ int main(string[] cliArgs) { // do not contain any actual changes, and we will always rely do the // delta endpoint to sync to latest. Therefore, only one sync run is // good enough to catch up for multiple notifications. - int signalCount = notificationReceived ? 
1 : 0; - for (;; signalCount++) { - signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {}); - if (signalExists) { - notificationReceived = true; - } else { - if (notificationReceived) { + if (notificationReceived) { + int signalCount = 1; + while (true) { + signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {}); + if (signalExists) { + signalCount++; + } else { addLogEntry("Received " ~ to!string(signalCount) ~ " refresh signals from the webhook"); oneDriveWebhookCallback(); + break; } - break; } } @@ -1081,11 +1088,10 @@ void performStandardExitProcess(string scopeCaller = null) { addLogEntry("Running performStandardExitProcess due to: " ~ scopeCaller, ["debug"]); } - // Shutdown the OneDrive API instance - if (oneDriveApiInstance !is null) { - addLogEntry("Shutdown OneDrive API instance", ["debug"]); - oneDriveApiInstance.shutdown(); - object.destroy(oneDriveApiInstance); + // Shutdown the OneDrive Webhook instance + if (oneDriveWebhook !is null) { + oneDriveWebhook.stop(); + object.destroy(oneDriveWebhook); } // Shutdown the sync engine @@ -1135,7 +1141,7 @@ void performStandardExitProcess(string scopeCaller = null) { addLogEntry("Setting ALL Class Objects to null due to failure scope", ["debug"]); itemDB = null; appConfig = null; - oneDriveApiInstance = null; + oneDriveWebhook = null; selectiveSync = null; syncEngineInstance = null; } else { diff --git a/src/onedrive.d b/src/onedrive.d index 745b55118..8b73b7c5a 100644 --- a/src/onedrive.d +++ b/src/onedrive.d @@ -22,9 +22,6 @@ import std.uri; import std.array; // Required for webhooks -import arsd.cgi; -import std.concurrency; -import core.atomic : atomicOp; import std.uuid; // What other modules that we have created do we need to import? @@ -56,116 +53,10 @@ class OneDriveException: Exception { } } -class OneDriveWebhook { - // We need OneDriveWebhook.serve to be a static function, otherwise we would hit the member function - // "requires a dual-context, which is deprecated" warning. The root cause is described here: - // - https://issues.dlang.org/show_bug.cgi?id=5710 - // - https://forum.dlang.org/post/fkyppfxzegenniyzztos@forum.dlang.org - // The problem is deemed a bug and should be fixed in the compilers eventually. The singleton stuff - // could be undone when it is fixed. 
- // - // Following the singleton pattern described here: https://wiki.dlang.org/Low-Lock_Singleton_Pattern - // Cache instantiation flag in thread-local bool - // Thread local - private static bool instantiated_; - private RequestServer server; - - // Thread global - private __gshared OneDriveWebhook instance_; - - private string host; - private ushort port; - private Tid parentTid; - private shared uint count; - private bool started; - - static OneDriveWebhook getOrCreate(string host, ushort port, Tid parentTid) { - if (!instantiated_) { - synchronized(OneDriveWebhook.classinfo) { - if (!instance_) { - instance_ = new OneDriveWebhook(host, port, parentTid); - } - - instantiated_ = true; - } - } - - return instance_; - } - - private this(string host, ushort port, Tid parentTid) { - this.host = host; - this.port = port; - this.parentTid = parentTid; - this.count = 0; - } - - void serve() { - spawn(&serveStatic); - this.started = true; - addLogEntry("Started webhook server"); - } - - void stop() { - if (this.started) { - server.stop(); - this.started = false; - } - addLogEntry("Stopped webhook server"); - object.destroy(server); - } - - // The static serve() is necessary because spawn() does not like instance methods - private static void serveStatic() { - // we won't create the singleton instance if it hasn't been created already - // such case is a bug which should crash the program and gets fixed - instance_.serveImpl(); - } - - // The static handle() is necessary to work around the dual-context warning mentioned above - private static void handle(Cgi cgi) { - // we won't create the singleton instance if it hasn't been created already - // such case is a bug which should crash the program and gets fixed - instance_.handleImpl(cgi); - } - - private void serveImpl() { - server = RequestServer(host, port); - server.serveEmbeddedHttp!handle(); - } - - private void handleImpl(Cgi cgi) { - if (debugHTTPResponseOutput) { - addLogEntry("Webhook request: " ~ to!string(cgi.requestMethod) ~ " " ~ to!string(cgi.requestUri)); - if (!cgi.postBody.empty) { - addLogEntry("Webhook post body: " ~ to!string(cgi.postBody)); - } - } - - cgi.setResponseContentType("text/plain"); - - if ("validationToken" in cgi.get) { - // For validation requests, respond with the validation token passed in the query string - // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/webhook-receiver-validation-request - cgi.write(cgi.get["validationToken"]); - addLogEntry("Webhook: handled validation request"); - } else { - // Notifications don't include any information about the changes that triggered them. - // Put a refresh signal in the queue and let the main monitor loop process it. 
- // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/using-webhooks - count.atomicOp!"+="(1); - send(parentTid, to!ulong(count)); - cgi.write("OK"); - addLogEntry("Webhook: sent refresh signal #" ~ to!string(count)); - } - } -} - class OneDriveApi { // Class variables ApplicationConfig appConfig; CurlEngine curlEngine; - OneDriveWebhook webhook; string clientId = ""; string companyName = ""; @@ -179,20 +70,14 @@ class OneDriveApi { string itemByPathUrl = ""; string siteSearchUrl = ""; string siteDriveUrl = ""; + string subscriptionUrl = ""; string tenantId = ""; string authScope = ""; const(char)[] refreshToken = ""; bool dryRun = false; bool debugResponse = false; ulong retryAfterValue = 0; - - // Webhook Subscriptions - string subscriptionUrl = ""; - string subscriptionId = ""; - SysTime subscriptionExpiration, subscriptionLastErrorAt; - Duration subscriptionExpirationInterval, subscriptionRenewalInterval, subscriptionRetryInterval; - string notificationUrl = ""; - + this(ApplicationConfig appConfig) { // Configure the class varaible to consume the application configuration this.appConfig = appConfig; @@ -214,14 +99,9 @@ class OneDriveApi { siteSearchUrl = appConfig.globalGraphEndpoint ~ "/v1.0/sites?search"; siteDriveUrl = appConfig.globalGraphEndpoint ~ "/v1.0/sites/"; + // Subscriptions subscriptionUrl = appConfig.globalGraphEndpoint ~ "/v1.0/subscriptions"; - subscriptionExpiration = Clock.currTime(UTC()); - subscriptionLastErrorAt = SysTime.fromUnixTime(0); - subscriptionExpirationInterval = dur!"seconds"(appConfig.getValueLong("webhook_expiration_interval")); - subscriptionRenewalInterval = dur!"seconds"(appConfig.getValueLong("webhook_renewal_interval")); - subscriptionRetryInterval = dur!"seconds"(appConfig.getValueLong("webhook_retry_interval")); - notificationUrl = appConfig.getValueString("webhook_public_url"); } // Initialise the OneDrive API class @@ -475,20 +355,6 @@ class OneDriveApi { // Shutdown OneDrive API Curl Engine void shutdown() { - - // Delete subscription if there exists any - try { - deleteSubscription(); - } catch (OneDriveException e) { - logSubscriptionError(e); - } - - // Shutdown webhook server if it is running - if (webhook !is null) { - webhook.stop(); - object.destroy(webhook); - } - // Release curl instance if (curlEngine !is null) { curlEngine.release(); @@ -834,6 +700,47 @@ class OneDriveApi { url = siteDriveUrl ~ site_id ~ "/drives"; return get(url); } + + JSONValue createSubscription(string notificationUrl, SysTime expirationDateTime) { + checkAccessTokenExpired(); + string driveId = appConfig.getValueString("drive_id"); + string url = subscriptionUrl; + + // Create a resource item based on if we have a driveId + string resourceItem; + if (driveId.length) { + resourceItem = "/drives/" ~ driveId ~ "/root"; + } else { + resourceItem = "/me/drive/root"; + } + + // create JSON request to create webhook subscription + const JSONValue request = [ + "changeType": "updated", + "notificationUrl": notificationUrl, + "resource": resourceItem, + "expirationDateTime": expirationDateTime.toISOExtString(), + "clientState": randomUUID().toString() + ]; + curlEngine.http.addRequestHeader("Content-Type", "application/json"); + return post(url, request.toString()); + } + + JSONValue renewSubscription(string subscriptionId, SysTime expirationDateTime) { + string url; + url = subscriptionUrl ~ "/" ~ subscriptionId; + const JSONValue request = [ + "expirationDateTime": expirationDateTime.toISOExtString() + ]; + 
curlEngine.http.addRequestHeader("Content-Type", "application/json"); + return post(url, request.toString()); + } + + void deleteSubscription(string subscriptionId) { + string url; + url = subscriptionUrl ~ "/" ~ subscriptionId; + performDelete(url); + } // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/api/driveitem_get_content void downloadById(const(char)[] driveId, const(char)[] id, string saveToPath, long fileSize) { @@ -893,277 +800,7 @@ class OneDriveApi { retryAfterValue = 0; } - // Create a new subscription or renew the existing subscription - void createOrRenewSubscription() { - checkAccessTokenExpired(); - - // Kick off the webhook server first - if (webhook is null) { - webhook = OneDriveWebhook.getOrCreate( - appConfig.getValueString("webhook_listening_host"), - to!ushort(appConfig.getValueLong("webhook_listening_port")), - thisTid - ); - webhook.serve(); - } - - auto elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt; - if (elapsed < subscriptionRetryInterval) { - return; - } - - try { - if (!hasValidSubscription()) { - createSubscription(); - } else if (isSubscriptionUpForRenewal()) { - renewSubscription(); - } - } catch (OneDriveException e) { - logSubscriptionError(e); - subscriptionLastErrorAt = Clock.currTime(UTC()); - addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval)); - } catch (JSONException e) { - addLogEntry("ERROR: Unexpected JSON error when attempting to validate subscription: " ~ e.msg); - subscriptionLastErrorAt = Clock.currTime(UTC()); - addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval)); - } - } - - // Return the duration to next subscriptionExpiration check - Duration getNextExpirationCheckDuration() { - SysTime now = Clock.currTime(UTC()); - if (hasValidSubscription()) { - Duration elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt; - // Check if we are waiting for the next retry - if (elapsed < subscriptionRetryInterval) - return subscriptionRetryInterval - elapsed; - else - return subscriptionExpiration - now - subscriptionRenewalInterval; - } - else - return subscriptionRetryInterval; - } - // Private functions - private bool hasValidSubscription() { - return !subscriptionId.empty && subscriptionExpiration > Clock.currTime(UTC()); - } - - private bool isSubscriptionUpForRenewal() { - return subscriptionExpiration < Clock.currTime(UTC()) + subscriptionRenewalInterval; - } - - private void createSubscription() { - addLogEntry("Initializing subscription for updates ..."); - - auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; - string driveId = appConfig.getValueString("drive_id"); - string url = subscriptionUrl; - - // Create a resource item based on if we have a driveId - string resourceItem; - if (driveId.length) { - resourceItem = "/drives/" ~ driveId ~ "/root"; - } else { - resourceItem = "/me/drive/root"; - } - - // create JSON request to create webhook subscription - const JSONValue request = [ - "changeType": "updated", - "notificationUrl": notificationUrl, - "resource": resourceItem, - "expirationDateTime": expirationDateTime.toISOExtString(), - "clientState": randomUUID().toString() - ]; - curlEngine.http.addRequestHeader("Content-Type", "application/json"); - - try { - JSONValue response = post(url, request.toString()); - - // Save important subscription metadata including id and expiration - subscriptionId = response["id"].str; - subscriptionExpiration = 
SysTime.fromISOExtString(response["expirationDateTime"].str); - addLogEntry("Created new subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString())); - } catch (OneDriveException e) { - if (e.httpStatusCode == 409) { - // Take over an existing subscription on HTTP 409. - // - // Sample 409 error: - // { - // "error": { - // "code": "ObjectIdentifierInUse", - // "innerError": { - // "client-request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d", - // "date": "2023-09-26T09:27:45", - // "request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d" - // }, - // "message": "Subscription Id c0bba80e-57a3-43a7-bac2-e6f525a76e7c already exists for the requested combination" - // } - // } - - // Make sure the error code is "ObjectIdentifierInUse" - try { - if (e.error["error"]["code"].str != "ObjectIdentifierInUse") { - throw e; - } - } catch (JSONException jsonEx) { - throw e; - } - - // Extract the existing subscription id from the error message - import std.regex; - auto idReg = ctRegex!(r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", "i"); - auto m = matchFirst(e.error["error"]["message"].str, idReg); - if (!m) { - throw e; - } - - // Save the subscription id and renew it immediately since we don't know the expiration timestamp - subscriptionId = m[0]; - addLogEntry("Found existing subscription " ~ subscriptionId); - renewSubscription(); - } else { - throw e; - } - } - } - - private void renewSubscription() { - addLogEntry("Renewing subscription for updates ..."); - - auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; - string url; - url = subscriptionUrl ~ "/" ~ subscriptionId; - const JSONValue request = [ - "expirationDateTime": expirationDateTime.toISOExtString() - ]; - curlEngine.http.addRequestHeader("Content-Type", "application/json"); - - try { - JSONValue response = patch(url, request.toString()); - - // Update subscription expiration from the response - subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); - addLogEntry("Renewed subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString())); - } catch (OneDriveException e) { - if (e.httpStatusCode == 404) { - addLogEntry("The subscription is not found on the server. Recreating subscription ..."); - subscriptionId = null; - subscriptionExpiration = Clock.currTime(UTC()); - createSubscription(); - } else { - throw e; - } - } - } - - private void deleteSubscription() { - if (!hasValidSubscription()) { - addLogEntry("No valid Microsoft OneDrive webhook subscription to delete", ["debug"]); - return; - } - - string url; - url = subscriptionUrl ~ "/" ~ subscriptionId; - performDelete(url); - addLogEntry("Deleted Microsoft OneDrive webhook subscription", ["debug"]); - } - - private void logSubscriptionError(OneDriveException e) { - if (e.httpStatusCode == 400) { - // Log known 400 error where Microsoft cannot get a 200 OK from the webhook endpoint - // - // Sample 400 error: - // { - // "error": { - // "code": "InvalidRequest", - // "innerError": { - // "client-request-id": "", - // "date": "", - // "request-id": "" - // }, - // "message": "Subscription validation request failed. Notification endpoint must respond with 200 OK to validation request." 
- // } - // } - - try { - if (e.error["error"]["code"].str == "InvalidRequest") { - import std.regex; - auto msgReg = ctRegex!(r"Subscription validation request failed", "i"); - auto m = matchFirst(e.error["error"]["message"].str, msgReg); - if (m) { - addLogEntry("ERROR: Cannot create or renew subscription: Microsoft did not get 200 OK from the webhook endpoint."); - return; - } - } - } catch (JSONException) { - // fallthrough - } - } else if (e.httpStatusCode == 401) { - // Log known 401 error where authentication failed - // - // Sample 401 error: - // { - // "error": { - // "code": "ExtensionError", - // "innerError": { - // "client-request-id": "", - // "date": "", - // "request-id": "" - // }, - // "message": "Operation: Create; Exception: [Status Code: Unauthorized; Reason: Authentication failed]" - // } - // } - - try { - if (e.error["error"]["code"].str == "ExtensionError") { - import std.regex; - auto msgReg = ctRegex!(r"Authentication failed", "i"); - auto m = matchFirst(e.error["error"]["message"].str, msgReg); - if (m) { - addLogEntry("ERROR: Cannot create or renew subscription: Authentication failed."); - return; - } - } - } catch (JSONException) { - // fallthrough - } - } else if (e.httpStatusCode == 403) { - // Log known 403 error where the number of subscriptions on item has exceeded limit - // - // Sample 403 error: - // { - // "error": { - // "code": "ExtensionError", - // "innerError": { - // "client-request-id": "", - // "date": "", - // "request-id": "" - // }, - // "message": "Operation: Create; Exception: [Status Code: Forbidden; Reason: Number of subscriptions on item has exceeded limit]" - // } - // } - try { - if (e.error["error"]["code"].str == "ExtensionError") { - import std.regex; - auto msgReg = ctRegex!(r"Number of subscriptions on item has exceeded limit", "i"); - auto m = matchFirst(e.error["error"]["message"].str, msgReg); - if (m) { - addLogEntry("ERROR: Cannot create or renew subscription: Number of subscriptions has exceeded limit."); - return; - } - } - } catch (JSONException) { - // fallthrough - } - } - - // Log detailed message for unknown errors - addLogEntry("ERROR: Cannot create or renew subscription."); - displayOneDriveErrorMessage(e.msg, getFunctionName!({})); - } - private void addAccessTokenHeader() { curlEngine.http.addRequestHeader("Authorization", appConfig.accessToken); } diff --git a/src/webhook.d b/src/webhook.d new file mode 100644 index 000000000..065e6fb64 --- /dev/null +++ b/src/webhook.d @@ -0,0 +1,339 @@ +module webhook; + +// What does this module require to function? +import core.atomic : atomicOp; +import std.datetime; +import std.concurrency; +import std.json; + +// What other modules that we have created do we need to import? 
+import arsd.cgi; +import config; +import onedrive; +import log; +import util; + +class OneDriveWebhook { + private RequestServer server; + private string host; + private ushort port; + private Tid parentTid; + private bool started; + + private ApplicationConfig appConfig; + private OneDriveApi oneDriveApiInstance; + string subscriptionId = ""; + SysTime subscriptionExpiration, subscriptionLastErrorAt; + Duration subscriptionExpirationInterval, subscriptionRenewalInterval, subscriptionRetryInterval; + string notificationUrl = ""; + + private uint count; + + this(Tid parentTid, ApplicationConfig appConfig) { + this.host = appConfig.getValueString("webhook_listening_host"); + this.port = to!ushort(appConfig.getValueLong("webhook_listening_port")); + this.parentTid = parentTid; + this.appConfig = appConfig; + + subscriptionExpiration = Clock.currTime(UTC()); + subscriptionLastErrorAt = SysTime.fromUnixTime(0); + subscriptionExpirationInterval = dur!"seconds"(appConfig.getValueLong("webhook_expiration_interval")); + subscriptionRenewalInterval = dur!"seconds"(appConfig.getValueLong("webhook_renewal_interval")); + subscriptionRetryInterval = dur!"seconds"(appConfig.getValueLong("webhook_retry_interval")); + notificationUrl = appConfig.getValueString("webhook_public_url"); + } + + // The static serve() is necessary because spawn() does not like instance methods + void serve() { + if (this.started) + return; + this.started = true; + this.count = 0; + + server.listeningHost = this.host; + server.listeningPort = this.port; + + spawn(&serveImpl, cast(shared) this); + addLogEntry("Started webhook server"); + + // Subscriptions + oneDriveApiInstance = new OneDriveApi(this.appConfig); + oneDriveApiInstance.initialise(); + + createOrRenewSubscription(); + } + + void stop() { + if (!this.started) + return; + server.stop(); + this.started = false; + + addLogEntry("Stopped webhook server"); + object.destroy(server); + + // Delete subscription if there exists any + try { + deleteSubscription(); + } catch (OneDriveException e) { + logSubscriptionError(e); + } + oneDriveApiInstance.shutdown(); + object.destroy(oneDriveApiInstance); + } + + private static void handle(shared OneDriveWebhook _this, Cgi cgi) { + if (debugHTTPResponseOutput) { + addLogEntry("Webhook request: " ~ to!string(cgi.requestMethod) ~ " " ~ to!string(cgi.requestUri)); + if (!cgi.postBody.empty) { + addLogEntry("Webhook post body: " ~ to!string(cgi.postBody)); + } + } + + cgi.setResponseContentType("text/plain"); + + if ("validationToken" in cgi.get) { + // For validation requests, respond with the validation token passed in the query string + // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/webhook-receiver-validation-request + cgi.write(cgi.get["validationToken"]); + addLogEntry("Webhook: handled validation request"); + } else { + // Notifications don't include any information about the changes that triggered them. + // Put a refresh signal in the queue and let the main monitor loop process it. 
+ // https://docs.microsoft.com/en-us/onedrive/developer/rest-api/concepts/using-webhooks + _this.count.atomicOp!"+="(1); + send(cast()_this.parentTid, to!ulong(_this.count)); + cgi.write("OK"); + addLogEntry("Webhook: sent refresh signal #" ~ to!string(_this.count)); + } + } + + private static void serveImpl(shared OneDriveWebhook _this) { + _this.server.serveEmbeddedHttp!(handle, OneDriveWebhook)(_this); + } + + // Create a new subscription or renew the existing subscription + void createOrRenewSubscription() { + auto elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt; + if (elapsed < subscriptionRetryInterval) { + return; + } + + try { + if (!hasValidSubscription()) { + createSubscription(); + } else if (isSubscriptionUpForRenewal()) { + renewSubscription(); + } + } catch (OneDriveException e) { + logSubscriptionError(e); + subscriptionLastErrorAt = Clock.currTime(UTC()); + addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval)); + } catch (JSONException e) { + addLogEntry("ERROR: Unexpected JSON error when attempting to validate subscription: " ~ e.msg); + subscriptionLastErrorAt = Clock.currTime(UTC()); + addLogEntry("Will retry creating or renewing subscription in " ~ to!string(subscriptionRetryInterval)); + } + } + + // Return the duration to next subscriptionExpiration check + Duration getNextExpirationCheckDuration() { + SysTime now = Clock.currTime(UTC()); + if (hasValidSubscription()) { + Duration elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt; + // Check if we are waiting for the next retry + if (elapsed < subscriptionRetryInterval) + return subscriptionRetryInterval - elapsed; + else + return subscriptionExpiration - now - subscriptionRenewalInterval; + } + else + return subscriptionRetryInterval; + } + + private bool hasValidSubscription() { + return !subscriptionId.empty && subscriptionExpiration > Clock.currTime(UTC()); + } + + private bool isSubscriptionUpForRenewal() { + return subscriptionExpiration < Clock.currTime(UTC()) + subscriptionRenewalInterval; + } + + private void createSubscription() { + addLogEntry("Initializing subscription for updates ..."); + + auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; + try { + JSONValue response = oneDriveApiInstance.createSubscription(notificationUrl, expirationDateTime); + // Save important subscription metadata including id and expiration + subscriptionId = response["id"].str; + subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); + addLogEntry("Created new subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString())); + } catch (OneDriveException e) { + if (e.httpStatusCode == 409) { + // Take over an existing subscription on HTTP 409. 
+ // + // Sample 409 error: + // { + // "error": { + // "code": "ObjectIdentifierInUse", + // "innerError": { + // "client-request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d", + // "date": "2023-09-26T09:27:45", + // "request-id": "615af209-467a-4ab7-8eff-27c1d1efbc2d" + // }, + // "message": "Subscription Id c0bba80e-57a3-43a7-bac2-e6f525a76e7c already exists for the requested combination" + // } + // } + + // Make sure the error code is "ObjectIdentifierInUse" + try { + if (e.error["error"]["code"].str != "ObjectIdentifierInUse") { + throw e; + } + } catch (JSONException jsonEx) { + throw e; + } + + // Extract the existing subscription id from the error message + import std.regex; + auto idReg = ctRegex!(r"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}", "i"); + auto m = matchFirst(e.error["error"]["message"].str, idReg); + if (!m) { + throw e; + } + + // Save the subscription id and renew it immediately since we don't know the expiration timestamp + subscriptionId = m[0]; + addLogEntry("Found existing subscription " ~ subscriptionId); + renewSubscription(); + } else { + throw e; + } + } + } + + private void renewSubscription() { + addLogEntry("Renewing subscription for updates ..."); + + auto expirationDateTime = Clock.currTime(UTC()) + subscriptionExpirationInterval; + try { + JSONValue response = oneDriveApiInstance.renewSubscription(subscriptionId, expirationDateTime); + + // Update subscription expiration from the response + subscriptionExpiration = SysTime.fromISOExtString(response["expirationDateTime"].str); + addLogEntry("Created new subscription " ~ subscriptionId ~ " with expiration: " ~ to!string(subscriptionExpiration.toISOExtString())); + } catch (OneDriveException e) { + if (e.httpStatusCode == 404) { + addLogEntry("The subscription is not found on the server. Recreating subscription ..."); + subscriptionId = null; + subscriptionExpiration = Clock.currTime(UTC()); + createSubscription(); + } else { + throw e; + } + } + } + + private void deleteSubscription() { + if (!hasValidSubscription()) { + return; + } + oneDriveApiInstance.deleteSubscription(subscriptionId); + addLogEntry("Deleted subscription"); + } + + private void logSubscriptionError(OneDriveException e) { + if (e.httpStatusCode == 400) { + // Log known 400 error where Microsoft cannot get a 200 OK from the webhook endpoint + // + // Sample 400 error: + // { + // "error": { + // "code": "InvalidRequest", + // "innerError": { + // "client-request-id": "", + // "date": "", + // "request-id": "" + // }, + // "message": "Subscription validation request failed. Notification endpoint must respond with 200 OK to validation request." 
+ // } + // } + + try { + if (e.error["error"]["code"].str == "InvalidRequest") { + import std.regex; + auto msgReg = ctRegex!(r"Subscription validation request failed", "i"); + auto m = matchFirst(e.error["error"]["message"].str, msgReg); + if (m) { + addLogEntry("ERROR: Cannot create or renew subscription: Microsoft did not get 200 OK from the webhook endpoint."); + return; + } + } + } catch (JSONException) { + // fallthrough + } + } else if (e.httpStatusCode == 401) { + // Log known 401 error where authentication failed + // + // Sample 401 error: + // { + // "error": { + // "code": "ExtensionError", + // "innerError": { + // "client-request-id": "", + // "date": "", + // "request-id": "" + // }, + // "message": "Operation: Create; Exception: [Status Code: Unauthorized; Reason: Authentication failed]" + // } + // } + + try { + if (e.error["error"]["code"].str == "ExtensionError") { + import std.regex; + auto msgReg = ctRegex!(r"Authentication failed", "i"); + auto m = matchFirst(e.error["error"]["message"].str, msgReg); + if (m) { + addLogEntry("ERROR: Cannot create or renew subscription: Authentication failed."); + return; + } + } + } catch (JSONException) { + // fallthrough + } + } else if (e.httpStatusCode == 403) { + // Log known 403 error where the number of subscriptions on item has exceeded limit + // + // Sample 403 error: + // { + // "error": { + // "code": "ExtensionError", + // "innerError": { + // "client-request-id": "", + // "date": "", + // "request-id": "" + // }, + // "message": "Operation: Create; Exception: [Status Code: Forbidden; Reason: Number of subscriptions on item has exceeded limit]" + // } + // } + try { + if (e.error["error"]["code"].str == "ExtensionError") { + import std.regex; + auto msgReg = ctRegex!(r"Number of subscriptions on item has exceeded limit", "i"); + auto m = matchFirst(e.error["error"]["message"].str, msgReg); + if (m) { + addLogEntry("ERROR: Cannot create or renew subscription: Number of subscriptions has exceeded limit."); + return; + } + } + } catch (JSONException) { + // fallthrough + } + } + + // Log detailed message for unknown errors + addLogEntry("ERROR: Cannot create or renew subscription."); + displayOneDriveErrorMessage(e.msg, getFunctionName!({})); + } +} From 0404ee2e371464cf4501750036b468fb477234eb Mon Sep 17 00:00:00 2001 From: abraunegg Date: Wed, 21 Feb 2024 06:41:22 +1100 Subject: [PATCH 064/305] Add architecture documentation to 'alpha-5' (#2639) * Add Client Architecture Documentation --- docs/application-security.md | 4 +- docs/client-architecture.md | 314 ++++++++++++++++++ docs/puml/applyPotentiallyChangedItem.png | Bin 0 -> 76323 bytes docs/puml/applyPotentiallyChangedItem.puml | 48 +++ docs/puml/applyPotentiallyNewLocalItem.png | Bin 0 -> 144800 bytes docs/puml/applyPotentiallyNewLocalItem.puml | 90 +++++ docs/puml/client_side_filtering_rules.png | Bin 0 -> 94791 bytes docs/puml/client_side_filtering_rules.puml | 71 ++++ ...ode_functional_component_relationships.png | Bin 0 -> 121607 bytes ...de_functional_component_relationships.puml | 78 +++++ docs/puml/conflict_handling_default.png | Bin 0 -> 65635 bytes docs/puml/conflict_handling_default.puml | 31 ++ .../puml/conflict_handling_default_resync.png | Bin 0 -> 80416 bytes .../conflict_handling_default_resync.puml | 35 ++ .../conflict_handling_local-first_default.png | Bin 0 -> 101311 bytes ...conflict_handling_local-first_default.puml | 62 ++++ .../conflict_handling_local-first_resync.png | Bin 0 -> 127450 bytes .../conflict_handling_local-first_resync.puml | 70 
++++ docs/puml/database_schema.png | Bin 0 -> 36582 bytes docs/puml/database_schema.puml | 37 +++ docs/puml/downloadFile.png | Bin 0 -> 84579 bytes docs/puml/downloadFile.puml | 63 ++++ docs/puml/high_level_operational_process.png | Bin 0 -> 82658 bytes docs/puml/high_level_operational_process.puml | 55 +++ docs/puml/is_item_in_sync.png | Bin 0 -> 111425 bytes docs/puml/is_item_in_sync.puml | 79 +++++ docs/puml/main_activity_flows.png | Bin 0 -> 162036 bytes docs/puml/main_activity_flows.puml | 81 +++++ docs/puml/uploadFile.png | Bin 0 -> 97884 bytes docs/puml/uploadFile.puml | 62 ++++ docs/puml/uploadModifiedFile.png | Bin 0 -> 85069 bytes docs/puml/uploadModifiedFile.puml | 56 ++++ docs/usage.md | 2 +- 33 files changed, 1235 insertions(+), 3 deletions(-) create mode 100644 docs/client-architecture.md create mode 100644 docs/puml/applyPotentiallyChangedItem.png create mode 100644 docs/puml/applyPotentiallyChangedItem.puml create mode 100644 docs/puml/applyPotentiallyNewLocalItem.png create mode 100644 docs/puml/applyPotentiallyNewLocalItem.puml create mode 100644 docs/puml/client_side_filtering_rules.png create mode 100644 docs/puml/client_side_filtering_rules.puml create mode 100644 docs/puml/code_functional_component_relationships.png create mode 100644 docs/puml/code_functional_component_relationships.puml create mode 100644 docs/puml/conflict_handling_default.png create mode 100644 docs/puml/conflict_handling_default.puml create mode 100644 docs/puml/conflict_handling_default_resync.png create mode 100644 docs/puml/conflict_handling_default_resync.puml create mode 100644 docs/puml/conflict_handling_local-first_default.png create mode 100644 docs/puml/conflict_handling_local-first_default.puml create mode 100644 docs/puml/conflict_handling_local-first_resync.png create mode 100644 docs/puml/conflict_handling_local-first_resync.puml create mode 100644 docs/puml/database_schema.png create mode 100644 docs/puml/database_schema.puml create mode 100644 docs/puml/downloadFile.png create mode 100644 docs/puml/downloadFile.puml create mode 100644 docs/puml/high_level_operational_process.png create mode 100644 docs/puml/high_level_operational_process.puml create mode 100644 docs/puml/is_item_in_sync.png create mode 100644 docs/puml/is_item_in_sync.puml create mode 100644 docs/puml/main_activity_flows.png create mode 100644 docs/puml/main_activity_flows.puml create mode 100644 docs/puml/uploadFile.png create mode 100644 docs/puml/uploadFile.puml create mode 100644 docs/puml/uploadModifiedFile.png create mode 100644 docs/puml/uploadModifiedFile.puml diff --git a/docs/application-security.md b/docs/application-security.md index e0fad5a24..96d07566f 100644 --- a/docs/application-security.md +++ b/docs/application-security.md @@ -71,9 +71,9 @@ When using the OneDrive Client for Linux, the above authentication scopes will b This is similar to the Microsoft Windows OneDrive Client: -![Linux Authentication to Microsoft OneDrive](./puml/onedrive_windows_authentication.png) +![Windows Authentication to Microsoft OneDrive](./puml/onedrive_windows_authentication.png) -In a business environment, where IT Staff need to 'approve' the OneDrive Client for Linux, can do so knowing that the client is safe to use. 
The only concernt that the IT Staff should have is how is the client device, where the OneDrive Client for Linux is running, is being secured, as in a corporate setting, Windows would be controlled by Active Directory and applicable Group Policy Objects (GPO's) to ensure the security of corporate data on the client device. It is out of scope for this client to handle how Linux devices are being secure. +In a business setting, IT staff who need to authorise the use of the OneDrive Client for Linux in their environment can be assured of its safety. The primary concern for IT staff should be securing the device running the OneDrive Client for Linux. Unlike in a corporate environment where Windows devices are secured through Active Directory and Group Policy Objects (GPOs) to protect corporate data on the device, it is beyond the responsibility of this client to manage security on Linux devices. ## Configuring read-only access to your OneDrive data In some situations, it may be desirable to configure the OneDrive Client for Linux totally in read-only operation. diff --git a/docs/client-architecture.md b/docs/client-architecture.md new file mode 100644 index 000000000..a6d2fcf7c --- /dev/null +++ b/docs/client-architecture.md @@ -0,0 +1,314 @@ +# OneDrive Client for Linux Application Architecture + +## How does the client work at a high level? + +The diagram below outlines at a high level the operational workflow of the OneDrive Client for Linux, demonstrating how it interacts with the OneDrive API to maintain synchronisation, manage local and cloud data integrity, and ensure that user data is accurately mirrored between the local filesystem and OneDrive cloud storage. + +![High Level Application Sequence](./puml/high_level_operational_process.png) + +The above process involves several high level key stages: + +1. **Access Token Validation:** Initially, the client validates its access and the existing access token, refreshing it if necessary. This step ensures that the client has the required permissions to interact with the OneDrive API. + +2. **Query Microsoft OneDrive API:** The client queries the /delta API endpoint of Microsoft OneDrive, which returns JSON responses. The /delta endpoint is particularly used for syncing changes, helping the client to identify any updates in the OneDrive storage. + +3. **Process JSON Responses:** The client processes each JSON response to determine if it represents a 'root' or 'deleted' item. Items not marked as 'root' or 'deleted' are temporarily stored for further processing. For 'root' or 'deleted' items, the client processes them immediately, otherwise, the client evaluates the items against client-side filtering rules to decide whether to discard them or to process and save them in the local database cache for actions like creating directories or downloading files. + +4. **Local Cache Database Processing for Data Integrity:** The client processes its local cache database to check for data integrity and differences compared to the OneDrive storage. If differences are found, such as a file or folder change including deletions, the client uploads these changes to OneDrive. Responses from the API, including item metadata, are saved to the local cache database. + +5. **Local Filesystem Scanning:** The client scans the local filesystem for new files or folders. Each new item is checked against client-side filtering rules. If an item passes the filtering, it is uploaded to OneDrive. Otherwise, it is discarded if it doesn't meet the filtering criteria. + +6. 
**Final Data True-Up:** Lastly, the client queries the /delta link for a final true-up, processing any further online JSON changes if required. This ensures that the local and OneDrive storages are fully synchronised. + +## What are the operational modes of the client? + +There are 2 main operational modes that the client can utilise: + +1. Standalone sync mode that performs a single sync action against Microsoft OneDrive. This method is used when you utilise `--sync`. +2. Ongoing sync mode that continuously syncs your data with Microsoft OneDrive and utilises 'inotify' to watch for local system changes. This method is used when you utilise `--monitor`. + +By default, both modes consider all data stored online within Microsoft OneDrive as the 'source-of-truth' - that is, what is online, is the correct data (file version, file content, file timestamp, folder structure and so on). This consideration also matches how the Microsoft OneDrive Client for Windows operates. + +However, in standalone mode (`--sync`), you can *change* what reference the client will use as the 'source-of-truth' for your data by using the `--local-first` option so that the application will look at your local files *first* and consider your local files as your 'source-of-truth' to replicate that directory structure to Microsoft OneDrive. + +**Critical Advisory:** Please be aware that if you designate a network mount point (such as NFS, Windows Network Share, or Samba Network Share) as your `sync_dir`, this setup inherently lacks 'inotify' support. Support for 'inotify' is essential for real-time tracking of file changes, which means that the client's 'Monitor Mode' cannot immediately detect changes in files located on these network shares. Instead, synchronisation between your local filesystem and Microsoft OneDrive will occur at intervals specified by the `monitor_interval` setting. This limitation regarding 'inotify' support on network mount points like NFS or Samba is beyond the control of this client. + +## OneDrive Client for Linux High Level Activity Flows + +The diagrams below show the high level process flow and decision making when running the application + +### Main functional activity flows +![Main Activity](./puml/main_activity_flows.png) + +### Processing a potentially new local item +![applyPotentiallyNewLocalItem](./puml/applyPotentiallyNewLocalItem.png) + +### Processing a potentially changed local item +![applyPotentiallyChangedItem](./puml/applyPotentiallyChangedItem.png) + +### Download a file from Microsoft OneDrive +![downloadFile](./puml/downloadFile.png) + +### Upload a modified file to Microsoft OneDrive +![uploadModifiedFile](./puml/uploadModifiedFile.png) + +### Upload a new local file to Microsoft OneDrive +![uploadFile](./puml/uploadFile.png) + +### Determining if an 'item' is syncronised between Microsoft OneDrive and the local file system +![Item Sync Determination](./puml/is_item_in_sync.png) + +### Determining if an 'item' is excluded due to 'Client Side Filtering' rules + +By default, the OneDrive Client for Linux will sync all files and folders between Microsoft OneDrive and the local filesystem. + +Client Side Filtering in the context of this client refers to user-configured rules that determine what files and directories the client should upload or download from Microsoft OneDrive. These rules are crucial for optimising synchronisation, especially when dealing with large numbers of files or specific file types. 
The OneDrive Client for Linux offers several configuration options to facilitate this: + +* **skip_dir:** This option allows the user to specify directories that should not be synchronised with OneDrive. It's particularly useful for omitting large or irrelevant directories from the sync process. + +* **skip_dotfiles:** Dotfiles, usually configuration files or scripts, can be excluded from the sync. This is useful for users who prefer to keep these files local. + +* **skip_file:** Specific files can be excluded from synchronisation using this option. It provides flexibility in selecting which files are essential for cloud storage. + +* **skip_symlinks:** Symlinks often point to files outside the OneDrive directory or to locations that are not relevant for cloud storage. This option prevents them from being included in the sync. + +This exclusion process can be illustrated by the following activity diagram. A 'true' return value means that the path being evaluated needs to be excluded: + +![Client Side Filtering Determination](./puml/client_side_filtering_rules.png) + +## File conflict handling - default operational modes + +When using the default operational modes (`--sync` or `--monitor`) the client application is conforming to how the Microsoft Windows OneDrive client operates in terms of resolving conflicts for files. + +Additionally, when using `--resync` this conflict resolution can differ slightly, as, when using `--resync` you are *deleting* the known application state, thus, the application has zero reference as to what was previously in sync with the local file system. + +Due to this factor, when using `--resync` the online source is always going to be considered accurate and the source-of-truth, regardless of the local file state, file timestamp or file hash. + +### Default Operational Modes - Conflict Handling + +#### Scenario +1. Create a local file +2. Perform a sync with Microsoft OneDrive using `onedrive --sync` +3. Modify file online +4. Modify file locally with different data|contents +5. Perform a sync with Microsoft OneDrive using `onedrive --sync` + +![conflict_handling_default](./puml/conflict_handling_default.png) + +#### Evidence of Conflict Handling +``` +... +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2 +Finished processing /delta JSON response from the OneDrive API +Processing 1 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Number of items to download from OneDrive: 1 +The local file to replace (./1.txt) has been modified locally since the last download. Renaming it to avoid potential local data loss. +The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt +Downloading file ./1.txt ... done +Performing a database consistency and integrity check on locally stored data +Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing ~/OneDrive +The directory has not changed +Processing α +... +The file has not changed +Processing เอกสาร +The directory has not changed +Processing 1.txt +The file has not changed +Scanning the local file system '~/OneDrive' for new data to upload +... +New items to upload to OneDrive: 1 +Total New Data to Upload: 52 Bytes +Uploading new file ./1-onedrive-client-dev.txt ... done. 
+Performing a last examination of the most recent online data within Microsoft OneDrive to complete the reconciliation process +Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2 +Finished processing /delta JSON response from the OneDrive API +Processing 1 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state + +Sync with Microsoft OneDrive is complete +Waiting for all internal threads to complete before exiting application +``` + +### Default Operational Modes - Conflict Handling with --resync + +#### Scenario +1. Create a local file +2. Perform a sync with Microsoft OneDrive using `onedrive --sync` +3. Modify file online +4. Modify file locally with different data|contents +5. Perform a sync with Microsoft OneDrive using `onedrive --sync --resync` + +![conflict_handling_default_resync](./puml/conflict_handling_default_resync.png) + +#### Evidence of Conflict Handling +``` +... +Deleting the saved application sync status ... +Using IPv4 and IPv6 (if configured) for all network operations +Checking Application Version ... +... +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 14 +Finished processing /delta JSON response from the OneDrive API +Processing 13 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Local file time discrepancy detected: ./1.txt +This local file has a different modified time 2024-Feb-19 19:32:55Z (UTC) when compared to remote modified time 2024-Feb-19 19:32:36Z (UTC) +The local file has a different hash when compared to remote file hash +Local item does not exist in local database - replacing with file from OneDrive - failed download? +The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt +Number of items to download from OneDrive: 1 +Downloading file ./1.txt ... done +Performing a database consistency and integrity check on locally stored data +Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing ~/OneDrive +The directory has not changed +Processing α +... +Processing เอกสาร +The directory has not changed +Processing 1.txt +The file has not changed +Scanning the local file system '~/OneDrive' for new data to upload +... +New items to upload to OneDrive: 1 +Total New Data to Upload: 52 Bytes +Uploading new file ./1-onedrive-client-dev.txt ... done. 
+Performing a last examination of the most recent online data within Microsoft OneDrive to complete the reconciliation process
+Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
+Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 2
+Finished processing /delta JSON response from the OneDrive API
+Processing 1 applicable changes and items received from Microsoft OneDrive
+Processing OneDrive JSON item batch [1/1] to ensure consistent local state
+
+Sync with Microsoft OneDrive is complete
+Waiting for all internal threads to complete before exiting application
+```
+
+## File conflict handling - local-first operational mode
+
+When using `--local-first` as your operational parameter, the client application uses your local filesystem data as the 'source-of-truth' for what should be stored online.
+
+However, Microsoft OneDrive itself has *zero* acknowledgement of this concept, so conflict handling needs to be aligned with how Microsoft OneDrive operates on other platforms, that is, the offending local file is renamed.
+
+Additionally, when using `--resync` you are *deleting* the known application state, so the application has zero reference as to what was previously in sync with the local file system.
+
+Due to this factor, when using `--resync` the online source is always considered accurate and the source-of-truth, regardless of the local file state, file timestamp, file hash, or the use of `--local-first`.
+
+### Local First Operational Modes - Conflict Handling
+
+#### Scenario
+1. Create a local file
+2. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first`
+3. Modify file locally with different data|contents
+4. Modify file online with different data|contents
+5. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first`
+
+![conflict_handling_local-first_default](./puml/conflict_handling_local-first_default.png)
+
+#### Evidence of Conflict Handling
+```
+Reading configuration file: /home/alex/.config/onedrive/config
+...
+Using IPv4 and IPv6 (if configured) for all network operations
+Checking Application Version ...
+...
+Sync Engine Initialised with new Onedrive API instance
+All application operations will be performed in the configured local 'sync_dir' directory: /home/alex/OneDrive
+Performing a database consistency and integrity check on locally stored data
+Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA
+Processing ~/OneDrive
+The directory has not changed
+Processing α
+The directory has not changed
+...
+The file has not changed
+Processing เอกสาร
+The directory has not changed
+Processing 1.txt
+Local file time discrepancy detected: 1.txt
+The file content has changed locally and has a newer timestamp, thus needs to be uploaded to OneDrive
+Changed local items to upload to OneDrive: 1
+The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: 1.txt -> 1-onedrive-client-dev.txt
+Uploading new file 1-onedrive-client-dev.txt ... done.
+Scanning the local file system '~/OneDrive' for new data to upload
+...
+Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 3 +Finished processing /delta JSON response from the OneDrive API +Processing 2 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Number of items to download from OneDrive: 1 +Downloading file ./1.txt ... done + +Sync with Microsoft OneDrive is complete +Waiting for all internal threads to complete before exiting application +``` + + +### Local First Operational Modes - Conflict Handling with --resync + +#### Scenario +1. Create a local file +2. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first` +3. Modify file locally with different data|contents +4. Modify file online with different data|contents +5. Perform a sync with Microsoft OneDrive using `onedrive --sync --local-first --resync` + +#### Evidence of Conflict Handling +``` +... + +The usage of --resync will delete your local 'onedrive' client state, thus no record of your current 'sync status' will exist. +This has the potential to overwrite local versions of files with perhaps older versions of documents downloaded from OneDrive, resulting in local data loss. +If in doubt, backup your local data before using --resync + +Are you sure you wish to proceed with --resync? [Y/N] y + +Deleting the saved application sync status ... +Using IPv4 and IPv6 (if configured) for all network operations +... +Sync Engine Initialised with new Onedrive API instance +All application operations will be performed in the configured local 'sync_dir' directory: /home/alex/OneDrive +Performing a database consistency and integrity check on locally stored data +Processing DB entries for this Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing ~/OneDrive +The directory has not changed +Scanning the local file system '~/OneDrive' for new data to upload +Skipping item - excluded by sync_list config: ./random_25k_files +OneDrive Client requested to create this directory online: ./α +The requested directory to create was found on OneDrive - skipping creating the directory: ./α +... +New items to upload to OneDrive: 9 +Total New Data to Upload: 49 KB +... +The file we are attemtping to upload as a new file already exists on Microsoft OneDrive: ./1.txt +Skipping uploading this item as a new file, will upload as a modified file (online file already exists): ./1.txt +The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ./1.txt -> ./1-onedrive-client-dev.txt +Uploading new file ./1-onedrive-client-dev.txt ... done. +Fetching /delta response from the OneDrive API for Drive ID: b!bO8V7s9SSk6r7mWHpIjURotN33W1W2tEv3OXV_oFIdQimEdOHR-1So7CqeT1MfHA +Processing API Response Bundle: 1 - Quantity of 'changes|items' in this bundle to process: 15 +Finished processing /delta JSON response from the OneDrive API +Processing 14 applicable changes and items received from Microsoft OneDrive +Processing OneDrive JSON item batch [1/1] to ensure consistent local state +Number of items to download from OneDrive: 1 +Downloading file ./1.txt ... 
done + +Sync with Microsoft OneDrive is complete +Waiting for all internal threads to complete before exiting application +``` + +## Client Functional Component Architecture Relationships + +The diagram below shows the main functional relationship of application code components, and how these relate to each relevant code module within this application: + +![Functional Code Components](./puml/code_functional_component_relationships.png) + +## Database Schema + +The diagram below shows the database schema that is used within the application + +![Database Schema](./puml/database_schema.png) diff --git a/docs/puml/applyPotentiallyChangedItem.png b/docs/puml/applyPotentiallyChangedItem.png new file mode 100644 index 0000000000000000000000000000000000000000..cbb7f9566e891a39636c52d643d4d3499b3153e4 GIT binary patch literal 76323 zcmd43cRZH=|2}*g2_-6}>`jEC%#0F3W=6KMN>+r-3#Bydm8^tpLP#=_vdP|~kR93c zy6)#m?{_|*-}ipp_v81w|G58Xa9!tlzUK3JJdfjeoljJi<;X~>Nf87glfQCV9YF|# z5o8|&F+Tj}>G#of_=oGB%=LRFwstO7X6E-0IWrqG`y2PnOc`&wFy6X%&(1-VkI&BP zhRr=kYb#z8TWhDLItB#6kGQRI{oX&nNATc2_hZ5}3{?8gP1sWq}#oiHGA%O09}u=&bit0xn8AcsXisQCr5??9qfM|zT@W~?z zc$3JWtbxh;XLdwF$C^k;gOTGz6!h*gid2v45 zyyI;udNq*_vLBzwm#&qB+$2s2A|mo%k|w=|dtz|uws!Q_#Cz2B51-q;xX*4?eyx9b zznPSA#Xi?!jw~F*^gZ&jv-(e41QYKRJejp_SO1lti0>14-asg=^r3eyhUZ;OS2%Oy zAZ6N8!K4nLh#}PTa@qyk%A>`irz}{JqGu^;}F%0rv-;25irv1cRn?PQgj1R&Dqb1{AU% zK2NUF?G{Sp3Au5LV_(Z-e2=?Q&Erq6G8S=P5M8q5odW+-khSGXmOIl+&NnMy#V)E z=Be6^neS#FlznUk8@OoH7sj8B&>S;nr)%+vJXI1d(kZHXs>mneGz-^iwNdxOH##FB zUP(7h6p`$N?&sTh65Bm3CG~+H5yT6TzkE^SzTwP(i#z3)G91S2a3wGPODX2f<7S5` zzcVv4ldgV7-yh z&2+G474PZB=9YS^;Q4{JYqhLVCk>k=7gE!*(uDuf^u%%*~fSH_oTR2=N=Qx_YXACODj-&(s{Du5uw z`}k>)ik_#(xT%F~GRFm|JT{ir*T>_RnGi%O7;X0rB zFXdW_a{2a3v;-}JaN@(mrB-M1o2PD{zV-MJUF%~QK^ZCZ>qG}nypqd(<$>E?-LOND zSHv||2tpS{hIq|iL_c2}xB^4HAAa#_L;(Iv%z$<>pm}3I!sw1R0%62yTbwxY1o-sE7xQ2v`koozKr?%Cdk8hmwC91mI;ujWSC}X z60wVUc5AcM)Tsbkk=G2wZl7X`OO?*d5^!%2Go>h>g3e2-s<)GDJ!>EeV?>+Flw@;@ zz%;w2#wy+4elj_7(0Rz?F*!pg%{s}A7(3djhA4sk;)SDgq9FlJim3ahS zSh(Wu?!LY-1{-3#s~qb%@WF9PHHamQb+cj3$m$BaFUUY)3gB<|cfT?#bbO!99>`A{Q&_`0M>hKu*~PmzbE_=SO5L zvd`32*F;0$vk_un$r0d(9NsX;+OXIvIct%EA$#$++DK#qLPbZtqymckKtzj9Ev zqd8hA$Eqh!61SBfhFKqrw21d8G%R%;tB4wg1oikGxeX za_ga?MCA1P8CqEi;amfbT?Y9U&1E$adHv?{?Cv!|OzB-P-q=t38LpSkP;HKz9#i!o zIGC})t5=u_D|m+lbooiDErC<)iQH4Go1(BX#xK^+(BlAt3w#fm#4>idV16y zIyAMHr@NX5Gw+Uky?y((+v=}a36Br`7UhdgVxJhoX|_E!SLBcAl{jUkN8Vyu#$i{> zArmLaJUedS{`*UcN_@mZg*O4r_N?%N9D@gIX`$_q=UcCIT5;EV`R0uhn==9~qECY1 z8JM3@7VS*Sr;o(*^Yg=exNa|ZInVZxiNgy zZf2p|B=F1Eue>^Wm*4z?&X;X1)boi(mf<(F;4~>D zKED*Q&93`It0}aBTaU-BPb3EFTK5*&&JD?y4c;H|w|C$h8>tSAb9ZLolLVr+cOqsh1(oj~Nxh#svRLyyHqA&7J(u$+!@YlTU3 zD95~gIXh6&HTB-K_DGtVBlsUKQ!qoFG4+BsMpb_f?*ve@0-Ei~kQtw7tFi|YN_ zk7sP(t%l0BR~?u*=4)2E-`Wg_-l3H-_);6nk+lN3LEXPYdKnkINp7Z@E2Re;5bHNvfu)bYBeCJCVxhHD&Xmmq1n1-CuiAKHU|@J>NjlK>xIGxpnW+#`l_ zw}NOPpM`rOQn_ewrMNU@D0D7L=~p>yh>dU3X9G}1zv$FI$5(iBe)K}Kmwl*_t%RUq1uZ*^5NZVtw*pjS^)DBpRN zp>A-{!F)>~dL*1^y(AcGablFDyJT(rbF;)|gJ;d%2o3W^%&G18dROKQGp`#3W!Qmj zxofaJPcJWMX6#qZL?i#hpbuSSO6Gd`=KkBB;%sa(B|4FWHyvpu+~2SC1$qbS3Qa?t zdO!Il`qzi)Z0z=?-fV&3@#O=aHsy}%%UwoBrkgtc!Vr64I|6Dj>*&7Se6yJZE8WxQ z*7O{+vrcS_$ByFytJ+^b!<{5@!C&E>>QWeQh)SHo@Ch?Ga#oy98}ffw9wz< z?&N_pGjqSjC)ZS&-r^R^C)xs=PFS}(Z>~nc+VxoO&`$qa?msXRC~26Y9IGO^tsxt6b~gTE zp7WgH`3^0|uKG9c+k7 z9oYtb8<{wCSB};M+r%&1#@-0xFrHg*`(zPR_2rAlN{_|DWdVz(u;{nXnzpewymY9~ z-TF2bX~fHy-0X$T<6~eL8W*^p#~nS{uNrc1YhATvRsUa}(gc{L0K z8bxL0&O3=}*9|5Q@O`u!3)h}1`2M(pC`@!GhmS@TS90gmNw}~13k~``d6Fw3c}Caw zbGQwpE!Z#Jut9zelt2(SEVdt;Cke74wvrNN+!@KZDDIE^(dRNWGddyFFB7P7&}M7! 
z>p>D@OAqJP&W3bxq`O^C_z@324F&t|Ua^l*XHE2lFA9Y6Khr>YyuT12@8e zTcc8SZO6nY852|zqBx3TL;J8mUy^(%FMcKR??tJpQU3AyOthK01q!%MLRHtDn|~{R z`l9E;CIP~G4nv=b$QocP_Kk0P9#f(?)oAbhWSLn&g%34`j0XShRls6Bp&?>SS zc>n%=L&Me5QwVbAdsH!W#p{*6V~1u)C-r$tRwK5z9XUq0M>ICyXaL|!?>2?FE+eOm znu^N#i%6`PbDW^{%Ryr3!op4$q*NCc7e_`$vKl(y-!+l&0DSm*arcwAuB*SY@cRn7 z4C_+@p}08m=XP4MvX3^@^r8i=*Om^$&{ggZ9Z@?+@cbkT1hJ>P!*}rG##BZ~&+eDL z?0o6Iu>0CP6f2fbX|%n>x{e7v7)=zE}}(7F$z1Ji*oTkDG`F5+WGycl-}Z9j)f9AJedpM3|GXwD&a zAyH8xH>>;rFHC2UAQjJcn)c1i!h(a14IcgGAcF5&;#KwrxQ^Wc85g%hNbGs3V^&X zlxx17N!)kbT~SDefob%Dg&xZ+K-pmj%(17~r)%5+X9)yiBfjOa_wbUIxe#JniI7k8 zJL?2P%%0B!h!HTGJ@={AHbAG1fMH4R(xi}hK8%T>y zd_1nlB3^vHg24B^e;c7>B>v3G@B82rI#9j5MZZ4nVq)rU_ZIU(l_bzFzkk< z8)!v6@r_9{J~nnBMTH?PQ!DG`Wr;h4j!P(-T&UwtmKE2~c(Amz^y0+{onS7_^m}be zLOnEV`f)aGZ4!>X_18eM8Pt}esW|JRW&M>K+F80|_X|7o+X09{*-#(Jr=qMJk#`|6j84uI zK=3q>0+1BSw`XmSBy}Wc8+mAcsFx|Q9m4k6mM@DKgy9OhXZwruN(cmToqQ%|ZA~zfcd@Tr)`ErD*)7Py+7V zn#vnJ#UfjxYLVZ?SMv-WdM@6Ah(vMl&*H0yQnGO{JKtS!CsK=V6{C(@YExklbGlkq z=kYOpuCHirzNrI?T&O4JP@S*Y%nxLSOGHut5EPUf0>;zW1eWs|qf#XEoC`V0Tz0=oj1T71?N7 zmjRf5_>j=(Q#@DrscSz0>c1tMuU7gF0qV%nqxpGxGmmG;X7>5_wVkqe=+84#JgNU{ z)1&YrtFNJbEt~Df-;esSy8Z=X8gAxo*=pG`>{YI<`wko@bm^OQDqOJ$W@RyRA=S)= znYXl3^B1xm%%pFE+V#fgN8>`jv-FGe%o|@yls{W2KQ88IpjT+6Ms7CBD4;hy9b)xr zitf*7O!a4G#ECg)f3QqUbJ%1*c!6i7cFA^frEyr0!zR>fT==^U0d=F)Z;!<_U}~h! z?4-vE27C*_Vztl?V_5#WrA~?|HsWl<@*|exH3lZIAd+_0GP!4~A{SrDOdgaDcOFw# zM{A1}rCbAK#3F}VZ^ooQUb#4Gv;39DHf#mz{g6JR+0>Cm-L6yvMaZl{MFTb#(`%=7 zdtS3Qx^gxHW--%@;11WD=i0}mm6=MtX%xn_893u)5YTng)!-@aU0<>PoP(&4V}}jZ zv4(wIHZ1p*&4+D?ibU?-vEAv0Xk55fVJ2UpEj+~dKWyuh<$-i;Wz-i@nkps=p_^N~8^#02> zRAcwO@ybZD@}|awqXKva%1-wGtg;T7&E$Cc>OVX6z`}eJwlf4p5r>~wmy3fT40TBe zwR+NY!48pEK*@l?G7mRh-IwX*BLQMiukaR6Hq_Va<=*t2N|s%l%E+3rbc23RtxKJ0aZ8*uj#v-J%)ZSP$ zve=uG3p*6B0WLxM@u+!`!*~<6=&hFA=39x#d`GwA)zK!7eFH_dM$7v(0I1JGHSWGI zQQpJV^@DySt>&NcI9@d16ur@w3E&9WJhghRUg8z)s>KiPylpmA#_mR+ighqVP-d1u z(7^F}r!-N`N2`Ww@>SQ~vzm0kncKx2jjw0z(@~++8M;6Xz>Va0kpCoEF*1F~ zG6VKX|5=m+u_?N1FD7hUfz%qe<^kx1Vlyv2(yg%^yPU&+X?dz+b9p+&Rfna9>|T~` z0niDl^jum5Zi@aRZH;l_eH*`fy$SY1^>W5zR4OYA*pTQ6#!y6h>WzF&!!nl+x%*_OMAReHHG(o8!~@)w-7l z@9XL@_x>M{`gjLW!~=}Y5}-P)0lrFX^K|sL!Fmk;fbQRLH={y%@ju6i=|uyPki*Xg zwQ^%E{;kgkE9FQq0x!5~ZRz(6q<4e4B?M+jY@wAAJbJoCHrv23Oz}Kbk{@(pIgz~9 zXq?Vk?xL=)zT^vdt9p0%nM6c%%D-Qd0^p-~|0 zg*_j~%LdYC!Dn8@z&%OewiY*z5FmCxcAz@;obBMp=g$vr|2%*kRDy^Ebm88Q??HMw z%pyA>WP}(qb8|maVZiemc0p?auf7xOm3I)hZg z_j7b~V$}?>D}%NJ2jy7#kYr1N6`}Om31oI4Ld1uJgv7*rG;gHdrYA#4u_H52j&cMr zNP3Pp#cq(|Bg6qaU!AvuAPh^t&N^zeh@|2niMIY-UqJ35nIzDPACgT(Ud+3X_B#28 zYd*cx&|Mmw;?i!Zu|sb&KHM#scDZmMo_A`a9paqgpL_gMouZ}P4txGjJx6cv;4n*G z(C~&1DlgeTl^4_b^XJ9I2V8G8TdDKOWj#iaim;t#BNs-;$AzqW+n-u%nJX$P-WmK@ zOuL4^mj+K5XN z7Dhw^I|u=6ipmw^i<3tQ5h=pmS@1h}r2EmqlOds@Biz=&0d;05#58>VmLTm>C5T9k z?iQ7P$oHe&mHTta5DL28n^)3#HIZ=LawSEIY@=;0}Ry?I)bXwyOv#?lu+dP9?v@5hfHQJNKyy87Tp z7l%uid`b_1_Z2=iQ72;6}V#rXe>tq>TFjwZqo-*mu~h z;lV!q8x;o8Eco7t|T=&XbV2 zyhc7-<~E+q-!_~t31+}-C_OsWHFhoMpz%_$HQiGq|+Xw|bY{OQ5M8FBG#NWq6FvPpUD%ok%B z`uh24;xEDu=H4C58&v{tb=0!7a}3Su!iuNTGkOc|aF7GRz8qP;C1ZbO@n@sk(gagu z=2uVw%YVMSGu4&DM>30cGdU(G=oQvzroX?R^cWYgcG7i6OhAxvhVo58Ct@3A#Vp*v z`%)z-LNrM7UcUAoN@YRI_ABB8@9&PZAFYjo8e2L*Vtq6e%|Vp_eKUZ`Wn;NF*~-Sv zt!N1oSqynFJ+Z`jE+T?!M#Rt;rQM;j?R$5dg=K$}l|*@bAuxD9|12tB3**He{^%8l z!f2Zhx3OF6=@%e@l3Yml&-@OBb;!bg26u|1M+KaJ>7ObGCW3472?W5}&!}2)Rn%$b zI*>5Q>PNI;Rcbx}IG%pV(sBw&Fizi@nHg9rI?L*+9QjVOx-NszVxc>oR?rr$_aX0A zu$yy=7hh`t2blEMW$|Y{U-<<-K9#zo>{lnca^fu~?;pF=)JP3bVcpLf@$%cv6coG= zKnl(3kBehsn$PCTUp}Q*IHNhvdR)~SpeZMlLY*XttWe}>xSi@s*HjE2liz>v1f$L` 
zTg+zkg^yzqdi(+c=n4}Wyf1zJ1&9bJ(dH~~mwqiJpuNT`WC`Ygz2(<4zA{ynBxRVr z>?je3H>2XyktbGXdRXvOex!!6wZjHfA>;l%_nBNW>=I;ONl;(nH#Kju5Qg?tL1~i> zissTp-^bk~<7M}qdrd^0$+IaUHrAlbeH|7?eAm*Jpq5{29RviMp1iT6Di;zwv+lyS zK!MNF2C%*E`^qnzglfgIgX;&s0Lwd%%Gbl1r5AH*z*6$)zPqBN6z&uA9khfDE;celryesD zl{z$wr&zl4YQwk;TseP9T|LCYoaHSZqJYZVucP>kt@5kSkX!)XPZbkY2wE8`>g(j2 zLl{_a+*2VNUd}1?6R_j0ic29=jP_l_7$NLm_`o#Cu1lgY}~T# zjsHg4%k98v_i;&IoJ9~P6c2pY-Za40%c8E%py1ZZ3}WqpdHeKWKWq^~2+JrtD;2i5 z@42;|v@rqlkOVU6Wl4tIMhb5T$r6zA>@Ln9?(>mFKRgB>>T|kR6*5wEBgl8OUZtX7 z$mL-KB*AzP2$kW=A1sB%jL^f%qtHWMVTqg34Csee=!cg^Sqgi0u0&l0RGd#ZAv(pX z_L?%ZW+?Q-#wP|g^DWA?hOsAl5F=g(f?5nGck`}*>?Q?*P%$Q!w(i7W#QgaR7#Db@ zWZjZ?Q(Qy$Y68ho_Jg#-s^Ng}rrXmTrrJ-Pf&`LeJ5&Z(+Oj9l4CpQ}IcP+Wj*dc_ z(+4d=ozEldE`s>6!-A2zoi``#;^Go%YN`aab`Gg8OD0kR^x_3)IDl{{^Lyf@fex10TCGEN!>jRE3<^dQ{B$*B00ldR`*1rch6Yf`*Mv_I5%c`E5BMt6{yUz6RQBmZ8yh=y8%^ zfG&76D(28$lGHah19C1t)|csV#Zvd(Ehagf6%0JAPOlK>E(2#si$-4r3&`a~BqXrE zs~NhY@e#XEJJ0BaynfTfq(i08lp!SN$oC&Ven4nk2EJE+9=%@u8-xPLtOd7PF0C+; zSRMHe(LPlOhNC6m%;KF2Y*i9{`!E$lr zioN{;Kmhj}w}1cm;;MP#dWq9))sv$@I+r_)D|;y8A)GqU9zWd!RJ>^Zg}tzJ*=HE# z%d?6GU!J*9iIx?$`;b>j&_YI0v_@%pE#pe+T?}FHE(FQq;^GZT0z{P=#*V`GPt_sr z{^~6t&akks%T_P~q-YF@212zFl6W8;C-oC+$etsStau1pAhcBtwJXWX%exLsahjT$ zB_}0K2pmGB&Z6v(MwF1PA?OQnf)vbX{tBQITaw?8yb?y~+Vto2;=0WziDUmMt&*T6 zF=$J~p(tBdEc_7>5>kmNBHQ)J_|b#p*I#HIP0vrE4(zm&qGDSLJ>qo}Ofy~+!7(wJ z90w^0B%ToN+HNSs4P7VM#0@X|`O^q#WoRkooL5^q>?3relF(xcMothNIeKq_zJIuM z+1MlRucBL@6>XerP>(yCeH1#~s9#5FiSx2~A0UNT=RutU%MO^9L$tzYAoX|5b>}8R ztPkhXv}{eP1-Xh#Bh}8cTv9evx&WG~oin&FRv#J~noLVY#bMU*_3k`mn!&r(be+P1 z;w#;WffAOn(dVroq?x9o47K^ixIju%lhD#P>fRtK(p|VepYWJI&r^z<5c9U+&pQjjlPi-c@e$Gs%b-wSliueb zvC?kQu{ztg3h^x>Ma*uv!n$Zc@cz7U$JfPC0#qH`u{yneeA~Rfwze4R&|RW2R;&n! zMy+jNvo*It&Y=|uIX#Jvn?2AsVabtPz=L;;OS})#UxH@Ke-BlWNHZ!gqi&4rX@3j+ zeY$$`)AM2D>uVN;z$`~_Yd1};eW5a9Wo1nj^j48L^LOT6m>1fXZ)@cREuUdwNdz<( zu#Cqa!lg%+nTKPvwEJDhopWz)(14Wu!h8kPGn#z3s$<5f$hZl^4G6J$a~fiD3)g{^ zd?y&d!Egc%WT=gcLL7VoD~TxRJGmXhV>fe`Mt=rqcMz2L%SKaSf20{=pD5CB)hzbB zz473@&%Lm`jgv29fyEo!4Q&ulye1A`H5W4tjgQCl0sH@JKukzT+g=0qHDnNt5itDp zV{qH8=D?ByeYK*(|EY?;=#vcm2w!mpsa>xW-&8QZb{$3GE7aV>wIoY(Ly}@*dUbt8 zwknE>`9fM|)&H?J;-Y3>FYix)56C`BvB z2M5-|m(nj_Dz_Zi*2BK=kd7qDopk`QO33^r8nixD0Ho6+}^Jf#*Uk$MSJD z?#w1o@?_#&!OusA22MFhLiBv3cOct?x_b8PS&&>NhRQLJYX)DN`gW{ejS*J+`}JxF z|7AF=oA?dJByjlv;!IZ&N02WlxM>*yTL0Rct1Z$*mn?w`0b#rI-vE-zZ`qy-SbtiC zfC*~e&kX27MkoKZf~@RURDMkLrf>nL8-%boDxz>p)UekAy5-ra@*vq;$;rt)`tMca zZJ^;N(aRcvNr6dSSXfwU@MeZ`W(<5t%f&>g|Rz`P}mlfS5GXrMiNvkF4g^7=nv{H6RTq_K9u)J?cPbJM;;YXS3ypl@P85`(p3 zs*Myf>`ufI1928O=rsT^92^|wl;%6gOw-;Qu&g8phg>`)IRyfAN?!gi=dG&?*QBXfQbWena4j+p{BOhWn(gBhpRb(76s=PeVvUDSX99vzd(O(UpUF<*NgRCDz4xvgr`KP2F4jyRjYpa9v?2ANufN5-cJK;e!6TIPL_O^vV%kyTYy zPyww@v`B|MPY39}>RJ=I zIbL=RZX#aANdoQ1Lv&nztot0Wy3|^;Iaki-3x%g{)a;vRo0pQ9GA@;P2?ORJO29Go z;oL>wgwo{O=eCw>A*j<0<}8*m&_%TEUv?NC1BeX5ova&G*6G`gqRA2-8>xu_CcPqY z`7cBrEuqY(;??JD(Wne>UeWi?sllak2{Qu|vXf((4WR^G{+x>d(ae&|3Xri50EcFj z(hoVbI1=T^^=bz$_aivBfnKAL^xUE{a{ul?VxXRSJ@<9`*7wH@(D~##2pbVItS(p8 zZvsf@|8OS-^${Q@y8TT$#W?Jy$<3Q(8&llN-(0gRv_~=L-hw?qA57CC9#rH$YJ58Z z!b_d;@J#@eQ`HK1X#Fmk5Ah95kXQ)0uG2mz7QxM?^734oVk9q&uERkC-DGM3$bIT< zkWrF8f@e^3vU`9he?9Ov)o@H8tw?L3wO&tOW_;!CPYttAw*qySv8L&d51wS6UDn1& zyu6;bMt(_++^H3%CSkesfiE)->N#Ifb+`{W&h|P2c@QGyzAp-lbLZ8C0jG32NS5f8 zxues|W~7;_$^_QJ@$jCN**=b_scHsH*utxe4>XcZoQ*!s$);Zmad;2dY2a(Sa9sZV zncQl6&$aIj4Gk#UwOAy3z+DC_r`f{*b+zg(*~$8w?e#kq(yR70A3tj-&2ND{1VROP zF|5oIppYAaDMuYySFeWUK%R-LL+gAao3J`q;O<+$tIK?h#H_x(@`ge`_3q626gVRM z!KQW?d}k*!TFXnCw*{`Ws}(i;yx)W}T#rFG2my6J6zWz^mj-H{`C}sV%W_cHQ~IiH z<0AU?*u7u(khrOrdKl;crW;{p 
zr)5k3@NGVU-N!`YNalO?&z#$RXDRf>N1267dq#lq`I?`#%ntn!vFFyrd~oX< zJGZvjd+UI}Zu0=Nd58AigI1pSB&0RP8}chmrCbM}Q=fkd-ys&+(*mO-hCk;tm=I+n7_%la;*al|Z?@r>0hX3TVp~vi3U!1WUmeOo$ zY|H}v7JNcU?%+U#db<$z31IV5*X6p}S~Y6`2+`+m+XEg`EL|%23e@_AlYhqrd6E0S z!#h0W`H#-^N}MyU@H%zv4Qgoy4~LG9PC3<<+`j(@{$bv9&)^!%_B}4!{3Z6p^uY1c zL1*4rLKX#K7qhW++f6{`W*_)$hJc6wO+$QZ{`1BSIGFK#&y;xqn~(|-1<;<}yIaAU zZBYM!<|cnBbOFj))X?A?%AxYBuSn(m^?Y+Bs0y=Iz%RyuGvGV%&(cA;1T?8%xv~Z! zAnPU6dMk57eMN9m*EO;s5imduV4mv5){FeabSlYviEn6!dSEj~Nmnjf0PLu!m zSW>babQv&~6u91+m7e>!s0?KJ*hIV%&^Ek#7A-Qku{vi|WMgowuU+CC=sbTtfKrra z6^xGmv)1M#qM}aH&_siRldFWX6bol~d6lP77US~aJ&VBW)w|nmvQCUotQR!#^?+N4 z^C+(YcQAg~vkMVIAoNdhy**XbZ(k4JUy_BpQ2obUKn2r_;*pRj1HuCZD2!8kS1Vx# zcy_q*9p?sPQ3Rq-ym#1q!lR;kUR@;Uc?=%+W)!mU67KEtKQ2M90}uBu9B){+`fBq! zxDB9!UI*{_h}cLX-WE0$-@{N?O0^QKAU-9J7mvr)*Vng=A40rT&+Hkg8w&V+=ouIg zp$-V_icsNKTY{LRQF8r<@t$d_4Cd1((7TW3#-~sAJdZQv{|$@Cs*x&KTU(0)JW%qI z4L5X0y4Q)VeqJ!?gWV6_cM1E^8b}mfV8?{ASYsKsjS@t6;K6{zbDAIiv?zv$@ZQ@q zG_S6(vg&)Tj{%?$TL5$d*6_?JFl$G!DMqB)TUw@96%HN1>Pw3V-1L3OnXP7j@b0x4w&WH63kt4}`rj(pWn^7QnCb3A}^JUl<+;;RaQ zWhq<#84W|@jwO|%$u!6$);V*mc_fQI0iRq4vBCuvUG1=N?FVCgz)^!_F*{*fo<=HPfhqmH`D` zInA5Dd5Hq(dk8qHlG$qi_XH>-(?u?TltorO{2kp?XWq{4Le?@LPJZ)-6Wex&ud~qf z`~zzpE~uz~&_+olvh%p^pAkK42af>Jw`Ud@31Jq3fQ0HIs)oDugkSCbO(~r35|?E0 zK=pT^HAu?*dlt4#9&9>5vQ4pKsEuwTmvPO9*h;Hr5;Pj{5oLdvN_U#=t^Eu@c6*Eu z6Kny5sY@Wecv4iyeE>FK%2Wav)7ILGUB%$E>!38o;;0JG=W!jR5zGYTqgCDYHqRF* z&4Fs}hteLr<*jvAd>`k14i3JSIWYMmDr$03OddxxRZQp?lb1S4Nv|`b=i1vosByKyBF)?KeuF=UV*tv+Jf^((6NSCG3~m!($zRp zHK&!}rw@2*K>b>ODcH_ zv@VTw4f%pdd&oD~6TfLUYg!dGMGO1Ef>o8WwdH;)$DpLM&I&NF4XA+-WLaA}(ympH zNA}RtbU%izAE&2S{1mlMOm|v?hy&-se4Dq^3HtKPo=MC}CAA1iB5cWawo<#%{gI!= zQUp@@wCV|%N&5Ts#Z6Jg?=Ki0KQCV1sMW_1*HeGII+#umm>mGiwNceoRj8xA4rIA& z;VTLK)N2-k)e&Vr-7EizoJ*z<)}KI8UnSj`AqajDdx%rU*NuRB7w%oYkXS@WVdH+W zN|1ZsY)^j5C0|kyK+?+~iGzBlEvYUBaSXW28GlU?ji^R#(4y1Q(#m{}YLTHZm}CzO z-e#qvvEo66uCN#k?DBqHdQjx1izT)HU}z6s?b$~ak0vUV_lLc%EXDKR1zgN=EqeIG z(x_HPt^rH~2E?{@qc^i)I|V3$nw@XneCgXdes;&!424nt^D|B?n$lv&4iC?LSMdo2@;d|yK=T2 zF~!^OCrjZz=)(l`;UW3oeD8Mw^ehO*g@OXpJimWN+ZKWp5`gWY^s@_*2EZqD7_XM{ za5sgSDc>D9=kepqvkvJyDWTM97SQC7znSxL5-1;EtE)4*-wYlBXa}CF6T9XZFXAN_ z{^~DZ(z@R`iJnsQZ=Y7eqvY7rge!0YB+U5|Tm>f`F5>IKj6z-z*cECnGxoHwCe(Ja zt0_5&u5+2C-GB$b*z@3Yu%XsQCjdOqrUdty9FTZBpe7Tjw{%Q!n6+}5f{rEp>S@&8 ztKj=-@7|#NDX_S0aA1NW*VO0^h^Pf^@=-A2F4`}W{>c&2Np;}5dG_pD-*rer;6MI4 zWJ-X%ell|hw$?lT=&+tiuaG@u%Hd2nbz@1h3Fmj3P!ROG8N};B5QTasN4Y`t2_xOp zdt>OmhNR_mf3W~2lnn8c$SWzZ>QRTz*1>Yy`%SAqZXKz2Q;>aT?g2Pc)%^>Ls}G8gwcik7q^vo(Sk65+Sh@TG=#I3RIh#OtQIsSDWXP`gQ+IKiw#9*4t4`i z>g*a2zfN<5+hF^?+}259_S!1tnS|w>vXYWG2!U84X^p=t&FSKGsB%E*76PRaqVyn@ z<8eO2Ol_I>kX1kdG6C!c@K?-jB@=6pFU7gFwb%?CW5Vh4dBZh%i8+^vIa-R^WEhIS zgwXJ~NW+=DX}U2b+T(c%(Zp@yTLQRJH|%}<_!GUbQ}|LhK;P2TI_8mu zN5O1A9Uc}2*m9MgIrQH}Qkcr7_-N-WhzHX}LR}E?`b{Qt&LS_O`OWG;B~HuXM=mMY zRInDt3g5ki+0Tsscf6nJg3Vx*!>WaqhsXMRrSC@lL4+MTF<;9OKQVp4>n3B-4U@0W zTZSI`N&STduiK_NGS+DdCqiRl+-JJaZhs;28m_@#fI*w)hKm+~hyN{^AcyyNd|D;} z42yYgy)Uz8tehPxmjpT-lZ(eF4EPknVw?7Ho( zMxJ*nW7O=@WZQ<~A;i#h5?vxo=l4mSbpz3voIxV9oawOI-V0Gem>w$d56^NCVP=7B*@O8AV1DMo%;rUlT^MFe#gNN)dsV=97&6Ogc|yyktz~K z?6Nv(d*w3a-=3*W_)-~u(|*PX=EpL~)UH`1&4&eT`mX~Cs7WtieiiCWZa82uKYX#8 zC_&Sm<#EteV_(u^OTCLQA3hC4uP6*X*UdaGp1Sxc)7Jag-IIJbS3@jZD+2^(iupd@ z9ds`&2?Jd`mFhKT=&3o0ZL%kRe+xYx(tV*lP+N8u+|kx~WGemvTr$AktA%~JviDp| zg~%uJV@mx_hd_OhVnPq%WYn%db^K0d@6ks2JM3wjI7TlK`OKcC zvne;R*{o+Z=UwyLW5+bcKo>Ctb2OaP*_yS*0N(NI?6&^eZ{Gbw@cqb}6^kN#?Bb+| z3?A`a>*>#Ll z@<^q{cxT0L+IA9u_# zo|N|*hgq><4;RYC7M)y-KN(-o<_9@?WmiQk3<3^iZSFl5#0Sy@yC|j3RB+YFb#m&^ 
zK%pvjvx-;~GYz%?x4BYvz7lyjs!Q9&TeRGGSNxv7TfsqNYl?t7bFXEdv0kGSch&eB zDQ5rQ1Ki$J{q)?z#}J|R%djqNpbXJ9sXn<_-2C>7wOzH6()6@&ymty`#Xh2mF$b4r zI+a%IbGV+BW7dOP27cUG98YX#5EuS5R;5;h;xO*JW6w zXMro|vH|}PYV>r@jU3>ubGTj<5Kv!clI$e0a>ay<`>0uLpyiC9iH(5OxZwNd+t(aS z;+re3Q4Zr1EG-m&+>l5Ky+_-4xA2*;Ep|vGF6RpJU;BPvlT}`8?VS&9t2SAKchkbX zIQ%nWS#(liL8;>)w{Ps1oMa1+G;rzxKDa>oDXuc*uGO&9p#YA|aGsT~c~V30Vgi0; zm~zuJ@$!T_55T#!DTy7COtJ%Q16&xriIgR200M+3S> zH@yu@MbKtCq51Y~M}!WJtg>vlzOj@rWantj#E^RUw#rq5u)ep)|MYz1S|(q&XqF&$ zd_>R${=zibJlKp8Jnuj5E-Dd=A*fbJ@U+lOeT>a1Yo27KlXHlfWp0YbIR}h$A3>sc zTKDALOWp#}Im2^3?UMREBH0h75<}aYb7p`;GQibL?M+zoy;)EBtiNg~$tqVHf|G1)JN-u4;V z@ivF2{k&)F_)5<7xD;5ll>P3PJ>=DJd~#2I1{XPM!hq5VPF6%sIk14Rk_|^UJT&25 z4bE@`qJu9COmX5Ma&-)Wu>_8gL}uzh;RiB=YH2&M5yW1wdYMsa^YCB~#D7hPS06lf zAJGva3>Y7FstnNc>A3!N>(9xY&2QW};Fb>8w&Wo=7vyl|>eY{6a%-0^Iy%1_av;Jf zZ7bCPbAiU|KjXzz)8JGiD@d%9m|4Dc8!$B}QGbGH}8={ zH?vCl1KwR#QHeafu`-*%MOU&(s@#BkP5Zb**lDTw^d>eOIIAr63wf0vvL586lASms#sA`+RsZ|k9|g5bB)0(~(|a_x;kc>RtV{nVDsHsg2)3Ltqzxx)FL!veE=O|BJ z8gL=+tov5rbpc(vM7J12*=--AE?)6_VtBZy-^D748#Cu!vVo%6z_^C9z}b*XZ~)if zUK!AYHA=JSQ5?k|fbch9dg_#l{}?ue3Y= zjl2Kj2_#XK93CAox`RN?N!ZM=?}QHIToByc_Z7AO>k%PVK4o5L*WG>ry;X>FJoeG$ z%YWJRxn^>xEW2mo8npdi5&s z4?x_j3m5p9eW z6JlYJHWH-xj{5mbst^5BbbzgD5-mDHqoUTK?wGWdxlH|c8d@g7#@C5kelqbE&fRGn zy2xOX@hYm|_b&ADihyTQ=8#6}n_i{F>uED;TpP+ox;D<2O9 z9rq@UYUb$Ab=U))L!N$D_f#JJAasC?pTx)NM+-|Nq=Xxv#)~3^%?u=HV1^URejvoY zu!F9G438OMy16Hxc*eUg)I#C%9*k}tG8gZI?htwm^P}ns9F6h{3iV}UV>5#zI_@E? z6qi6_Re-bDdVJeFkFS{jTYt{_z}q1BimDk0}AG5JDk&4G7t3=s69 z()r2*0D#+#6cJ)N9n&}rb{&pxJHjXcv2E&%T&*pU9TbCQpj+Z(4et)tGWkKcz#+qZ)Sq~aHagx?7bE_o7_=ZMT|2O z*WY>BWvdX1yZ;l3_5X`skJj$Ifdi$z5QO=V7#)Sg-<-#^ko4xwo0Jqi@>yGwKTaU# z!~gvq^mvy|e|K*~W>FPD9}p>bt;AA}RA&}N;rA2r6h#6DE259z!O_7i_+7beE{k@8~gKk~Akx9QA0 zw((@$>kl6?RFYd`e_=i2T<@o=#5<&a$WZQDZ;?+f)}I_T`XzyX`JvVZ|E&Uh)G~JY ztcc__b#>D#_h!0-hiZwCD*ECNW@cu!;mX)Ch5i5WRUt=?BXBw5r%5MzFNmAYnE{Vk zhTG%hJ6#dfj`lkG!%>oQl#bK1_aA|4gZ@4ytP73>JSL-AQF!?L#Mw)a9&X+7VAf|n zx8W`^4J#s*ko3R-aFHxqo}8|BUh-QD&)#Zh0xIk)koQ^pNm7^IwMJgd2seSZ%p~Zu zzTMs|$8Caa=pMWX2G#4&Dz6xBLcp0|Da4a9%LBhC6nn}q&%yds>LmZgN!i04s|_za z6NO?3Vkic@ncz$;q!)OV*Q#Jdl%*AbzJX?yOWHW&AjmPQE;T8?;dl#ppe(lE$|eWH zDbCo~QaHjn{fU5!Eo;f_o8{OGINg|Q3)W3IGNL{G<;xea$4R;@BxN$dI}+dwn6`nV zCipJVoI^sN4lgfJhNYSPaY5l!WTC9&aYDiT;aqEYMG;VQ=y5P$9B9s4zJqsyX<{Mk zLGWeg=FR{wkBp4WIIPH9Af!dQQ&Uk=UYC5Mwtkh{MEs%DSYmkDLES7`a0l|N(23ZO z!r3~M*UAJL%M~i+^sNXz8=d!d3A)XVjp5x+g)`00@UF*#q@>H?G~Cbz%*C70rh+Hs zQ18NWPrgOboi62YMcrt4`9LPfTr+_81%>dC2~jYzx*w|7Y~H~0ud@XD3MBe~WvM9| z$?t5(kK@M5HeIQQC6Tb1G6u z6O|!z2$e+AfMiyNQevA4smNF=iV7i-p~yUEON9m{Y&4j5#xfP544LiU`lxeG=hXLo zuitgO=g*EkJ)h^fpZi|-TI>G%@v?JFkJlBc@SyV=s$iWz&vE;(8%RZg7E@-rYe`r~`o-&(ms3Y0u{CJ)NW9+S2yw%nJ{A zaY~ba&)D&Zf7bj=ATIaN(0he7nlJ!p#QK3u1+JdTv@TcJg$oyeafAu4y8x8=Fl>P| z<@XV~%aX-1jjcix^=qKp82;?I$u->u?A{5i%OTaOtYV5nc);1~BrpE#)9MITb##mCR**~D@IeQ+6G8`fXrgG`)Hsoa0i*_!%yFE+o?`UH9h46a-Ul6iT^~f#9+(kEE z^{()>!ZSJx9RS2h@H8@XL6=0n8BlumFme?{WVK|@5b(j=Dx~T*TUA@2k^VM6H6cEJ zj9#JPG~W$R5>erf>nbp_$Jo-UF2fB)QEnI=%xXAFJa}+uSlZ%s3}`p6I!)gtKSKZ@ z=mbLP4-{`Puac&69Ibi*Cs`0q-y>y)38nNI^5D`261Ne7z4h$fbm#JZO;DVU{`I3m zmpym>We?7shu>hXq8HjvBT$ljr>^8-tVM{(jlK&p5v2I zg-;XUvj9pV>8vQvvx>;A+Il?xTglQ~&<#KP2vdkGk7-xJA%Nq^nb_RTcaQ@E&DX(Q z1vy`eyJ3-syRUpVt7`?f{~8dh|+cX)Hs^e4ipywf9e%BZ|q(qLYZ{fKAT zFl*VV<79n$MVFM74!uabWkNnacQkrMpUFS_Q`+8W?K>1OCQ~QmqHW2+-?*cj(ke>@ z?{>pHNNwugC_#SU8c8lptxGc;iI$5xesbe5^4r&t#p5P$0~ajRicKVUZO(BrQ+)7%jNhI3b4PA_l4aWjDftn-Kkrm~i!~viJ_=17D19+v z6LdlJ;io&$Q$!scp4`?F;UPqQY`5uP=+W;`1$RIYOL!9!y#GO=_uC`F!ua)x25wM? 
z&YA|b33H0V*+TQGwM|sM+otB`6DA{PDJymiGAJ!Ap^{Dg;5aCUzQD^WeV@cg%VH<= zcx1lw7rQfM8Te9Vk8bSZQ!KUFv+rzgZlpuok+IQW{RT?c?X8a*JdYgN$=SN!LayWx zS$6oH9$nEsY@w?&CA`<)!^TOb;mwcxt@-sfLCun`Z^6c5O{;DZ0S~T2-u$jVB=D1QoPX--^XkEFkuy{v^U^>h<*p4(F z{GJmCT6*?+WvZvWh{=x3+X2IpTnhK+%jNjNy6pV}b|?GIX7+&uHfLah7#o z;Uhy(QAG>xfA?Z22-xiSW%OP`lq-1es@Bzs&Dmz}?4{jAFW48z-P?VI&(E(WILJyR z<=LG{Zn^%3=(^mVfSMG32nr|VTUJ||O4ASUY~tb330{~jqR*%9QcAfM?>TsCR>9>u z{v~Q^1^c(DE9cux`gnSbSQuJ-{@!GH{N1|_|Fq@=b@ki@y3^Ct1^6=K!x>^hbsuHF z*OhP;FKKqhZZ*ggV-21rFyXl47-hHy|x<=QAe1+?@yGRdZSQqv%YJnU=k4{!d z>9{kV&zd6|S^jM*!$xr7Z>xB8K-j{n*;Z(eb1riACfF;uP^CV9gV zmX0%K(KQ8f?JYRVhXgN05wco7j%sA}pOZoS7+7GJ&i;u;ghSDn-4Wu(uhq(5BtZ&< zd}Sr>Pq8OaR}cuKE#Jn$kpcaTp%%lOGxKfC+ghpfSlAS+Y*^QNj67;-ax53={s5c1 zj#+dtqY=?0Je>3QhJyp6Vr7tR>X$!n!6rvGjgiDfH)y6Gd0dhnJU9dB%u$L!Gm?*Y z7A;sO`3j)F$}YeCjJ~yc2JdEI zERds5F55FUi+S?O{cU4K5%KL&XCJmW*NF^hJiGbj$vgFt&}SQXTa4VkW#|@%m=*gb zbhvBGJbz}nlY5mvU!=^?PY>+jgM*ugE!d;E=o4L#yX=p))yJ)si#Jq}*1 z)%4RUjLAxK?1p7H6i}ujX=fZ*>nmTDgfqtsI8kjR!jp9ODDK81@z1DUgydM8zKlOC zAAKY@ELEqu?n+MF9(sDAJ`0^m-~2_bt!nl23&saB3`baVJ?K;P0ji|CX$P12sknrM%Cg%l;tIP@jYM@MPxcg1 z(+!3d?>5Tk%yFGr;a4`__Vpwxi5U+SQvlA9$>e7Eo;Y@7?l}1LBR|4s1i~h7*G-zg zS8SoVWT6((edb-s8>%Uj6tsokN$qahdbxccfNgHGe9Pk$YfF}tG0wHp1svyp>2fch z@P$3k6$}@!hTBXqH!G#3R8Ea3$>i8JKKORdX`OyGxMpC;b#_wt_OCeo{YxpxneLON z{tj&y3%?tDf8eld$8`I(28!5lw5_ZtCH7Oq;%Ll^xMot3v6KsI%1nheGY zw&ZXczoxxRZVfD@&=JPwSl+4C?dqUMsWs_polb6*@!oMbYJL~j<;qd_RnliGU0QPV zJ?u2xcMT^n)3uu%JWAghHJm)wkn>H>Fy=sQdW3x+kA~DBAEW!Q>EngR0}t%E>el&I z)moN)Wz(&*Fm8LeCqL`b?iVQ?9}1^jLOZkfQTF2k2>P($T4PN{%lW50^U4fDi8wvC zyCgfm^-7BX$Mt_7QpjrzN#EG9A~dYX+l6pqEhc#1FgkrSH1p?`o<&j>ykbKS+~^PN-yaFD-p&1B!fnk_w(kym3XoRD;x%_o zS~#6g39`!mNBuk6I6hR*c_FPg=i2@qkETz5sb431FX*Q2vb#=iu&*z{y$m*qtk&pu zBu3DD9$yFd1eNY+S#uZ6^&+iTUVk^+Ky+C@*8x1HB@bRTe+!gTrUHv(v9hF6)sFS z?fCknXtjKn^6>XYn3&Nb4f-D3?b*fLDb=EQQx>n!GzhqVeZB%xULFd5L@ZNJy zdHZkNaE1kDn%u|;jqq*p@$rE>Ca%pDrAk3BAvZQel>qQ|xa2t1Qfa*gh+m+GXzNFB zcge1{1U?Ipd#f7fNC0LxsK-p~F_2!zlROMHm%(9}7@~3r-v4P(b?-W|bi*_fTV(+h zX|gBk25{zL4z-Wyph25h(#qAR6@tvc+~jSkP1L*#P=)aQd?S7RHUDd~ z$}1NRa4c|0>G{zZ>PXHC2lbxLBXOZWVekj$%C_NU<|q zudlgq8EbN`+w)o_XDcfw(O^+XEj_WzK9aqTVgE56DY&8GMf@-%`JrXwtm6V*|9%ALp^jmV_U^SYiW(yc#0%LpI4iKKngac*U?Hnd!pc#k+JEbmfBs1OAsf@2>9{ zNd2~nGWKx?<%RB1*>b$27NJZ|i>_SCx+>fjRs|RofaDk)0pMe&9UQXo&H%`l{`s9% zR8?`dt@a-YxGRD52^p8m_ps!w13RqH{F7UEq||!kh1$>0xdl{aT(-_v3lv9SyV+~& zt^?&T5?=Po#^ECz%I)hjV{bi9zB|z9RE0Ygy}Ty|Y`*Ivv2+or0U$~^-nxR5lT*Sage{!H$IqW|?_MWV6Df@1KEy4y8HFAKvB6!^ou|MM zcyjcwB`%Rzb>+$hD*N8_2Ok$oxfDF60@iiK(a;xS5Wj?1nL!)AaM=7Ei!+QFVA-Gg z0~vXtNWQWG4QU-Gy$(QFQQMr8b6HuOw%~{<5+q&EVops{U;`kTBn=<3TA!0ed_{*i zSYSy>>8cq%I{DcV^G8lF&n=!+=|&294T>a4$uIDhhmJD{rXUY3L{(8cE6M(ukGQb# zIBuJ$MvsYJt4_ja!V#}yo_SsR;YU1_Z?=XyH}zh+&i5|c`wzb1Qfhc3C}{oOcvMM^ z=NZ18$Is~i2%7~a{l?+kT#;tYb|$1AwKSJps!+9x5pyl1sCyx_+0#cWjm06 zKK4^|*8(NK4Qv8wuRNNWIQ)j!I5qSwIu7C4?55Io55 z`eHJit6;qpnL&pot(42lQBYJ=TK@hbh4LJ4i^YZ|R9(#(`Z4O%faqvt!Z0s&w>W>K z{96|LF7=_ymn7v~FE~*d6EzHiX@ft*MqaJ8A6sPJuB2QIFK?s1 zlsnb7cX#8Ru)%qWTiC?O3_h1?+nE){hQH7x(ZhQoEk!bu!+%3X_Hj}+7~2pQ9#CY) zY5#a!`{u$fW{o$G*W{3%?Xe!17c=?NpEDSG^NvwQ;h@msf8;f_(vzyy2~!88F>QH#IzZMa;U=}a|hHYF-};`DaS@m(cBmv!#G zF?@eMXBRUIxr6xr`>_H~gg_+~%slK=Ar;%=uJmSRi6f}eurr;$s<$1UGse)c{o!R` zZEA-166R@BzkDEibv4^IC~Sv)IMD-5P1yZvi>KEnp{|>ASjfbNIHGxaeIP@k^T^WO z*7e)SqV^IEAWoRvUg}ehB!loTwobYV*d2aeM;53#sbY(g4w$feUWQoi=B z%Kp=r4nKZQiMZ|oQm>!YOFx#E&-pdvniABCSv&j3J z{FHkB`mP^d*+i*Rs7BQJ?j6nXj?&kZJHqJ>)5V)k_h*`T+}-Z^Zt^tdbGT>wApz8{ z6*|{e?vSeC(ZTuGy6x&dz+-TlOY{E4G@^r%uUXkCp0s^XzA}16XWtsVXOXnp3}btn 
zXOGt|Y$}@5KkB$z=!UV&OJT1~?`};pd<}iXkVxbVyM=Rr*!2*kJ~W;HU8ARNPdx(* z);G8bK+%DV+82NunYVihw`xR#+03N? z)EYS05)CCAE*6sJy(xOl8c@CJ4k1N?SJbVWH%BpV2JNL+T3cVsvVU45y+2$c9_6!@ z98T-0BWdMKMj#buEuhN?j!v}F^$hF#ZoW4{_h7S51oG4#|KSMxLK;VFNb*HQMJpsj zsO`@m%=wZ(7B6C4*jT@-p-zvZi zlCaliBb*nVKH{EIrAvHZs&3?H-94wf_UO>jM=2Dn_WfQ+Icz1Cg$O*#_+rfvzSCoB zxX$}wx8|PrDivtR-yEkLj2h$Wx@M5)uIc?yD#!K{(+QR-CXtEbJe;)jIN9MngnJDb zl%Zvm{+nW7Wo<@rvGas{8Ds=_1{Y-bxEA?2}wdR=3Qf=e>V+s)W(D(yf+O zsFkY*f$k54Lr3>=Ha1<@)>usZDB{P;3Ux<^ zGd$(cj0o?G?0N3tc#ls3%#T0?-Kf=#^T3oKkeRn%9>a?a%{g%~e}Od`{qP%=AdpgDoh;!v>PQyYEG@Eb zFFD1}3@_|7)=rz~zv)Wsfty03{mJ2N>9=U72#fb7kQKAvVowKd*gMX6(#%C9c4O7# zwZRheyyNSlZ?PmlKJJU~C9;{m;hK0{KX^bee#t3diyeGq)lfs=zB)-Jk3%(X zm`|6CwJ}ZJv3VrLRQ>up_j`6^ZFv&Znv4_}xV@Us#&NUIJXj_f1e4^(`eO~JgLWtuwrQV^2J6sA2FL*Zs>Y-Am<@8zHH6czSWzm z47y(cexU!hx5v;guIaZ%1J4%OYxa7{XXsUqLe(V^d=^0f29NG$MR7bCs%m|+D(ZFe z-d?K)>pOTYm>BU3Z6hYiPEqN*X{W5oI+;zn4Y#hk&;6{@d?z(E6-|(&OWqG3{+DR1}nUDbFDkW zk)-$o8*BXKWQsORHq`aK`%KGTtMq&DxFC>o_(UN#Kv`rGsuu~>N9{qXxODh)_UO7G z<-q_*<$e1s;C;7W0Qb9xhq-)%<|%sx-4eHh+rlpoBl3V~D8T{!>u20(d>0Ue3UP0k z?$RnkMugOg9xo^lHx!VKQ-74jM`e#oK6LNP?}|&?Ut8kOd3^b}%t%O6NF{wHF6+#{ zpE8ZOyy$HWxZUk6xUhE1EvgQoE+IM;E&*up6Z}QNMR$lBpG((b;tr#;HM!w6M=Q49 zfvD~JYjNB+@$;8Nn)*-dL3^u&UKsc@GCR=B!Z=UoK--nOS+l2;*l^y>^RVBjh{%hD zjyK_^GwAlws(9S>Qk5`@e{;UqI&7hV+&ouL;Xjihwyr;DmdN`~+APRMu1H};9!)p$^FGmwq<@aB!%71dP7i7vP zKr^qKPs5o>$DUjgVcGff`Tl4>Fqq6W-$C+W;G!tZ{+YjVgjcLG*gMCU695IvT(^40 zrR_gAwW&1Pv~1ku6P>dZR(w6iq;&mfmi+^n`+xc&u0LR+O1JVH5=nhi1!?`OpLhK$ z8=}XmFa-1Yit=*FwYRF=W=dZYDdSInCJ6QmmAcs^!l6?YKv_Oz(p1TBFDy^xyQp^S|j1F8eh16VBFuf^1_KYg9Uz?s@3xi%er z(qRT?)J@LaI@Y`-ue~?Pr2jsU_vrVj#7o5kW^9BVsNj-!i@4AIOct(CI~S+jWi0lB z$PA95Ab=2bG`7pXFM!Dth=Nij8jv(rH0>qDD2o5S=!-N^UP?wkx<*pief$=~e>XsV zOrO#-;f93~#;-zx_w{Nv`dqcic zf=?tm;qJg}!2%lDosTm1nZa`1c9wk}l~q-Qmz0l8pT~k2N0Z;a5=%7hY2K%;Fv#5M zHkv85ErxZYsAy#=yb5!wQui()tq=J9Ggk*~-vmwzy6{g|F4rg}WUR<`(}zbH6qJ7d zVZJh~^?;q2S8q95F)#5(`!d{N%{gm#lYCeIzH=N1zddj|*JthygOyM!tf*aurPOYI z?H*kqmX&!Z6v_mYLhiNi=FI>78SOOUl(B1sCO$2k6w(YH3ydyo%Oxiyz%F~~%OT}98*nw{k1S%+V$uW+p%5y@I? z@cRxcz6K=yMA)lO!yoQUepSSpnIn+r#b+P{9>)(Tb?=k{?mtr_$`+sEb;1s0}fAKjYIsf=P*t{jV z3ROMgnfJ!kXcv1Yl|t?{A8?o|qUXJ!3#i^MnsR?c*G>=9psbQCAGhC;p?PK_%X*0tv7wL7x7r0G~GKKXh=Sa?*>A~U4s#{`*zTBiKevX?|yJL~$d$22-R>{)P z`g6GSBJSE->gL58&SuFb8e4Yrg+BYsqTKftzl-pWB%|v7o<`7 zAm^}dl zOWD8Q4Gw2KyvhIJrRam7jsgX)+r_8TpF_8D7ezQ|GqB>RMDD((Fy>3da{|+-UpzQH zIwE#_uhdeIXFB;hD|H7Eb5HiZ*R{gfn*C9_Vihc9OHeNp6D^MTet8bxTd+VGoS3I= zBqQ|p- zy!PFl-ND7h-O9>R@w&$}J+4QM^j7t}AAZ?gvkU~h>1g|_n@crM(DeH)a zZlSELjqAh6teT0T&q>m;Wi8QLW8o%UG(Eg~z`(6!dVI8Pq&7+0@ViFx?JZ-sbs{#q zRM#9dUs=$Lt{fz^18wOUW=#~d%=;b-449aOrRVeYG!%J!rlZYvsKG;6Obx`84rb5M zDMU$+PAX^5)=Ljc{&3zbFph+|Xrig#4%1zSPhh}7&<%n$2_7IGK+?0dW>9?!C0WA7 zymKwjlg*ZIO~ED(wW|nMdzsxH4_9mup-{FFKuYpe^g4@w;6&<{8v~P&!tT1b=-cg{ zuh3&}6c7mXBqo;XS~TDHG#%)d*H}ice1TTR_>)=8uc9VFVnnT*37T4Kv%jK?JmPa7 zdeI#0?64GY2ak7;p`l<-2!EV@s6uIXUQsVML+hxY8k)cv50hWCKAqCZUw*2)I{j$B z3TQgdv*qRpnVLQA>YvVRE2_HjVyrAV0Tumvx zNtkz}@!eGVBfG#$tS?RHMR`2L6e@wS&W~3-)7Ga8Z`iY4r2o7@URuUO_fN61Dk?IQ zElK_0a`g3N*dM>K%HFr_%HnGEPvt%fj;C_*k`Q?B{OguKw`HjcID)z<{yvvczj7KU zapKn3OE|E`m}YOKo21q8sJ~EBKIQEzL>}m8cn;oV3UGEJH#~O95u`)Pg&Eh?51*b| zrZt*kJXL0~1v04`4d(}7)9D+-q7W7-f{SLD57os*PPzOTK2VUn-M)Ey6ID+~M+Yp4 zyO)Sr=OQA#M%rhK=pOonm*oUq8L9XuAAMvaZ-qm(XTlqR@waFZeIo_Tn&GbH zw`T?@;^SrQGEKLcZNx?%qiE3oBg5fnGPmv`2C%iasZT$! 
zozSAwPHntmyK$WSyi4oMWHI0HCi|N0F7esxEh~)dga>zcq_|Jih8YFvq=ZIt*Th%0 zsM|AVbum?%VZ&mRHv7CO_B8C)O+2vew4GRm{e#$wZ2GkJbwyRIo&I-?g53yJHS1;d z)84;Gpnp5ReYoQeXHbd9cmpLPlp2&!9p>oW-9VOfxocB?{_&xP@#%G2Un!E$i~J$C zQ55=xC^B*UZ=%Qwol;ZiK`}r~ys%$OhZ(hBqH)nQT!0f75}J8VN_Sli$jGJ>at)YW zL(CyakMPWH^65A$-Nv-7;Sk$)3EKA}#29Y1>WAHrCtGro9;uWSb@Dx>P3}(mKmnOb<>E9|}Pm_=umQcQUZM);a zA`j-d8C)B!^_y8ruxYr;JI@K5O4t7(>*R-9i&84Xtho>+?5V||FTOTWF|mHYP6W9w zsC3QgMh6g%mD{(#`3U7{h1|YoMA4|?75egSqbE#EKEfv>-USs2VyCNi(IR3p9N3*n zb`_H{>!3>a07}kTG(YV2?OxClut}p2)6v7ZzD8S_O7VM>|EV^u0ZhV^xXb)% z-K`q@-F4K$hRLdptckbs!Mn6Ieo@Iq@2WDc*gpKO_8oonSmWrI@nNyWI-gp4!Llm{ z2TI2tT3Y+iilmOKvJWv0bBhN$Ylb@J|>OxGGL}tDmc5zL6{ZBN|@I zaxQ`2ui@_{*GHr?|38=^aeLRgC=DNtTk59(;1mpDbu)Klv04e}EoU=#ZZ>O_$ws#A z-M7k;=5sx<_U>M0OXpJFu%Vthb&1$2u{9TN(rhL?;TIkshwcrR+gESTWeBYxa=6- zR!W+E_)sbP2;}Cw;GRxR$eT>okXy2fPywXaHL0>L^DMNF8*idyZ9R`x)&^c&RKv9m z$3^GgqAmQMe&gf^&z5}+2jYw<=^l0`$_(Y{?)eHe!WsK)!Py~#W%ixr$U@*o6hAJ$ zuR)P+SL40`T-)ha8%~GYf3@%k-|OD8(q410Gr%}{wEXrE?{?LW)Kqe3lx@$vQiIMJ z%G786;{Wjge8BkTce3+a4AlHP{o8@6&*l{4SM3d+dn#E!62uVGbvaMj|4=@B7+rH< z%Zoi^LmU=FNmDc@M9}#hh7{8}?Ss@--`cj75)y_k>=LVv{;4iN^sKx^TF);n(eOv$ zETeSi{TY%*Sq_Y!CI1k~KB5#hDs;=2n$7J>z@F-!+z<+ly|{N^~3wHv)GP5`mOP`*Qd^EdvuqxUK+m*R`rsLk@7 zOPGlRZy5IN=Kev&$b9j8#+FWvkm<*&<;-Q&v70kz4!HZ-YgN^tLg8QvUrhN!<{kmE zh&A%J+CyGt-N4V_Wdu+?XYD?&DL8K zIvQ)1l%6DUP)|tDyqFFD^J$F5i$G$T zKWm4L_kR%}NlmHGeU7nGICXD)46f8o!+{M^9&<@L)= z3~xI8860fAC1!C{+t>C(@t=W6Z!IIE$cP9-0|P7f^(5o@-fC|GY_plO`LpF~`CCDE z`r~r%I&$O)ncUg$NuNOH$}n0sTCDxaJ)ac~rjN3nY4d2ydYtAn{11SWj4^*?1ALxU zH%x{$17B;%XmY#dt+s%*!Bi+aZI%6FamcRaM{@__?He-y4{)8J4W z7M>c+UubjL(?;7k)y&lNKIDPtOwd=KZ`1KV7T#Ol&o(S9ANB{3VPxTBHQjslG&EYb zWeNyVl;yjxT5X*=Znq;dJVG22!ikzxLrO zL1N?}kjho!+?d8BDd`026q1Hfn01Z}xK;7l&Re|lK9rz@L(fAg>2Kg=!D9MyKjsL9 zME{DhU!h)nB{@&pPZw!S3G6%r%$e70Yu`pmdeSL7GGXJ1mhllxyvEnovQ0tDiEzm5 zNRS$v)hhBggtP7$kAxH&Uznfjo<%;}vhQ?O1#3MOeVv%XQ^CDtQ&YmsKjdGXPkipa zpv>y~mfGbU)|8XS4X-~TYZyNQF)6sH3QD|k#pGEAp zT)Y6re@hN-`y~=xOiYmC!ML$)MxTEHqF3FCs=ZEv&GL!(S-->}Usr=&u&z&h9eL5JL_Zo%1uk9cI!pmAPlb|!`L2ZCPssFAEeKUxMLDAbhq%3HM*xKJzO{$SGE4Ow>r;}9uI)0WaxDB9vs&VWant? 
zsJ+7Fb;99izYgO#iti%S;7g&kt)Mm!bEY5#Bbatj_Ci@R*fAER_P!8a6d>8UV$1-V zoBo{UNR)v}JQ#)-_(O}o8JM%Y9^7R#gf+F1CRPRbCxKBQ#iw zG^ZbyWot0K0Ku%UsKMv*m>-OB5_jP~Qx%x`So@8eRD3TCgu2r6sN|RTxvK8Hp^=f1 zfq^@1W|5Sb{$g&zr;KH9wTun&pp>yIQicz+MD`t(BrHLIRlRI6|L@<5{n$3PMTc_O zTy2=dGdWGUOw~<2Q`Jd`BftC#((L6bHt!Z=ysy&IUp~z1=(!SUVtOIvAI|A4(w6zl z8;J9MGvRL%D}1V{@)TlH!8e7!DY1ARPiH+bH?erZuWzc6R8I`{B;NPHuiwGqdOcs= zy5Ii!)0>A&;7nT6czF1yF#$>8TCFptBPMqCB`5q`N|NjSbd0`Hk~)1*?x+5r^!l#z z8hAe>=RfMU{Nav3@>-kL1EYM+iY8t4A80)TX~^O*2KD$_^`*Q1(B3a5#q90bhPl2v zPN!?${g6(QNb}$c(0M7e=X!@eiDcsU57V3EbslqGp|NO-9oxo3aJ^xOzv3S$KNcqj zOk_#PcfPhSo%gB48AVOS3zA4#zj!T>p3u;ki3kjgHHmn*q|_BcNI!Y7x7M9oMwi-K;QXrmv$;nSSNhlDqmPH4LU~zPP>%)-NTH_eh|OjF z&j)k!fIfIbNP{Q@TL@yHO~een`nNxW40}JaiOERNKO9Duk`$=_i19IJLKnO|-J{kZ z=)v_VRectT#pxdzIRi;?7+*YzT8ml2GnNzcemMpt6igEB+ltYgBRQ-u9?~^3vhlF6 zG;^taMhGiNq?OgT_dS@v$vfb4lLlUWjj^~cH4W=1wRYU;$DpK(KyS;O6JuhYARv;m zHWpL$;0OTH_lm3vi(yRKg4k+qk#!is*TlE!7#zcI-3(3$Xgd0NI~L>TED+D}1Cl{Z z;e8=pV+HCOs7k9h9reoX9wRInaQU&a)f1-ALLgugX~T9XTr0vQ`L}M(N3>#Xhzprb z0qw84TBqdEsTmIq(v$G5M9ly3w|<0~Fvchyeob6ZB$D`kp&UOVX)~js0D4EzgMTc*modfpHka*egwTMls8~EbZw~`0anVW1 z2AGcPkO{tHThrd0$7xYd;caF$HPC5kA+O0OG{qm>Aqh(9$m$Nl2T)42B!y51p;mcK{{(TuD zk7mhkvTO=%MvDV?`|D4LAY;Ac-Dc34+1}!SGt^!&oBvk`Y2`rrTqRF;a{)h$0y*e2 zSj7-FHaMwm=jxycNKF;ClY==5G3-fUj-a}YHnjfZ?i8@32wRO8()#N#IpHG%F>5QU z@r5+~3ArAg9^>ytwr<_JVZ(VM*xM4Xz3?u;b|+&54XBy@qh9Ih9YjDSw)F>=A3+&< z4qWb6u3Sk&TxNkXgXFbo0WanATxkTFwZy*L@gv0}c@pn;XwGFKOCu2@Wl0}8xrks) znoSH*Rr1<86NKl(`IY3mh^+yXAVtJq?)kX&xJ`H{rOv)c2o+d}x8bpz346N?>QsI_ z=bwT0(*5+18UK{O;^z{}!$NCj_W#eYxccowJw8Ps7+?Z@l2xo>~&a$HIk%p!H`2=W9mtZRPCZ{d>^34adKv(!I@O=4c+vwpB1 zD_*_Q$Wl9e$rN<|sicIcY`r-o?!K8t`9DTU2Vw-LuNTPrP(u>dIL=TRb;2-p;I#qr zRiG}UrQ#s><}*5fe!F|x788bwDVS_yAcV;r#6qrLvUdATxD$_Uh4AuY-v$z?o(L-I z@4^KP$I+%f)40f^Hi3bpK+MJEmVk?`WC#uVt4bom&G#Wb{w7=_^Oq2Dlyr{xMeFx8 z`T2mlKw_DV2X0dK7a>7~K=2*cxQ=y%rK#)aLBaziLUgJ<#_6#U{~Pm?}x3o|q-p4W_vjZ256CWPR3(0 zrmwCOx)x?;5^3!^%rVn>51b0I>bM%SuMf&fc(c{H`e3Hrd@ReIx2W2l9bPYV4MbOi zYq-c~UNf{6#@mGAAfF)+AkTeB!ws&Xh&n<{Z5z_hJo*$s9q~VNfSwYSsB0wNkdb)9 zD#CsxmLV`SIQTe#7SfrSQL>k)>~rD&Pxz6g#d~9rTKO2iN{pLb?G* zt;&IuyHpo#Dl%;fQ^zUPVZl>Z!gPgu`??>XWtET>-G{U=0fhyx5VEQ&OXBDPP)!NgBn~T~pm7!s35wUR-xrs-( zBV97WZNB`s;$_GRZrJ2tVw|54a`eLNj2ArNZs4vKZT>!S=QC)wJ)kFIBVl{NB2Lou z6^nLLwElQl5D17(U>(EkL`+!t3%re?7!9!cmoC3^u&XNU5Xh;=EHQCY=58lJNtepW z$>H5nfOWN+8t=(Cm}Vn$?A|tF=$cs-TzUu>0%Bryf=!;qbX3JD`B~UiB*B^8qwXRR z(awIiJ+El07ae#3Ay~!bgEsE(_+s@@-0LG~ELtH^>W;0hf=OMx6{jj4H$8UY6e^0E z=Rewq6^WF8dX69SmrPq?a-4H_b=C|w*O|6B_Xs9C8Mmt4Ogi8%M|3VHM;agRRwXlK zN3#=wcMh@7ha7Ixgo|SS%ig{fJ#gFD6S9mTntuYIdfmHw7PrNkeD*}OG!Smwvszgn zd_IbfWPfbjUS;i>ejj_>y{xqs%g9hzM9JE{Io0D$gfKSZX}u{)q3QvGFoOR7!IYa~8#kWOC!AQE!rg5do$%OlM97v$u2c~(hd zS&_I*FxtGC44P#tM{Mtyb3`N=KNCVz0!o~doeh5>QvnX5%foEj#?K5I=#MTCTj=fE z?jFt&)A5Bv86IYKpgJRvWa>zM*N6(S?da&hu-;54DJ`$H>HREjJ1EaETXM>BOwVpX^>ZBhDyeH;9XWgNyO! 
zx{Mb1gNcMpO@!U7hGi9&pynURYy2%2=ICGbxIR&|ncXDbw~Cga&ec{JIBhr-eq;bz z+M|zt{RYcS$V@NLzUIlBP5<^Ru``-Sh}dBoJMkr1+zGQuwMBM>l?!YwV{$iNBp%2c zVoctYhAnb@eyC)&8^4^Y)w`zt(H4H`M1@fp;81&Q={W{$LU zn!nWkn2&7Bm&1SxGh&7TP z=D@fu3hqd8z90zNHa)DlwW&1_bPLRtB9zaKCufsbX68-{U`s`T=>;1k;{aC2HQDUC zdIRa5ot^9)9EL3?acQyQ39u;tH{JMA7Z$HevyT6f`u;5U{vYQ(v&vg!a*QsG_L!4M zhlt00EaWfsqw%GgN(DGp4srZ%A~QxYvNEgU!4l=aNS`yKTt}PWe1!@@?|)@AU~FJ4 z?LW02?p&3>YQLXZ4J_{AohvOQq?*2qn4PGsoDVj9YRHe7?JP^t#f&uawtM)QZ|opr z1KU9(@gLkx5^i;pQZYh4CiRKQCm;tW=$a%$;wzgrV&vT)GaLG(QwWVv^Abg!0&zDe z6r<~*l)j62wHG&WXni63`#@w#vv=Y%Zd|GABJ8@(93+N~=P!XP`O9%73j!1GA?kcX zyo5BuBZ2D70$jr#d4;1NnXENm0geNaab`N}ewhE0j0QqvGeZdfE0#gxB{FUy+&?NU z8)6>%#275Z$-BNhfpCxt#}{W zM1-L@W+<7tqh*G&TBn1UFb8={jVplOVrhK=5fKpite^uGl+mNQg$QQ~coU9&`9s2o zG8JkuF{KGmk*=tvZC_)4wgi#_@DS?ZkLRUA435ppfM+!^6ut#^*B_hF3`Ex(-f;7gHe51S_HevJ4kMV-@OS zf(}QuCyz&r)Ak*o0s&J_mDopYIQ_*J;CBs(?I@^Ndz2x3#$ecW3)E^ok-%b;mz80m zceZ(12|@Rx+QL~C(mA(9CTOqJcZ@0NwX2S5 z!L4z!G6=ILaPwe5PR7G-!Lif$xmt$~9jZr5yHrxE9$5gPbu25qKC1y4pD}1g)|O|& zj~m87LA(#an5d8(VzHb=@0)c{D$-0#>R**%Gg9$TDTn^wX?fIl!}XScIKCMI1t0VYqz%(@F%SGZiFD2f3r`2 zAYA_V%SHMve>=sQxnr0MvOxKwVV4ly?UH2`1r1u5D!3L zBue5kBe#vWYLG!6x+O?VB!l(p*>p>V>?&e9n%I#d^D7OK>?D(G|N0#9m*Nx*)GCqA z;e4ao-qC?d3jv9+z}yr2)W`*Ht4K4g+>z@{7jW(r_1(e@@23g>Ie^ZF1N&x*N0ZRn z60zPcU&3M1@OgVY@j`m zKhD4%YT+IyOO&c!H{*vLID^lXQ2uQ?rLJEF-dRJ;&RJ~GxraFwCQw~Eq*9u z)-sJJc4=w`4sY#esi$tiHzWW1n>94Ppj_T_G^Z5{a5MH_3qVb=K_92{8nMI||Fy*5 zVg`z(86!mg{mG}6&zM;35EZF0*OrhW&LZc^awYZ@1p=Zyp!`FhP=d zhnYV(h~pI=Zn^(lz%^BYKu5w@;+vXD1Kd-O1o(gjz*4^`GK<8LLgXp0K}da{sJTIE zfgV>_wkWTbUD`q&Mzr1b!r!UGA5+Pp5b%QPfZLTwU1E?(6VN4wYw;BhI0(?o9^TlO zpDU=bpuN5Q)mV5)h*@M7Dryy^Dpgf^sOFWJ5l3bd^Nw+?nUcw6m`*3u15UCJy{h^4 zyuG>|bUJ;(110^{voDd9=kqAc*15j(n36`|&Yrfy7mLl4=b8`gs!81UbU z{5RPxWz%1DCx%hi!;6LTw2IqTnDH3Xb(WQJ3+2b(v`j<{esJAd5V@nY_vHBZ!Ebx( zD)JNhUEM}pXzg={pW{%VhH-*rSn0-)r9$2!l9D-yAg=Nx1w|rN;vw)PJZ^~Xc{;R? 
znCTk~9Z)E6fQM80p{aM#)78nhW*Sg?%59ZRf8P;zoI;ZZTgmQBrBtr4Auxq6)G z>xG1_EM+43rL!UAt$zd!UnFOya8HW2>pdJEs|Vk^Yp!6TmKQVL;>l5n|GQJ9T{=E* zNTh=`uSSfkY+fL`1%V^CdTa$1vN1-A(heP#qwe?JP3%Wyr5|%+61^NS@q7>tyb`5>~F%*farXf}_#w$r) za@bE|Az@)%9UaIub)$=83k~qS_Gb*Q@-x^dEG#T4S}S&PfCj@7+~)CopE%}|jsj+R zA}O&oSm3?)ta(HEtGZyX9NxsP#WWo6L*>J?d$ zygp$IRM%n)fQdJ>btUU-_>V#mdbj1M5}~`FaQGq`^8&;HY=rV67yV-feL`DLl!E1&WB3)3AR^oF z?k*{Vs9&0wHy$}5Y(Nh-;VBW`U1;|p?HIzraA;1ft-L9|0X_-_2keIFK8-i19M~2j z0YGpdwmdl^CLrKYIQQ{Dnb0n~b&VcaJ3^F?gwJTn05RhlK9}7&;NDK8!aI=c=kLuHP7o`9A1Y-^x#cI3vmoCZk4v*W z(B2r(WP2zs>V3{jWcW6;e7gSVL`qI98-c+QC%0NJKr&*v4}m!WGtznY6pVXtKS9mL zm}wO+&8GAW3Vb2(Gl|4aCRyO#{?#yx#lu0s-+RJpDs z?%SSAjTa1WFHR$(s}-=$7>ZRejpxpThm+AnW&y&v#*{1>)ypUXPm^^G)e*ZF=`ui~ zD`#~#nQJ2tGLLk42_$p9lSYBIlL?!f()n%vYiA91+Y@aQ20`N={Z&kGyNvzcFxViwIZGc3vGbbU> zsVY!ekS|bz(iOl?d$Terrnmo-6?ZrdQ)=1&)alnPtTV($%8oSx7!xw zjlAEVT52P;%#6&Wvv&y4_XHBboQLj9U&htGD_CvEVdpIU%Ml&m!BOeW)8fKCMd1Gt z5j{i1m>;5)4wr?owWxk}@zhIbF9C*!f>nA-`i@UyH6AC@_|=wy0{yrRA>q7#I{zAf zJ2vhJOggx>pE;qgAMV-)e^cEF?A|_%N*@-o0)o9H>dnKE74BWw_oZ1|=@ugG&DN4x zd^t7yhr-p2TslJ3<%V^hbbjBxHn=b9z2mFH4{B(y7b1Sg+(T@#Di5ae5iZ0xZY)>U z2H<|ewd}?>!u8;7oHF#feIRJ`1_XUHxNjkjEo;6({Hs$VTk)lI3BGV zITGO~g-r>~CNYXAx{!^HO)pPu;O3&!cX##uXAf$UrFk&SuR^KPM&s|m$~NiQ7xgJV0@iAZ_=&+stzV^csA2cOJVYF%;6l>T^T-}lA=*aQrOw=8^C$lugLril6 zX0P0+f0<`n=5IAF#Yj4YKE>BVmX@D=mu_g+jV_Fx4hJCzLmXz(wB6Ou;G!1o+MUEh zZ<0J-kuot0t+fCo;BQkSEnJ|HBj>@blUmiuV!uk-hM29{J~z!MU#rOF z>nj4X%`bI6kX+QjM=vP%90qh51>6wNFV1*5Ng;s9O3QH6AEQ44Io)w2R#kjE1j1ak z8H3JZlmi_FDdsFI6dm*R9vB$7bLY;3Nq&uy4SENVc(=5-&=|eCu&G$sQXCf)ui3y( za(kW}w~FXzdfDrTsB-tM$Rf3_(&KHKDLLKBlZ_Z1N_^*#S~3Mcza$#qt%iCXtiG~) zgh`_pb(Lji8lNrMf?*1c;wB7FWKvZg&NMbhLv{0Kf4qv!z3`as+W=w$#gn|$GG3O` z{)b3k7pDZ-&2&hoHIE$Wo7NNxdvEaLqYn~=e;V32z#hil92Si~T)R!4arpnE?LFYJ z{@bwOi>Ra$O(B%1tkOT(R5Fs4m6e83vLYi}DO9pTGLy=vgoq@RiWCiGh9aemjAZ3G zzL)Bs{GaE2pXYtv&*%Q!_Z^q(H@@pUkMlT=lLmJ>Mrvu7baZsu+uFp!_FgmEaprb; zy-7appj<9W%@coa)H8tn(A?#yz^#&)&?^(h_VgQx<{AWF^VCXHL}&5|_)sFfUQzI$aKGq%csA#C$cT zp#)&+P(onJPx2lN#x9|Doro*jd+4gLx%O@YqCrw-qF+?7!ueE>^8`AA0?uE_PT|CO z4@&kG6%`ToYtzvM!j&)KZEkynq3Lyq(mZaYD({7(rjhptzY zEONiQgpf+HoOESjQ0Jtfcp;eSef2X#f-gv&$-gO*&5mrinEdE{$TU(=q8uQWitZ>8 zyH`bAnoVKG6m~iAZ%%}A2S@K^xXOK0y>Ex;06@_3(E zqyA*cz*m@pgB&qpttN7|bHp3B0ry4ki(x5U$ZVhoPpHfZE=|&{ys%+gVV&{`O=B;f zVGTM8CWqWS^p4#R+;gY3$0w8$cjr6Z@GK9bT)ul>xgOtRgR5=Q*3h^Geuc*=`a*(q z5taP-%~6iacUSDF`1P3#fMY=QKRNJ!SHd=eW?6%5J%@2OYZ?4jQVx=1YX1XY1}XK* zAsjl_nq5o|XumNm6J10jhYfyj7izO)kH0?W{zf=TVDI^Or}^5l>OuWo^YDHY@R1wv z`bKXN@{uf5H4v&O7LLn2O1@bcYPp71ZarqeVRw!YskS|zC;!x_kGxv)r`FiRN8Hy% zAh3Fk#zY0xj2R8~TZ3QA!iR6LMysr##JUsqw^!yR+@^<^64%izdE2P`p z^^4vihCA@bDOPmxh*O&U1wZWCovN+JBqIZD&9fxF?mlC8g1u~`rs%oO;GxZ?2cEm+ zruaV8pz|)p`?}6*ap$xsla|)2e{+Rb#Jcl;H(Ai+Kwsc?B2lnu{0k_h~^JA-5rDrLSyv2m_~a$KODe~$nHpW2AUy=xROFIxRoP4crE%) z<}2_>l4A7j5q}P^@*Y%lnZGS|pQBcZDD-J6_Hd(ae}VFRQ~d(|HlY$!T)`h$qj(`U zR?yHq@ymKVan#eMW$xEX0N>7>yDUK{w=uJYvD<13n=E(F-hb}A6>Q^Hg&og`$^j7L zfdr^~C~D`=7XR@C{d6DB+bq^F{Mlq;1dSM07hx`wg4ynk6*l4MM!UGB66SC?*{kni z((LB-LOcT5wTPS6Wa42HHCt%Uz1;hLze{Ss@?u8LUw33x%dd@st0tgx*& zSbUFhh{v$tYsQlml3-8V0~%9aXWllSG&T^t)3`D}jP)3zCnABV7I#rgug_i>c_I&0 z1N<-_Xb}Qf>9O92YIj;ZPD;^#5(m}x6FU7UMeRhJX(K@sr#iDvV{T{WWSKrFb&QRZ z;kbAEg&Ifv9S-ieK$p$tw(>pqVK_`C2>ymht_$N zd6|LaRBxDAx4rc11X|^)%s)RP!tB#TL-P;a@gcT8`6`8Frzs0olV$R_?nWlBwt3ia zR9g82jG+$!Nio|8ksnf-;IeqP8yCxQ?&ko3GR(0$l5Q_i8Uee9WY3sQhjq!;*46h} zsBz$94f>u#a)(mKYS5!!Xl7c^5CpbUxdEXExs|1T*zL1+-!wAav{s6 z##slX=&xPNLPAljxWfJE#kQz5Y>JMhqn~Z&-q(6*WM{$Ht$H&5#ACyB2AL!|M>&#U zS!IFK!IG%v;dg9lF<<*whbRaJP;8byfIrDb_wT*9e}=b8rSF|p;;`Em|K1vcs;=H6 
zMR-nPd4+O=(h&`5Ogw^bcX+k(cLsunhpef;*{w%#S2SZJ+r|xO#HfQBMQKycB4L4KS5!5JfTB2W19@`AfFBk_V?TOPDdA5n01Xv)| z(2d;|Hyh{I^`8rMvGs}+)ni0;yLL8aE^vZp&!0QG5|jMRO-I9aep<)RA1R>!&htGn zsLM1{iTW`|dI*lc0KtL|d(R)3ad5xqHq%Z;P*8 zSB?wFJfc0TS<(4GspRUJ^Vwa{-hdM!CrcHGr7PTvV+9Ys=WF9Q`=Lg1>xxh-uZkG) zS1Y&J0I&lEw?3b`{?g|zTb}@E2Szat23zBl$cZfa@3_s4>yzm#T?MYB$l;jv`;NcX z`rve@$;lAn97NSlAk|Hx&Z}tk;|9}-QJUa5+*0Ucw;uQoC*^-8PARM&Rw5?Jz`j7&BP{`P^3h@q9GgAsb zu-ljvUt4hVnj3ioe4Gc?upu^C#q`->Y#7rBCWE9Pw>l5mo!Q7a zdN!*;Vu`)TJatT?y!i@)aF|K|Q7vT2b?nqi3LO%hTOY>a`&^GdTOw(nKG@KswPATR z%22xs>);%Il~g8ey+)(o&UmT(dmVs>@UcH7cV*V2bJQbP`>X;v!2qYg$g2YDhm3n< z7ntN#C7`3k zAN#=SVMEUuCFyK)=unF1dyha(W7*ZMcqj8n^aVz72Z`^l>Ght3`&tR!oU(*P z7cj%K&~}}|*JZw%#+>UH(G&r9)-c4&T)ttyJnloLV{77{3Eum*hso!J@$|U_cW;3& zDnafQQJ{hMEW{I^`-Q8h=^UtuU=0<3urgl5yFtOR=>Vc6C5-N#dUV7hN;QDt79NUE zue?3~y-f{3Y-Nmin5^gGEyvGwmMka1u0M&yH8cKCbE^AhIzm3V2TEwYZFh$srItE- z1^+*#d&zEq4!b$CnbU62Q+O7nDf|DM<8|)Q)YL=?zyGpZ$&UQ%j-GTBo+W>M0y!H@ z`xiFEP}8(Sk=cb#_?|^mKK1S$vD-wNgqE7iGq5qMqhN++gKrm8g2k@$a{rNhp22cM zcTXo!4)#Z%MLLIl1yQMxpddA)^Q-15`6GM%Dm&3Hi`sM^n&u)ATenJ=))0MJNyq4+ z8Po2f9oDzu8Ll|ta#Ec@m!>(A$G*WE1}Jkl#E8XNG>t! zqVKYQ)97==k2$=~b*TIKYeh=Ytf>!h84r;BtOn)lI1oi-_HJU>@NXW?p28PW>W>N* zrG-g1Y{4Jn7x^pkx;5omySWxDL-5fiu>sK&YX)ggtXJ|F=|cOLfrT?EE+v27I!&Pb z#V0G=tPslh+|5KU`hvCQ(ABQT5~YAWvj5?f58siCot1M}(F#O}j6bhg#H@6j%Bkm4 znDS&Aw8{%1Ed|}-_>@5?Xlrg7OW~F}GrTovUhP2TtvrqoKbjr`4(JFIJ z{(Q=4gxscOo`H%`1KzeE<111h8R;~z%Q1389__5FTSIyPTJ@uJe+UBpx59HuL@B+# zf8O6par%`rMei)XmarQ=0~Ns(#m3YWm@RIAN2!XN@=*r*#MxGA-wBThoSYYc8B~Pw z3n2skRnLWn_{N=G!ieijTLXpET`5graK z>L8dXsZIKJVuGM{f*2&}*uHkwlD>o6R;0c}nqsh;)x(`4C%9W@@$Y#Uz@x3H`4vP? zg<Q2jYi%u@ffd{I2;am(##h#DC7;pLf(V1mn*HBJx6bhTnoxfL1N$n z8k9jL3Zf$D<1hlB>V?cX@Ao)uJ|$HZ94dNid}|(b#A&Jj?o%OcD@$^CLZ10_EqzK) zN!R~l6d8CNtW^5`BY=h44A>5j}tpjVKK2>y8y8bvDY;sic# zIOX#;WrS^EbDoj%HZyhIzx(%#?~>4E{4?$6vw1VRAY5?GxmoI89lhTi>zGb-Gj7~= z%8diH6g`*w{jYfeT(MT{-MbmR5jr}5Y^DGu-}|3o`s>bsXa=qllKK0KW8F&7j8bag z#Wu^ld-ujLu(D`3D}YmI&F0NRD0tp$InH&UtbKRDx~*1evA2aZ=>PPr=T_TChnBvd4EnD zpIpWj8Q63H#ZI8#@#ov9ea>AWCC10sK}ySSAo);v59R@>S7K9DdcQi-DD&XUM3c=v zAL;)4+YrHnD)VMJGh^{mPKxEj)YLytsd`9ic9&F*SBZgx^=s>v~>0oLJcTAC9f4bS4{u9k6@GTiW1zsIr3Ui3H|inD@g~) zBL?^T@870jhZWg>Y((jv#8Zh)pqcRZ%1ZWu$MdHXOft)U*Jaq{x(NH!_V^XDxG+^%ysf>M73+}#seNTbE{@RJi!X60rkHDAGVo)Ke%r=aYaQ}&5?gtlhsjVg0}9+v1&it zHPfcpOG2oHP-T!oL}LQj%?>L`w5OKwKQZl<$R9#x4KC$K)w1Mt%x!6#Mllcei9QwT z48CD>@GwnL#FW@&IqLhKAEGPX?^f) z@KTnHIX93l)j2TQdL*hNWU6nFWc-%XzVSqLEq0@eqMnNmI<7>P8&*0F2tB5W`fX}3SdQi`So zat{s?$Xh^6^cUovt#CHl;Es(k7S^C{?fN~Y{1r7)g_}gY2RDqD3!_GeIOT>~sM(>H zk~Duks3sb8@y0Jjomx#<`4GCMLp}^io^*kqN^YKK?ziQ$^HPEX-=>~0rt5sZ=Pbu{ zm;6neS&K$4-1KBoHaEW-R%aT8Bw}h%`k?ee9hx6h;ay-a)YH~xmvMK2Ydzyqp(@Ed z-{@Gs!|6W3(R1oP-jJx>p78d|quS!rcMKgEqv||U@V#Iio@XRXILf{Ke+G@~Ma*+1 ze)LR41f7K@t&^YD=O<8&EY=p(Z^)3G;YD`nJR8q=<;@SCcLUsN?jt%u>Y9PM(DxP% z?UG4g1h}Z_$PX}GQl0uWuOkjJUTbgykY2n=H%YgC+WOWOM9Ez(Aa}-@TawkS*tTpi7|{Ame19Ul zgGV>t;h-CX^9}aLu`{9 zLr85=)NA{vJ)FGX(|@i8@_w9DFUmCnC=1l?zX0s`Jw098c{9uBnaKI4G-!P61!G6 zX4@}wUOw9)xx0YQtJA@tvzGwMW>~t}Qd7|Aj@*R?<}JC;zLl5ph4iB8 zw<7b;tZf({Yb z#C%0sqU55H))-x#%Oud(4am6B;`zlZ6;00zBJJKheV(gzC-}|!ws2=#C!m-h!#kzV z^OemBwmsS$(qj1jD5JCFO8?-xIgE@~jQ-=HwujZRBkD%(Wmv7Z65eCs?&trJR(@BE z*~m7V(<;U(z+L2xbG_b8BZBVH-I0V1({_d#lfX9MQM=7E`RKCUu`gPdw^ppw;7BGN z7X(R9q22^*!Mzfp26uFSc&n6qxZS+*P|?w3?sWIu=MoP%@a81~iy6CZm@(m) zMGSRx!q20EN?WvTO76ZY?b(v#A3Nkd+z@!Jh;m3vKiUWyBpZ7&?M?B!-^S1O8^1dc2f-|6dMzs|QRrTm;WS?BDYEa?j@cIGkN{Jv}U67H<&iNZe9XI6NJsApZj zc#*7Y)U7~;(@C=d{0xLp<+1>26bc0*AZy+W?j>Rq_nx=LU1nPTkJ8)EV`^`Z_34Zv z&$dJRy0Y^HnVI%*CiCfh{k&Wp;Y!~gwNS69Ccg3wvjJ|msq@-N6 
z`p@W+JPqhAQMTaY=Pw`bZ)B3{l_Th~sP$d< zVS5Y7Zt=T2znV#6Hvae%GA`da4%$%I5G;wmV29TUB+W0r80>ltLh13#A<+OX7q!`PRu|7jroi_$>J`PHiR&3L?CBsEqyVV@+;@H;Gh<(os@zEM$={68QD>={i z?kVyQ_x6X@HG*v!C*3cqR_+ zXqZCkr1)m+=6o$QEx=GP&ABUY-p>wj)p$x?rhr%i0O%{e2XZTGA$S2(>2lB^k~vOI zz%FC{p?~Q|hkabk&p?2|UO#fd>C)e^mH%V%PU012R}s19YsLPQX~}%zubTMleF>g0 zUm(O?nTzV%|BjTIui)UZuFKpWzi7r_?LxF@daW+|Y1^>&w1?A_0J;iVDYc}3#>e$j zJUX@$I_nX3-@ik+m9E(MI5+I4%a}J9zw-c`X>JC!XiU4w>q=H=efJ%y(V)19hOIk< zJ_rUzYUfSMcpuat;K3XkKneK-pK{`TI7c?4E{k@Bt30YsDgOOfnj66~j9n2N;rM_SR!gsf{1wI|I366-qlL~tAva6E-_00yGF$-jGn;?G-71NU?f-PuZ);> z6AOg(_4Qd^;WpL~d>K-`bjSb;7VbYg?U}xcK&-`@M%ojsF*|qj{)lCG9jdD<;$&EsPFoTFm%$y=_NcCbg!jKckPdGT)(j~R zo;}m=!K71}=YR4W+92KXH7~Im zBytUpjH|CM=7QA3V3j>>Yw@|@3O{}-HoRV?=U)H0h1D(M<8d@opNT>LW=pmh{pz)P%RxL!`lCl@R&rdkI6RiL?-OZ|?q;y?3bAF*8-U7)C}zD46L zJLRGaOjxQ-OIT_tMVwO--T!MG%Ku5dFJKL!O18Iub$PgZ@88A1fP+eVK|ZZT>IM&mE@FImxXsZ?W6ED(4aHv@i5sC`2U5Ow)Lx z@=77Z&a{8d-+xkWs)4;jTbpOYP7`K@e|%IkU^Hqb&LY0TW4ryd0%sHtVomJ8E?AvV zm3MwtTMP|t;#q5Chj+g|WYp4>X|4PPXQ{ zkS{{OG>5U; ze8S;)r&75u0-6qMkIVk&Wc)!#E&EC1rG&nOF*}0!rqq#mMN}#7pd8(205!0#t|+)N z_3o8uf_@+3#Zo8J!+}hDnw$v2REnhytitdj!k=BezngKmu~aT_ zzoFe;htfS&Z)`G~2L_H}-4`_@Z1r9agU~z&i>@=-!L$(>f?M}G;5B?5f?dE5l=Uf9 z@}RGvxLhh+vvrOOVtNG$HsiRL%q1-1D{i+bTMbJpYy|cv)C3d+c2Klp8H>l&N z(o-DQO)hK7Y~l@+e0Xv-%ZZUsvQI8z@7d%=zH4MygWyF68z~ZMQ{E{gUUZ*N4E(62 zcz?dMN{FsQgR3pBOu9BXU6+ALp3+v`_;6}ngMyMHJIwVM^gVQ!Q_Z3FKIwFT7@o&; zQ2)@R7Zk_;!IxnAgB?A}{;GzSYPOW12BKd0`SW73cV+#OsMKhRw+VR!30)KsP`l*I z5s%-7XZeiTbbUOVA1lV8*nx_X? z_lfiP+;H4<=nd%6!2{&W#o$pN?M|C*+q`Gb>Kl&WF%h9BABAd@Q1S?c5iZRe)yk+J z{-i8Zz$D`m!VZnE)h2NDbC{3^#4z-kYKk`e+GIheg>$<)%C9Xe8--* zu>fl*3m_JtEZ9s9Kp^?24pi>3){{%y9Q>)jC)0GnWIv0 z3=A+mI4BfP;`mS!VZB!iWjWj#beC9SS3u!5H^=!fIqOR_CgTCPo*YFpLKW>W^>lm% zqfo9l%*uL+W|K?T5|>`w09JXsdqz#F0njK03=Q=i*Wu+*0w)*6-JdI6TSq4)y$MiiY+RfgUS-if z>H?6PO;cXGg!@(=e))aW3WG&OZYi?`ryS@{%gYBBQz$%~lNWz5N1nOCY6kAcZX=Z= z!4)c3LbcIMh?xBQ^|O5gsB7MG@)0RnE!5u^Eu37M@{c&Eo^URcrA+UGFx))WZ%&J^ z+%s7zEIKZqjfjJ+_uk1;FvW={>ys+{`L0f0;})4HMcaO^btq~mE!>m$_XW@cr8oHJ zwt&S#aVe#~X^JG~0!7O7=ipLkM<6fSYlnu~%psPWz2Yx3%udpC|=^mFY zi3Vw5K*sb)i3r=#&)4t217QY2#W^HHLP3;B3B2onj+FsO#p|-ru?YnqEDwgsca}&# z9~UPSbpz9me4e6CE!0u=ZkZHcQ(7kJ1{47}5i*9>L9_jPuu5J4kFuzUiHeVxeJPA; z#HuJ&BFg>@d5Zu)|LY!hZ2Xf$yWoL&efbKZh#3HQ%5IIP& zgw3$GST=dnE~q;nV4;{-#@+CnQDuu)2j(dHS!8=E(opsvt-=L$Aw-z*AW`P!$rUFIp*8* z$v^c0nve2p+Q@ZI1{**KXy^697E>^8avHg`G&wg9HaG8k2#wJaA*1AG%vC8f>h$cD zLw^F9W6CZ881z9;NU@m36}FoS3!x{881`t?b7NILC7E! 
zT~WZ(HDP{SR}I(THZ3_p#saM_d9U7#1!m^_Yt~G8ilPg3GC#h*Zyx)w8OV+-%*?Uf z48jDER3NgM1Yg?jLrGrq+tD*3wN*O@W^poV#w{lY#o@`t%d~7`!}g`d1E?_{3|x0x zY6`DkdlbFOe!9bxZu387!Ir}O{KR-5DFZA^mf;w|H zF5p^l(?6mHuiN!TuL0aLGqZNo5v!Mg#$NIYl#~_=JqeDL?kS6HoP9K+h*$(=YQz(L zLS?e4U(rW#>Db4?D-xqTF$nZ_}Yy zZIFqCe}jL|tXZyOg#m)Yu2p01%ckUzLOj`AKhSln~k~FWR)2VYoNiKxJxgLd2VJ| zqOL`dc}V1Wju=BSgNf{WLa&@E(a5ym8uNS@`x3g{s-8kL$tu7@|2>-@KjX#TQ=4v5 z2%Izq@Oz|@eHl@~+iA*geH6^Sjpu}NCESI8_og^8aD($5ZG{$k<`6cgyGCZwGi-$l z2VF?wMl=I>vraaUn+!ldARg4PSFJh#YFR}poFj}Jt94$zMKK;kOW!dSn-HeA1($!X zHYI6`U4tEYArx=d2S4|@gCd5ZE+F(GpBb#FJ@eAD?9ZuPwy^=OE4t}>1c?j!-H(Z9 z(Bq`=ll1<}^S?oXlkbkMT)=bR%I?=nvj{o4mMG0(vzL>)@frMCrj!vaOCs^TeM z-GVjIh@NgT&5Fkjvn{J}Bn(C)-n=$>RII2)f<`#YYh_s)ffAqNl)d@qzWo^R`v88Y zzkT~79Ah_{&0#}lKDnnq#->erW2%pZ+#(Jkhzmabd7KaoT^d{INC{`aA&fjZh&49< zJfiT*pTq59;FB!NbBs2Wq$ti_`tzi^0_R_0{Y-f#8YOm?TH+Oq2OLM9vVd)Jrwv-X zWC<8TpEW4~dtrQmTn_n_obk`osE4#~{Ao>ML0hGVeF&XD4Yq8dnc!ieXgry`j}4kS zI-6i;is^u?=i@MCune>dRCGrG&LRgfEUVl(KGd65%n{a=dcNUB0%`WcQ*CT-U5jS8a;7!%;`x#CU` z#SpWOZ(ie{O%R{;yt-1Zqi38Am5Ww0?DHEZW88asaN&XE>>HxYOet`G;c2%!+XOag zFuDmaKr$Ix`%T4h8Jc^&1PfUwKdVInZo@)3%I0C}HxV(G_mL0<#Zk3v6$4WDHERWH z@-ASjMq3&R*SVi7X9K*1NhdBS6?UqqpkK+E+e&v2*4}E}&gYoM+l9wPp?WP~sMWLo1NKpfCn z$`C6H?%SEIG$28`vBiiTv3xS2xF4;bK4V%rRqN=Dqb-+xIyxyYVQ;7^3_Kc9iQ*y^ zw{U$%oy80IK|--b$aLgL#%qziMc$xssca2v$64131zG4xBtXiyH8^yy?|unWyK7$a zXblkn-Nmejsscyr(HqEu(wtLM&?@LERi65#%oFF~%{y^Pk8*;ex*|Kh*uBb6G`rWMxfk1J(ZV>4FR?A6Ud{ z-=EBI7ONPY;?_=B^L&E6G2QkuK_nTeMEZxNed4w(luz8-UP1(%p7-G*XjWx=i2HGN z9UPSE{7YV94y4%>voXEd*tX(fEa^!Ip0W^(Aqmlxo2gRpJEt;`n+1~-;cY{{b2?8j zQlK%+n>TN^b6+gty$LlSWa=7%L=+wBgxiTCsH5d2l-uCeZJCGH-FI1lc*$~a)5MF0 zqjqwH8DpmYum(9M+n>ODU|V8j)(%0jUpL|}pL?g-({KsT*^^RI^MDHk`%~}WyFCAQ z;)gvTZ@;dL52QNn!VKQK+72}{EpkB3!@?mIyW=ll!Y3>UG{>qcIiziyJ}hP&?bblz zCi(vCM*L6glDaRjW4RJRZW-W6a(hdl-xpT*5`b`WMXqltJz3{wY>9Y@3HNzZP!E8)* zlP?!*+G-;@1CA=d*rLTvl=D(1jnYi&so{P+DzK6cg$B%ARFO--jWZoa;*FM>cDje6 ztO3e|o`Lh=x^l1@9sAUsoW6ncV73q56&PK!NaL&H4W%tSOYGlsyUl#?WkX{CricFc zwjIj*F^I6ZQ_&Wslu`3SZ}s(AU*o;k=5*C&yaxe!7t?GiLtHHF_=GjwYGtATU4vM{ z7w7VzOcu<0edH6^fqsCsgFC2iG70G-f;)^VXgVCyu19lumfZVlC%vs3mle>@Ou{X` z8bG2n^W;R<6KJGm;WMi- zK{+eNt$S5wPW^NNCFA+Yq6nU3D@v#jb@z_nYaTY2wj{@rE~jnF?;GYxMHn&^RR2uD zkg<6-j|W;i4YT0_EY!FoI2k1)R0(1@n>MmRhg1>N8zskrt}f9%nspSONlYgI2M_^V zD%u`L)s@EZ{;Vh8Uv;TZ4@U`eUZ@ncG@*l<%SBw!G<_o^N_% zYHFHXd)1tp2O1dEPr@<5Tr1pri&2xquhvBisGPa|*_ftiDfK<0vcS0XsV^^@$nF2h z@C}ceB=}NTe6`oFH3K}#Qk|4pmL{OW7(UP2gc?04S)uF<=~FijlQcMh59Y(bdq9m6 ztU={I9%QMG(WCMeO{j;Dk`flsZlSl#yRyY!ZU!g9JQ0~G7zOkUHdr!$Kt zV=o?pP!g7&>S1!YrD*YaDssvthDjih_+%=oe>Ib)cLQ~aG1wM)(^wifU{{5!1m#CB zW*vHhDVc@o5h7Ic-GGpgtC|<#qH8^BatonAC&1iw|Hq1rFxM0<5 zE{d4JJ3e`m!gk++gtJ9Vp#Og7@Su3E+!Xk9CKoI9c|09Zv&2cEzEcF22q95{_d5CA zRc&rJc25)*NCtH$-kp&3l)pVwzcE};j?OeYL0~KYX`OTR?@n<&qL+3|Np?IbpSCh- z(b?o12`+NwnycuyGv&T}JmgHbb)`n~>6K^XxLH*Rjgrp=>=v^%HQ&T(7YpK=Y z_^|51{KDA;XvZAEHdIUmlQ$!` z!V8L!)R8AMu^rc0oL$X>4p<=sAwBPP2D=sId3M;$^@%%N)L- z19@oI18sJ72o-D@JB)nA`ZU(lBx&BA$f^oyc&2;u}cPiPnz6y*THHwMN>?;UARz4GL z0#uf1t*)-FR^)K3!ueyVH)g`p07;gp+0*kV2DG3ZS!Owd)eD!$qJy@py1ED}iYrR+ z1uT(-W?W$sqLf@1Gi8M`<3}#tE2hRsA}PL#YRjW-~7 z^w>$m^$oGo8LZ5YOsZTQlUM!_VQ3Qtq4}QWTQ3xuE1m(sbsG@~kz(EBg9fYX&WV}x zLd{%}96;~FRsH2DcoGPdb&=UZQg;IeAL==)cio+k)U!BE+dvR0#Ce#%T0d35#*K0Muvs80*c>XzFpec4CzgJ;mw?Ds?og1 zM%v(dpFv%To1T?B=~ zJEAPVMf+&2CgpP2X{*6G2Q>cs7gIOB#s^}Nh%@scxau4>ZUO=-Cu`&`P#N4biqu!nz4i`i|d4UCM8 zg|NFE7dL=S!^(xCk(H(v=hrJ4iLw_+AWsz_SIF4t zbQ1P1sRc=VZ;TuQ+n1|Hn=lsN(BD+n4_8}u@MdsIUEd)v;lL!rcx}B;*Ixg!CEHR= zMF)u;3(oepz$QqeF`@X>EZA`2n1EC`0kJo13eOkp4Yg+I#p}du*$o4V^QGHE;w9%< 
zqBuo>GK5``gszN?*l+%*iSp*|n+xpbbU)n>+XZ-@hLQeU00Z|dUOsyL+%8>R zUdhxS13k^n`o(qXQT>)h)qBAD8vvLrARY4LoRL6X#Qf5n`jOa< zqxwjY9CEwcFMFkyX1~W34RWm3GR-OfT=frG=g>Z}ZL;K14P3TitMvWGuG%lGHbF(o ze6c)!1cA-E_)f;P=$1=dh)(~RRPJr3lrBkaCm3Env-K%4Tj?R8i{4rDn>P=w$?FK{ zFq+rx?WWZ9TtdCZJSHrn@`zykv4g`gE?^4$9MzchoL-t z1{#8w2o#46jEt`9d@E-Y&F>?&pg)Wg|3M?1JNW-vX~U2-XnLMjiG?_2&98A?KEyp% z(T-DC9OpPf4^Nb3mPsDav-7g_*Kw1w_7q%ZxR6+pgYX8V^ibq?vh~z%otbl@mDjK_ z=|Wwb;wgdEt528DQRWS}yWkyhJwk()bv>}&EZBQd()p%QH$p(n9PRPe#G3o(O;7}~ zS4B$-d&nrtuiP=oT-}swL0_n4r?&_6IA%rmOH0n+;X)VZq#pW`6*1?wUbel3*{23r z0~Vqf3O%U)HMkb;vW^Efo|@TlNqUskehuyXrR#Oo;`E+4&^<76b$kIS`!QsAnfJa) zZ3;Zv0?l3Md?mAEu2Lw?N2GCBcvu&4y0p-UA0A$p@=E`CALs7EeP5+kZMb#nHW$r% zoc6kbbU{IGOI3^X{d%I^&P#O!STN0yXj$>9#oXF*$Xoo-FuPuOe#fPDhj%okR;5%E z0&e@QLYD=#thPy=WU`vIO+9C~vUFD6*HL(xph$Pi@&~VCL=KT(7wRdZmGC<>k?NXPQ}+nLbyi4H0`9%N8&Cpr8&RwPFj7`?fTf z+m5M=yDfX+{;HS{P;Zb>B16mLO~} zF)Im>jIeNJs=j8~gHj=rdoIKXi=Ped6>pw)?I!&XN)uD}%^TbzR*_wgx@((-#`|ri z`cCM@T-UV*t6HzPe~}cw;ifZ%I7!tdYG4sKs4u^7SkdlZM*mK1J$<(m9`aErBD z_q#=QHF52p9u7gFN8((*y>O_F-L|~(`g2=nw}zhFZu|Fw@BN;gnoGq-SMpslyS?4& zA$I^~KZ9D%h4tn+ppleyJ{gw$focGFosU!X#l^kOOu)%E&AuzLao5cyJ2AGAi)#_n zjA90t2eIko;y`lKzbf;O`uMsg^;dyzy1kP0l}~esS1Am$u;c6IQb##veBk<*yuF)N z)Ynz6-^T1NR+wSmmoMSTXJ$JZd#2f6T>ZJS+xthJ7G>!i<%PEO+nw!KG}-PRt58~D zR4+UK&e&)|j9z9}c+}Z@M^I)<@`S(%oCz~wom{6bgSEFOvR|P?oNaFFsi#kO z=rsReYK{#(+?aK|d990XgwtD>_w&| zUo0+Mnk^I4y!((;`nhIham9!IMz{&WZU{R$0`<9=2;Fq7fC0@J$K#1 zRoRqnUHLxMen{%JyP6b?n^~!b0rF}rWR@?nEu+slp{$w{vy?Z>x-nN%SDZMoV_JjI z$jCZ|%*D`8SNq$wSM$(O>4&z2eyy=f>q(qu#9PzzU;~Pp{3kPO1%-ri7)f&$^he!f z)9BLr;AH0GyO85lWuTVvdDz60uPHZwYthiH@iCbqo1A2^ zo#;5571F15TpAx?HDA-{ck9lK$4|@pJ6+Csi!P7rNUZ)UxrtBzGV~rAo$M$=p<>v8 zv|Lb6Qq9vNOR?y~u>o(g_oaR4c}M7&c}s{x_k{IX6Y5L)WMyr#$keftX|LAy%+DhZkH#${n+lr#yfYn z#s!aTTc|XPAS$iB(837v^Jf)03SGWpv?sf8Q>MT5k4!EX#lVWeC7xKFaAF5 z=1tNK2i3&tJKsbM&}4VdxNY@hZH%jZweFSgADi%%&<(ZQRKd$ z8&4l)=(%R?`K^sPOB z?_2UB(>14XN5CEirCGd&q>(G3qnO;F$ejiF2tdQo%ZT>SF2|t7?<;8Q>D|E|wnw2{lykPv+n6S3Mj}T(L zjufOUkc9`Z$=#zVKN9F(Jt2hw|6ma43Q+gWJQb1d&P6A3y38D86P?|=m8@ngaL8Qd zi9>!+ktbM}`V7A3BO{$ZzSt7+vM-#;S59F=-Gks-w9ie(gM3t3M)CI}h#TI^$U|nO z&19Up4XXUM_4+I*rR6nThVz_%?b>6g+Yj?7Q!)^`M!+Ae5-K?_0b16*1RRhr$5WfJ zKA*Lfi}8Y0D>35C&HaubZ|qA}<$&WnN?R}hAKXUd`SUwt#6n~p6>7*Z3a$p#`vM40 zE=iT(JejcynhY)tJ`Rf!BHzM=N9I=CThqLXIo`_eYw9o%AD}c2W*q%=!GiW-o!jLe z@4WsMJ^x4}x{-woN_oi5uYM0O_#*t9k&pGER(9PWd&k7$dAo3q^?cl}R0sFK4X7-^ zVelxlu_L|}-goVTX}x0zr3jdimGdVE!NKqwZW7Yd z%avaST30CN3m9$Ux(KStJ1Q0)5CsGkjMIelYu*x9e!*A$k{r2;B0QJ%=J^Z2*j0?c z$%eF7cSiLAeEQ74Heo)YcK^bcvZOXOu3`>Pxy#?3AG&8d*LkISU1c zClI=;4=l0^GTl<2zI?$E(D{y#VaU<=rL`ZVt2%;mllk2rl83rOt}DOSnURQYs$BC) z(!Jy<;w)m<=daRTzr`Fs|P1&&bslGzp}6PmOuy2 z$?r4&2ECymoC!?+p%HJzd4!=mCG%-vi8LhXd6sgq;6pE%MP}kg%%hijJLTqwu-;T3 zp*s>}YSO7UFPU`GoqUDnFcHbUZ}jvm<{0+o-NM<|f9Kd9Vr<8t9D#S+D-by_#cFPb z-}9AN7583K82Y~KHZYj~u>Yg7N=8FKx6nqT_yLBe52QoiGS?*4x+KnsTase3 zDM1aiWB(asqC~MyXejDhiR@qrBu+tKC4j&OIs3=(UBQjV?ZH*fJZFS;ryKWfL?o|* zP9;O-w8fKGuk4q_DXJTsZc;ny9Mw9Kq^@znP5$$)#8pd-nwvbg?y_p!&Ao6};gkng zIVgP>VIO)apm>I5@CNtZLVqQm6E?XRwAT)u;lpX z{04QU(}8UKrd$7dY0B3XA*88*ePT)%-V1Rt_g$z47lAV9p$pNTLT8V@zBpQXu08MV zjE)Ni*$w)@&%yd4K2bQF0c1YiqcM_gt`&G5uJP^dDb5okLp?p$xnw7_^N{=%11UtA zt`A41GU|(GNdx7q%y(~CD?Nyt62^SD&YCrAg6CM~BO-$jfOeA`+J~u2eBxOfg^WyR zv_!gLk$odTk5Xlb#ty)GKJ`WV0nutSGW{bhJ%T*Bb9LoRFa*~Wg#Jz?M*{P{evGL6 zYZs5Jl)Kay!1w^3YndPPKc(B3q@(Y3e4xHi#eX3+3r{-FmdPu z-(cFzDu!PRL^O-1<}xC}o7dk~eM9LZ9mPic(+1}Oi#gG$>B!uAjQrJRxwK{qh&D6Fw^TGhR0twA0;u@niNL4ZgkNC1cvE_Zux0K(nVecn?+ z&4}KZr1ilPY>ljJbL#McENHHXokm5$7pFEI{b~N#W^TpD=}F=B 
zWW~ZEtJk7lKgJfSi52(tTnLkhX-cSKICv{FvkxFv#LcI7>pZJ%pFL!6>NzSktGH}8 zcW&k5rcEj7Kf-bqBdRK_x(_ch_w71jm($~1787dISf-B*Cg!-X{so!Cf3L><(w2pl zIWT5tEqd(tk7d98aiU`2c|-TbaQ{GEU^r_y#+K4*?< zR_DBe9PQ_Y=C$El06h!+dbcr(cg2oY=tRaMYQ@@o421yS}sqnU$XTqsktGw)Yf)se{=i zZJ~@97N+6tUV)95m#&{*q4h1_RPEmsZBL}Y&iGA#Z*6ZbcON}USTKbaTAXj!n8c=^ z##C2}xb;o=!{U67E>&;Rc}Q^0xPe0^+mWFiuo6x&zFENBHvB_Q&SLCaxML@Ljb5Rj zc6#9tgQvvd2`O^a_6YTu7ix`7AFv}_InL7x!pEKuNm(oq)9@U~7D=bV!yPDB-G{kRRs0jM*PiU@8qPM~Xr6;3`01wXiNz7@r9aZ*poq^0 z-@s{bBJF)0YoSM^pdk!&p49XAfNOgbQCi`+hWK5YQ7f>RppBTL*P?2;sDIau4Bj+Z^ zR1UWk!SWKsFYASc(@_bjV9M@4{M`JR)jMK5`5s+IsHxIk;HL=#V+gnkp%HfKZcKUe zt!HP^eQGgm{Z8&QhdN_xx5o??mrSAwH}a_5l3d95oo0ePwc^J>(zr@LB4X+x z1m@(`K2h2QW=P%N5n_mPJXd#!fJ*tFJG0>$ytO#ah=2~NfkutwF zZ^JbJj6o?pecy(*GZ#!q)aE|FmR^fXhCN0E$@y;t85$Z|ou>CWjW0pS?c*iCZ)nd| zIcSOiOwn~p1bGXn!Dcva5=tiHngpARh|NLc84QS{#T_b*>daV)_Q;cQUxYW<2xyZR zkyJ`!f4C5ptC2{uW^=BrS%zRuhKJum&_(`{?( zUASETp()-=^rFdleO|~z^@FDK!shvl+3cE(%thtr%U%jWmdW)lSDEI2`DGFehy)-F zId7&v!Z^F+?=Su+ue_IlTExz7zhUA9E4h**VYHJE#cltcA$GyyS~Ry3IHY=?rGe$a zv_J2S@loZ@S?yakQ=WbE+R1c;ZKYrQ3+_B3Ti2X+i}N1}LmQhDJnc%>i-_jTaM@pf zab%Rin5w%mFgCtX*s^NpUM-eD>sPZr1q>*9aXUY<&ZI%(vENtbJ0*Lm{OMC5FK8n6 z0=)vqv+Ezs5?yU-6^*HfK%W>FNWU~GKeQpnO0MaF>YT~Rcd9g?L;WL|8#!>^!;S3U5AK@84;Dz5WYUZ9wnxOq;*3Ax+9 z2a$$ZWZwh2#XN9ewF5sf7HB)zGtm-`mQYQ=KRo#9C}}c?ohH;NY11L%&Iw+F_>B+E z`*6q!<@8{Ri~ZUm*cL2?Ndurl(Blzq6p|j5UX{Fg{GV#C(nhy`cN0A{4BMrGk{qyX z&=c|Bl{nAe7o3Kh$WMTv=-FOB=4q1@u5INoOEj8&dqEd(#tpy^C888fNJ1u8!i@Q{ z3@QLqtKRPJH(kA~ftEHpjNKW0jSXWHbrgBx>OxIt=ZQVTzJZQ0(#`nb$#d4+Gon>T zxTHgVd=!Qv550|leqZMbet9dEe{Rvp(LpmixoEs4%H~jYw0XbN&zV&^5$YgQ(Uh|O zj`I`dJAgXH=56%Hw~1Q(kJ7F^D#kR7e~sK7m(a;h*(q6vR%7jIPmwdoHr2LbHYLi) zsGaFX6j2&wMJ_uhVbfMlSh_K$3+cwGjwT!1h?>UzAf?$5ma?_KXJ%~1zdQdpN8k5; z@9lfv_j{h-MR(Cx0~cKroC|}|yr~1@2$Mwpyc+U|wMU{*SfjXsD><%mg^6t27KLH; zFmX%((CK#d!4FNU8BsJksIb+%|C(=kxqe96?<@^=jj07npFp-kYFy@V-TQ4l z(zv|YSFBoght-WXs$k-%7dj5lY2NtLaqB(fJ zXNFTT$Serf=agTkGy_s;VpqT3TJH8n96J#b&a2&wZESy59K5{^SnNR8!6m@gy7}SU zddF(>ZII2^RL%JKy4vEe0S)?DR{~H=ggAF2C^Y}A#F17 z3Z5L$v0_zvii^|>Zo(iC^Da`b3w*f4$zed4*?>APv!|#Lq|kZUt>RM|-wDBf_SYB6 z-zRdY09V2GstzeFts%B6e~NA>7&GFjarPUogsmte{YKC3;LuCWsPtru|9R7hL+VU? 
z-S(R>$95SizNT|5)sRdTC&bnLWOn?2l8N*voSBx>c6kZ9f z#_)?`k{}DQU*;-tkE7RAOqvE;ETCprBY!^Wd;G83Bi;nvL?iv@%vBfUAZ5U%EwOMt z0=`f%2DWk0f&~kJZ3L1C^5zfnIj`4o`${mSUt!f@B#!IvUxK!BMtpS%ZpDG(CGaua z)7pJsU%Ps)lY79 z&np048())0KD`bKvjT7Lb@8NP?|icKRj2yK8E&up$T#Ql$i=!xLid3*Aj9Huyjkg- zT?+HnNzrj?A3u2&1Qe$0THAOx_?hbD-pP%UgI>*Ln$jnFPAi@ zR)|)MFWX~ub#>o_uqa&N`oV6v|0TLo36v%hJD)2A>AP@h;(qQ^?vW?dDpl8^?$B=-H6v*bIv)lXYXSl`LSc*D7Ltb_Q79DJ z=MzWaU$o_TzrqhDTVYjOJqt@GQ~mq4C{cZLee1in`uEQ2I-NDNwY9Y2VPms2y=!i3 zXJ*Q(XJKaF+(3;&9mg_KRJA?)chnK+$1!qDxzD19^R!2&w$*%U<5cpP9VGV_sPdECx42Rtw%FKk`Ygd5_^Cii%q}6?}CbZL}#3Rc@vCS)C__+H8e7 zqHh30M}^_7b}VTtJ7Y@+p0n1Y_b0uv7_XV=+@z6c6wh4!Cb22t&3#p;!8Gl-jG$#9 z|M6Ktn`7@&WsfL7KVsW-fh&$a=3PpV$+Rfm59{xt-e@C7+uuA;+mHq-0PfW z;oXOc^u0$fDUW?Us^35?x5}Jx?S>!E2%aX2D*Wb{wW0W{;|yZS$3BNKUw`=8BAg(H zcck_m8{Ij{O|L7e1edKQ%zu%$9_=u4PGm`b^DAzgSD(luQrxTi+wDt*X=wJDZ!dM- z&qEUrq(1sgNhH@t>Ft)_M1zLwtMh7(m9C%EqTX`9(9+J^A#nGqi}|Ts_b^p8oGYJ^ zfYac|BaImwL2cPd%~Kc5<$^tq+_mm&U}o%%l~O+Y^t|50^Vr)V(mm&!H-i0+&6`wI zGq&e0oQ+_A$me}EHvLNGuOvOU=*6nDyu$ns(Wg-f>UTVD5Basyn%5-tT!>^0isetW z_A;vX)4zkGF3s9Scv4Q0MWDPsdAZbBGKf2XFx%z)Y}d@1MQVx1%%J_YRjW`PjYZw( z*I7L>uNv#@UIpuwQJn20D6x1ICjV=Evu<-@+xv`Uk+XS2rjvtgq@;8c(FvQE@BK<@ zY9?$SIhG{e3~DK?4Im#>^4=v*Q}gzg=H^kO6x!}kY+mh&|B@manx3^w+TN$+-A=6D z|C*@aY@=1654m%P#bv@B-8(rh{;$|xyz?Nem8btK@w0i$t2#$AI+#@F!F8$Y8go{w z!>+sT+?3^?i%ycd*~*;@I9mU@&O4RZf|ca5`YgK9zyNVxlhx^;lHp!O@2Y2JLsyL^ zYlTzt=cl*B#SBiWwwvU_3Z3uhbc&zn>pjE=`mIVO}Z z89um^x*ln+*Y3WjbBm`7 z9ABD<3E90$;^b@ja4Na!v{1c>V%0YO`YA_I=G6>;s^h^c>*na>9krp~#!@?p)6=!K zJ3Q0~E&Xf?T#tp72d1%#5L*nc8)3~lo?aHwJXzQ@LRE`E}Na!H0yEFN|Y* zN6dxNdT~5_?n|;_=3ThO5SMxWSW$d7*9LLC#f|C!;hI1c$`d6fbVJcmYr4-VT=DB0 z0Ud%6;p4*WeWpxD@y?*#@VQW+^Tek)Sh7hdHWn|MG?J0AbNCC&Q1JiNM_n$)CME_g zQEn6O!lLZEjrd$vC@$%I{B97E6O$v&D1#3)T3= z238el#3d`;CT;D=hlb7v!%TAM$ulcn8}^I4Tg&0Lv2>@v^6t_-q)7xxn+Wm~HQ70^ zvywklI*PV-(8sf#~mil);1q%q*Tg6(I!Pas_jy4}gt73Rnc z&u?TczQ{v~{F{`QAr0~qwJf~1a-KY=&5k3#$CRykbs&G~cyQQaELeu0l-JRcBPTq1 zbFN(AfA;Y63Ay>xCq%zm<3yiDL`3}5K)%j_{rko82i%KQ(zV)t>P9lBmt5xAvlOW( zhuxd>7FfY&()5v*H|^}~Y>AMsM_~PH=8yf?ng6|h?Eh{3R-U}M?$GbH+D9MdsLRo^ zR*1=3Fqqq^E*x}Oc5d%4f+B|v$p7CpU~jAK?(R-} zdz*K$o<<=-@}#@Sb!0X@(UT9yDHxn-){|#=f58SC502hPK1Rvnii?ZKN+EycAobX2 zkB)s+(=tYARx*>fxuHPxNkjQ#t=cBi4{rP{5}Qq2o=>%j4``sL_v{til!0D0knck9K|0O%mknus&b} zzrJX{Ut&u+YO~B2GIJl(4l}b7laFP8`h{=qtyGNBscbFoSf3gKGuP#>DxK-t+H!Jj zi89gIE&SJc7^IHi+u21iEzd5Nw<~{8`~3PETkboYu)zB9Nf(Bm&qR|%x{ryS)%F+N z_tBo^3bqs6FSQoJFIUa6`qA_X3~TrS29;y|Id6Xw_Vc95nBIKgbP}VSjT>LG6x&VN z*HJnM(S7PGHfm1*V_KeQr~ghx!s|Fc7N!leVj6wLK>F8@&$}!61MP?Mao2dL7sC<~Ql(I~!owPiBUcs9dHqgQa;G3+f&m z!${L8yj-W4oZ7H`@4QiY*LCc`VC`O#ybJVA1#2~a#QA1DoQ?z)3$HE8Nd0$*a6L!@ zajBZ`7`zFMr044nA35T_wNRJeYu(6RM}cn}`tl`uBTi3w>u2|fu?e?`F|8Nl{2M>x zj-g|n;J@UBj;&VeY^SfUKOcCU%_}-p`GdIh3ntzLqx}ucfVXe%QVQYAOsY8x4&|GD zFK8uFQc@ni9C;?N6YIw+PWcTy2O1_($!fn#d!mfR_pGtf)&AEWa|u$fzrDZHUgGo% zeEZ`WW|cIo?%j>f@`S*X#Xnl2%Ll-vc4g9V?JPX==*`rt^S>gn)OxGpjz!5r?S^J$ zExTSV1m`fT4E4-MQn6x~$m(yLxm<=MWL;{oLRYEsE z34L+TbSQFOp1g(2$FVhBn?dW3dtZHPWu`|x$MDR>>s&Zgl|$atR9KkwyOS#AJa;P} z)r-rIIeNld;Jmcj-Co65&2?R0e09a($&B{T`vRXAm$LdK7JCQR!@|NaaOoXI(XmKQ zmk@`Bh7L7edjE>`zs(l=S9oSiJ(DeQK1_S&jr>7y}o@=hY`@LLqsgY zq2zf~H?#%@%H+E4EwJ@g*pFyPF-?<5oqq@#9>nlaXPHs|t9zb{@ZUzf^aj&n8V zbi(|UZ9O2tv6Cz)x-w0@U8ZO zX?{hX2rp&)mFcH5OmT2kzsp?dsE;2J%PF-=Hsz+oYfT@wJ3Y2lHdkhLS3PzeV1K-O z_pW?rIlXFODBR2byG8jnIu6BJExE<<8JPOaD~WWIT8=}HM|t1Qv*uY0L{XH(#1EDc8Fwpggt2PAWKp-@ zm`EIO7;ux}T@ZTBX~w@bUseA#*qZmF&WqbBUx-V;kFs1asCQ!0mLLO}B`;H|P!yKi_)q)Ol=)a;eYOV3e|`<*eI_Q=hvO^FSipu03yXCW`&h08jo^O%Qe2LSL@#)X1z*hV`@>l{0;WQK&-Hz zL6j4nsTg(eq^i7wqWPuX!h;)L 
z=xEts;%IfqP9TmXdC#=o`Ks!bS2WwZwY8CFHoPC(;zTjRFw^jCS+vVsA$~>wSYW?h z{@q`Ot*%sv>gL*I(Hyz7J*4Iv@(;_z>a}U%gN^k;EthBNefIt4NYYO^d@Egg5DARG zXX$sQX?no?3PY%SO{UKKtLX~hsaLM+MhWa*DM)kEI*E@m1)b2`P)r`N*>g+`E7DbL=8~Es zc{LtIQ^l9Of*QlvaJ8e{Hzv%wbJpjpaI3(NU` zl9-wKM>N0rhF`ds6)+edhBi}<%)7$mCdS6IfWE=WTJ#pMcXt+Wyt)2CzmBkYXAGKTz`S2 zaKjOrXS{fk9zACHeAw7kNpf+c`vz;ZkKzp@qWY^0M?x4CB5oX`>Dd*;2!{*m?cS-Kttz{i!PHF{^X8kZoyLq8sP4KVZ}}Uz|w5O=m1W?(q|XD$OOBCeXaZ}tpGM{+IvIM05+Dg4EY7lQ^lac?6E8Xps6n zOOQI&7^V#Y27Fc97Ui&hKSzE=!uoQlFLnJ;gB$ec2KmmUUW+N=kO0uXY|6 zh?~Pk`s4ode!eXo$qHc?@8AV=XLgzajEpcza{{*#H@CN7-~Vda1gS)cPQ^pR`ark% zloaFXuFSS;C!ssfzh3?P&lBpohOLWXwl47n7+eqPN$2L3#p!$_V(RrHu8`18(=4xxcdNUO_#O-KO1 za{-8xZHkoGAAJd#@3%xzx>X^q>M#5@Bv}qRnJ9{Yjy4_1NG|q`C&)}a7yC0)h9RM$ z!oO_S@07U$0{9&FL0f1Vi>+);x7dEBas&ZV76WAf7N#KDyD4f`Yi(ePX&T(^g9y`h z4GTK+l>B2A!g^og#oBl^>7D`TCpCy{OVcdoH0zGE{UQcAN!Kr1ot-O+Dk>N>yIbP| zqSwowb~KK8Oz`E?cjE26dA-740~^IRa~%y5#o=3O1@z|7o*EpyxSV=Hq6M&_=IlG4}tp(?Lh05JiOnQ`Chasf<9;WQd_Lt^bR0S*Hjn+4$5 z?E9Z#3)}p7Wsrbd0RH1TEH?m71f~F@J5_8Bk(^bt=r}zN1=sw$Fs)N+kTX|`9fj{T z6s#$dT8u&>$3Ob~Run+nLC3L%P;W~6E{OZ`7WWoMKUF-cu#=FGuqa#qQ9awKS!8Rp z;YqO{6BY^nvXacUTF8y=e~u{KUg-&1f&GCoBe40sCC|K%Y>Rg+M4@u!SBt?!aIGjYhdHezJUW=?s^CLx^4=_kGxe(JkArMmG6jQv2s``0!TG0jw>{-Z|FW9bpH=uUtwW`R1 zpp3_OMMX)8(sA&CL!Vt|cdkjWmqPk$D?yz77~=FNyb%O+8IHgRJKx{Q2De53g-YyH zp6U0?namiZSmvvRUby^g&u`(J?ACUkc(>$w&r-S7vFomPpH&@YP?{`%TWyCC_&FqD zn_q)zgG9}xM-u=cA&#`B<;JjresNIiF+4Voh%W8)(caGW26_doyT zzm7_8uo_=RvUcso<%DCYsi|JIgX8A6@O=P(&X(`)d`efrdVGa5&7fo@16aH#-zRa9J`E5Y~I&M|BQyGEdrr5Rwc=wHt`?-vJs1XBv+nM{&k zEsW2}SpvK%bx*Ay!bR;f>H&BR80}}S|G2{N+JI!^Z+YWfF=+11F;Xxyn}N)pAF@Q# ziI1?8=3Yp!Ca0vF3LJ1834k>J(agZcxBRm084DE^6-f5=k+ks65Eejx$+|#F$lJ&R zB^$1EJMOHHTXba%&EyXtfIZ;`iuCyHk33D9s| zr~v?OZh6Uiw0dQF{kSv1gLg?tT8ptoL!!|MX2*7qv>bwyrc_ zP};Tv?>7r91iS(^Ij4sGcYVt1_E;h-KifuLs+*teNli&9+w7Y|zmj7yfRGrcTEv?( z?tOXzVZ{|d=1?PI4l&#C_CP_~Ktc(rTp>_D8B>V@+eQ#po&iU95})ou=ZY8g)>&0S z*v;VV-&idqU+%AmU>mlTRckl(jS^N4I1w4vxvwhPe6ar?h8_=_9Vp+`d&Wm39a=IA zBt?joAm@LuW!|^|Y=yjfy|{e_@_AyHVeCHXD%i%r%U6OD=stAmCZQKf_?Z1IYmd^SBD0m1KyzKpF@QFd@&%0#fk+ zjS{@d%F0qoRTdtxo4(!&i7EiQxH){J)t&0!XwtVSkn+07GpbTZ?hPvg-R{=tzq?LFn8O_hPv4S*u{#OiRFa!SxB>?c`_c_e!`Y(Iz zY{9w;50D@&hyQx+S9mU+R0N0JWCt*cqIwP;iIbC)kck+9fwblCB^1DJ^92dN)-AVq1*H9|~fa(nQW=Tr$cWm}_yY>GW?H^FwtsH+Gf7i7?`ObmS zp*Hs^e;`hOzkm+g_l&!@CL3?(uz$gXcN5ej{OhwK$Ly3@(km<*mn!uvM zQnbBBj9O>l51^fQ@DUmY!XzavO}eA}F($Fd>&Y9IY${q#?`)9*lwj2#h&*`3r!?H8 z_jUYUS%56$w>LoTYR3lsAiMQtFO|j?{XLSHHOUB$i8_HDE5{N@IB5d8h9N|s2)t(2 zZ41F8+OqN_u2e zQy4fDjvaEMd=PN%ZbHZi+u7b+ooF}s+4>fN%i9-_$(TI<2aCG`Kv;ao=4J%lRxAJu zlU^|Fo3Ty3Geiy8hJdOi;_6hSP4h_{$Z3H;&IF&H!RPcm#PDcgt!$v}2;^MwUmm0) zgKJPg;DQHNt?;{T@WRfnsPNbs_Xq^ky1bXCPtyH)ZU}4fbQbWqn{m(hC%bc}fD_~I z3zaqPFIo4Ao+_4T-T6cx4awj=a96;%jg5^3IpImsb&@o^G}p+!F9k$>Wc@^NvktGfQ$>Po?v6D!z*u&9!DT6@Mjy>O!{AG z{s~F^*7~nt7Ub&|1w&6{_}4FR?Esad?Y=%Xr~}gryunEfUdWCB_TfIm=e7wz3LpR$ zJ)mB(I@{;+(entB$e$@bp7@ONIwvoVv{7eT-|V(sJ|G3)HLrFTNlXg2v)suVjfY~% zY{7{0($7pJ3tVNLn5gw{tCev8G7$VG7uN}@X<=f`5DHvm7T)~XTfScb90FJ(a!=GT zgzPFz!}?QL`PLXPR_E64C@T|jqydZtt{gz?Niqy9l6fF$-O0jCB&VYkB6*z_q3+-? zw<|fBQ7J-hQ}6Z`SXrcA!{rFHWgrk~hO6V}Cu&XGYN1m$Fh1Oqi584~OnYN#qP^H*4)zjS;_(28l5-u&?NGGEqd>Csf%Rqw zyka(lf~=#kk#zh9)~>)7(f_*S8=%_Oe2an6M%#&jK7{!AsCD|cCuiUXx?`q2v8A(Y zC3kl{de>*AMrg%K4nmm(R`N_53%8Auk`mMeUWDP`$ivXcsdzW1 zv%%XsFMgu03j2Qh8e-McZu^13^SmIFe6GB z6qbTgQd04J84zp0cVYBIjymv&Ec&%T5gQ0W>FYB&eyl|dS$VfDTC=Rj8o*(4ai8^u zmq<-3V&ZzrPY3z`MFK<3QB9EzM! 
z^ugn%)yrYGtR*!#9@d{{rmm|Jxd46`x5r~Cj#?9vCLnqsOyV?4#WxYXzhZil7r0p{ z$|=IGbipr{4~NcAk=r~+*w{(I2k1I22OG~f{LK|uDFdGZTRmrZ0wi)!mqb$Wae-Z@ z%Y{(JAx4rMC`#7Tcdr5L*08KVtD|x;jplcI7>!RSd4de0DrE`SuAZeUM1@G_$z=6B zQ?ce1C!Q=>mQ8~_~pDT zZXOkUeH=2orP;pgKesn!!Gem!FW^BzDY`?|pZuCMLT>-_IhO&r%t0<*;DqDQT#;8O z5ylF}bn26kxa`MF!MUVD9`7$~6W(ceuO6%uSGJCa9jD)O9w z0osRcC=@~9QEb5@hvp|^W@ZMJjVd!6BO@cg5MD2Tp55>5l;{KW7|#QrENlei%22Zt zxPRnr_JA3ftszX>v zCkMzw_I-lnE4=B*M5)(AGy4OHafjt55Zd3gNe&P!UqRMX?SpTT5CbU>{&b{QbQ{hJ za4My+VG)TDwf^2W4aGwoFU!Y#y6uFv7Q6opIA6)bHFO?Q0j5dj+H# zP{cq#Qf)s4=LV*w7hhc!rJ!|W@c;*&vVgGyks)J?f2;;rgGr!pLB=BRbN?X)3=6Uh z4a|b?_jiP>4g>(rtb1a%n@4L*!3R~WYy$o9%)J*$6;LsmvcqpPLR36^4+eCO4=S^m zL>Hf$Ys9R9w+sN3u;H@@a6LtC_;_!`lpGyB2wbP`ACSMcjYk7&z~B%HJKAM$91Pat zydBhIglAtLjN=kOtgm0cwr!u{AVSn+5Y^)TTU(L?CfGLnXh0(nEt){9Z!hgnN1Occ z+cL&<6zRUIEm?qf#+kvn(p*gpG`0fj#Zn?KsV-DDVST9+jBHv%fg za(>v4EzhKrwR^kJH9=e@KigV@wIIWK!2QS`JuD-z_h^C-Zt}xoM3|Ftcx|UgS=BGh zIY$q;&)N|^|LZ}u@nXTTE+Kt{_7+6VFN@}|ciO%nijEKJfJZ!l18l>$7t_3Tw=+1! z^UC-Yr6{qAgGV;nR6Hhhp$U>v#98Uzdmsm$9mB%tuc@V8c?sS^j|b(eU!BC(qr)j) z?J_QZC{u|oIU0XJ6+f2qcAJZOdc5sLFz~v|zRYPq0X!`|(vJRNuO z6_aLdHlTMrqxGAM8kTqUOM(>t<0HRF@M4R;2@Bcv`4qGsEk7{nLJPBAbg=X!c3rFJ z{Z8X=5Uh^m<3K>qk5KP&y?Uh%&ao2|2*8&wO@Fs(2w|M)8M%F$#cmPG*()(#IHzz9 zXyqL9K4(B0AUl(9MKo6Kg?c(5eW_rq7=@7J2)+)9ZXpu_!B$Q~xz>^4MdAKejsv^$ z1edxWoZSJrZ7Qj4S?f)NaJx^i$%*)*2j4GwUvgR;)iJA3gp{&Ei~Y-Q_PcTXmm6K( z(MqrDvjuU0&=3hsYZerjf@nn1R|$J-YKXNaM-n~BM+!|=Dh_r z+Mc~&yG5G52#hebb^W;ta!QJHjl#u@Djdji`067<6#PVNn=1d`1TAut0pt~GU zS=EZi7RU(tSIdZV%7O0o!oUrbdQD4@yAmm|{z*ofXCi*kac672BG49)+<3HydxnxU zB@q?IZu9#07WX`WY=h&BiIFcZ4;Wl#Wm~E4NWB+%ZvL|-EG+)TH6%SFw_)Q_x-UjDsO-9XBl(@c3hw`awKM+mcHwp8N{e3j`5ojroVPEDfsTcKGgV*{UX}W5G_@=clwW?6wdSwuGvW{NAxB=)? z_vy?!bo@g;f7EUBJNjtL-)l6+*jBq#B$y{IUaNa!a`e0J(c~{|#G59&mV6MmaoPBE zK6-)#hG)7#wa%y4qE<=2q^<*B;aPiUq?*S`TwOmp()959tP-&O^qKD5)DIst3$69R z9(e2~2{^(3^Bek0lHUcbJh%fNecE{6!MjMYSimx$+)K3sudu^XzQCC10BREtLcxV;S4eNUn!#JO8h zwh9u2lBu-fhu8*-?=u|!R3ftY;+D`JIZ&FeRUH9Zz@jZyc#+R}*oPi)`O5nDXff#* zW)%;zn|Y?73?MV@%mqkvdOXpcZdC~pla2&lXDxUGT3I3Yd}=BKisu3@7JF`vEZ9|? 
z_Y;$366}D9+}9n$H=Su|)ickoV{Y{dKd}AOShLeDpdpi`g|ESrdp1<7PV58YJ$0p8 z7fC*LJs*0#ZCdV&4#>{PW?{>m!X!gvR!E_3v_6Rbjjg<;^ZZlf`7Ps{lC`1tUUS-E zgMn38&0|%KAb9{qWWe8Z&cE4TPc)0vy6pWwg#m0CqJAN5Au~WF8ZQ&owOr#~L6kGY z3PKQHEI*1w?elJ=LyNjHk~sC8x8nI2S04s{NtD|42&l;0T56ZU%?pAJgu@wm*TVAK zU8s!#2EWh}P@m~pFRld{e#e0Babg~*pMm+BRrqLnkZq=FZyQ_AznOD20Oipq_O}!& zCwV~?a>JtFlgy~qX7ZE_^MDlP1R}M*t3(Sk(Mm#N}%>}lU#ix_ENL5~#b^DtsxH*bDU+TKN!xqPQ<|+(-OS5sDoY6IA1>eu7OJ|4-Es!1HttJy9e<_i0bzFO{Jp%S*}ke z#jzHSzWDOx3(z~)g7PY>XY->szWu|@V6t8;2l*dPR!53>UaX*eZ6mGzX727{UwJNj z!BU$j^_%)XCf20t-B9que>tpIIM`pMDX*kDjaPqdH62k<4t_(5A@)<>W@J1z6z-{7 zck>L`jdGC#LOS3L8T@%yOUV^FzI}5aAs^s^X?pmf8zH>GlbECx(oyh&^TlKPNCFKkCG{CnN$Tc|y{ZqE)cqc}cMSz4UHA$Zh+F+&;Wh=z zG5ZYq-Yoq_keK4#A&PX@eyh_x=Qigul!GX7?^#-U;qooXUXsZW+Zki?fEuu9!U{j+ z>Of&d0yv#jsJ(dBjkdx~)A|!F;KQLp0E_jUV&@wl-m16&qyzA2x3g%KYEzH1maPqg z66zWk3AgpV`3v|~|C(c73ErTW^D5hABv zq+wxZJugFe&W>O`&|zK!UFtPeF`(zzihjmsu)9@|&M{}2xU4Uyd+@pP=l46C+?z+_ zk`0WB$BD7gm*xg`GwY}%X0R^;4F~jI^fe2uhLBzDK(zSu3ti*d2c;5w+fK=wJ8E+UpV4A_zZw=bUAl+=4J8*D z7le$DCoc#6iDrZT!VjJbN`N>f2eYgpORyE8N*kLn&UZwl*rP*x)1te+=*2R4?cAXn*uyqb_%S!k0i14>OIzmtpoKF@cn#{Z8#p$e!2 zsY09CDGpYUWJ8vCf2kdzNl_?a3CN)mN{-$;DDj*^!iwS2ZYnN_iR6@&2>)B2^RRHC zmi!vX5BlB<(p-kdKgX%|di0IN!(A44LaS#uNMD*Jzor5lCWD50a&mI)P(%MmTTy6x zK$8>5gWd~x@mO^tLjpuy3e=N`z|vm7vRjZm(ZiB)9wblCL2k(oBw_&hH4z6E+0j`r z0Z?Cv3M}lkZ)JeYlNwSe zND&+>zBwF}3~e!v2^#`v1nG;Pd&7Q~+9o&rRu;NeJs64>01ZTS1G#NqiIa^j^D$vv z2pJ&0Mx^ku(fe4zFh0bw>lVC0)WL}OoB{UJ0nG@dZtJ}T!sgHct;iNjelH?XQdcM6 zRz1kN6RU4gGNhxlZy(YHP_?Z9`n}QqmC%jQkPxBQT~{g+Z9%vKGGK&+kNb-($dZfk zk<~gfd3JwEWUC*(vB}x1O`<$E;suXojPH*KCEV0Zx;G;_YzqG((BOlNuz#~#@XuY1 z|E9#?pQrVKMsYbvV`ZzSBll(xwQ~7i^cwO^msdxzKSmwogapA*#E&oYpN^y{MLvYi zxZzt5e}4>Qb0{d|wG(C|y;fqbt>f)A@_Z;8bA+K_e-#DUlRtczf%2mDugG5Rme<$P zfhxW+H?Vi)4Ct8?e=}p3k#qndj6T~G*x=-S2Eh;MRbxe4hP{4Kk7ERn$sR1X=XsDN z-ceI?hJcIldR%9m!u0j^8N3|PzdL?>P#0z3M|h!?CZq!54h+{IQpNkvlvNAh7b4Se z34VueBcO>mS@r~pkcCwOnaL1y@KOuNOCda*87m9R46qBWc$7#H52%?TgiktTqZuH* zT_0**2VhMJFOsvD(4jnU!n!hq9P}lZ^8Woar1u#%AW-nIq}Wp@0t?fM5LdJZ#`1mx z%5T$=-dLn~|B1MYQSGzuF{^0aqvCM(XKT_MsJI&kg{ZP{IN~$~g`XejIsgkRnz`3V zXS#?V>O)cTXBEQkBZm_wp8}`foNES{ZUtQd0Xn@eXRy5n5-7@~kcp;q4NvP~g07Z(~08qx|M?!&z&GRSuKuxs$S{C zTcHy%1drkbpPfA?ctid8(c=$}-!8atH=ZAtU&%@3iay1+rvB_;KK#qs<o!1lr3sp*{)K3(9P$!8xs)|^D2D<2G^xv?R3L+=Wms1IMfd|920glGvX~c&04IQK$DbD*&_n7H7|P^~aofriOA1 z*XIaAL@S;r1u%7LE>4aoeH>t&k4QmHk*Assl$9nW-8Ox%Rm#K9uT?Hl`C{Kpp*Jq3 zk99n?89)sFfqt6a} zHyjHC2a)0QE_Rz-%$B=scvuDI?(HurE)RHcLQ@Qq~y*>EFi|Qju(e+6)d{A43khun> zRA1_#?weX#T5uk;%%lzL&%_Kq|42QBBtqcRrXdLinORL(G;B{9_eK2WbJE7fDH~fg zi^%~|Wj6#`JpG?JN)B}6!url%Xf!;JX!JZyR9QU<`^XAF+yJr&dYLH2#$4=Bt2qDw zYWe1FKblAv#t<-WH7xY5=qme&TA6EM7a-OAs>({Bx+pWZfviZDFv)$`MVnVsxePK) zW%6mLLPzf`(dSx%h}l?5FDDZenIIBU%zsD2@l9ob8OjQ#{N%Ih^~kb6FU!fuj+|l)Ex!OH0@r zPU!bstMxX%?JOjrd9JF8;O`yR(4gVt+lC_RqVs}UxA-#P#$y;bl44@7pr{U98iUf6 zzSl2bu7E30R8-6{YQH~F<|b}u0v?huv}Of}IU*{mv~L7GaNJXonZvl__I6A)tYb}W zEz?)#dwOrJth7Ku-7a;_Ctsl_gk&leBX0I9wWn$k{e=q`)$=4>%=Go&tr2}o(@YIr z1A?3Y?R9-A5xfxm5wsk2u|)OK%-8X3To?x7syUogn^mDzR+w7zFpi_wr^vwXF5?(0und2(D47NEa}q0HQNM8g6y>%BH0d*7*H*eG5 zo0CU({}jV!y5Hg{NxcwWmP?Jy@b!dK(i#4Ads^j7Z`U-@O&YslkAmbQ5lA&3A0MD_ z)w1sgltrYM0u%}i2vFMfi^BVO{?3O>$aa^uwqAUjYV`@^BdgQj6><7auWC3k-}<9{ul+IJTMN~DF=`q6uh&ym;LXHJBfpsUrnYh)f1)xd^ZQV2avD3T z-l8mmeAS8-Cl4L{5jl;rz1CBxfA%e6HM8G0{$G6`7q^<(8wvFPk8xAgJl-F-`d{Og zJ?4uUR$=E=WB=x#b3@OV=KQ`Yjwps-FJK2=4 zSnlH&lxdyWwU4P3&hJO6kC7pZO8VEx3P@~zo~{<#t6HJv}dTp_5*c7XA(S%GZ@g517rgBD;R_#hZTP9{Hf&%j^+#L$qrpB@{| zRlA-v{*PViNj4OF(`sW!afavT8BsgnUb<{%2+r0q`*J6ON>Z?A?ocDxw*p$MiB@9XobU=)sWKc z0uhl50$?~A5Gy1XLJo(6n^T 
zC9B=73#0_Hs$()P>PnRD!G5{{93T@53uL5kSFTz1Lmmk0f!8f1GRhHjEu=( zAm9p-Qhkv`BWq_fBIJfX3JE+NNE1&9k$V{&`Uu_HkRE&)-k>*cSO_a6{?X%O<|5gL z?w$Y1-oBI{7c4y=Fa<`L2XP!Is3NA$wT-FtvGdf3>fX_iasK|14)xmPheT$IjR13c zfhmG>+!p;McbEyt!`QT`t{QiM1mA6OR7~&cT|fe2Vq)O*85tR&pc^|1)rDv-tJtot zToV=0TiVa{v*bm?=?V}sMwErQ97g7XaQ-p-jU^-YF$B~I?5@jb5<2c5i-URjXkROE z+ihj~PP^d>#v^@rte5Fj?v9Lra!ZPqzQbFK@+nE`-SN0C$S7b)W#8c zOv_2_;-QwsfnY?W0}O9~#PXH9$eE+58waC;O9lMYh7v#nz5Gr6s~P+JYYwAW^J{Q< z3sj;f5tizmj2Q=AaL1aQmR3~PfA9>3Y{v(Qbf=?v?8JRJeI6B8#YpC*spfryG+B7j5A`5rbYK)W(B zE4BmAjJo#e^Jf+YhSoCwFYkJV3cFI>PNsi}KHO+|PfuO!R)rHqus1**H$C?6V8{x9yM&0QF+jc4BDLM8_Dp>9JUXDeZ77fPH! zH-PW^(6uv}n$Ewyq_8c4>R_%l96?+o97Y)QeRgD)xlE(jFmC6=E?lhu=?{&7fWXzO zO3cRx&rjkPAytRQr)rDHNo~+ytrJpP-O81fBynE{>S?V`fXD{D-B=|h*bhV~XUKIQ zvfEFq=FY?RHwQT;pt+hnN8-#6?zI8wA{q^c9!S5sHy;e&xJd5Jt+}quIG80U$gqv3 z%%9&s+<>G60HL%%g#s!rL$@vnPZ2{9cJUk3Uo>fg9s(ko*nQNz} z!{=95MElafp(Z66eu?)XvIqO}4-0{#oz=CqWuQ9TxQQ}<%m4N|kbhtrHP4A{JmGY- zHXu*mX9aJ|F4548z{zaF^sCDF;5))XLN0VeI>E@ytbi#K&EFSu!z+|o#dii2F`}l? z+%_U^Z;fbJ^$%U!W!)!4l$4az)Y^d0f3)hN^@H*plFZ7=n}tjzPxlXC9H6nN4%nDb zxJT3k;D{i|KyhId+IMJ{#Rh;~#daenL!LiBtM-~*?+JsLgv1o2t#IxwEu1zOY6ELp$Nzz{lC`jBkb_9qudB174dN|N zIuDZ`!l&6Q`V9o^E@e=>j7J-;Jmes4d$s+ zIc&@Mef!SnqcBM~YPxbc9_J9_=ffm+K^}_~QDIXszBPu*gtc5faLA(e_V&`!(n?C9 zz1+M3ho*MA1Rx6-m2nW-Ox|mTGh=Vf(ZTk<6zJ6P|JZmim6ys&{gwLP}s zbhULq_PHNmY;fpPwb=ecuSAL8VTU??aAf&1E30(C#iabcYI_i*iUd+Hs-O*0>7MX= za1q;#y{qPVSn5$vZGg=84|aw|LP-fPr0SqUwywoKH;pKv3M}9Ds_^>$>A148^5@T= za3ugC0fCUPa58n}1@V55UAICe)avP5s#y1yv%ht`WqDG_aORd)+9{Mfq#1yuIa}lo}fs2YR$oaa6Ae*1fCV@px4lSoSdPgfkZ*VwDB~62ImCC~B{Qfb0AW97?ADsmHSK-$wQ0#{~9J8cnkNj%If?h!KW6>OS zfsLxSb4z;$*sbQS$a}Spn|}kW_>nbcAb8 za7+Ws#_1aBxPS6Po5NvYx&l_==H|A3gncO+&dj}dfmev@bYk!HD1w)?K!Eei*LSkF zFuUZK=WCxEEKl-C1{ZNqN#_zy;bb0U>#P}&d4R!xI>Y%<5^i&X+l&}j&*APLc7txZ zOhApb+SN{bsbQJ1e;lJcCsJzehgneI-`IzCX?{L^Q^(OU z<+T4`;-Mq`hz95USiCzY^C7Y71#y9Oax<`aH0RHcewq5bm_e(y{jvNFN?X32Ca5Wr zTVeJV)&3<`@J@Ef^)K}F2B07UlidQX-sq-PREGPub74L6%JlbNNQHN`e0LiTd5su2s@ypgmx-^tNTEt8_V}>Q<$;SRcBuDNyxUstr ztBs^juE%iF5W?2q-+yt@dR?;WbM;BWM5RcLb=PD3Pce%NN*ei2p7YmU%@Afu__HR) z#AdwiTWinUm*Sv8o^RNuJ6!Dpk{4qqE31W!yH%A{RdWR+fk3&D*jPg_u0-nZa0D=O z0fGf7JV%x+$w zum%ZpUzD*BdHSJ45!6IVmykV-6HxA82+qU?tPXhWLTX*plw452#mFeu6x*=~lB3liQK~az2rCVw zH;aw6+6YW0+)ia{ZJi~*P2C+xE#Qva5fLYvn2<1;n%tv{w>+KQ0%b~H4>-A&Xfh{2 z*wHZD1z808)jO(cYH9$VzA85LOSI%X26Djr@na}cGJN&!1GNWSO>moU?~0`1q{~;X z=;`W4xy*9gZMqC>Mp-Xshz5|+x^qF|JOyPl5s@bwtO){=deU+va#9pNjsp-EQXi9z zNJx4pclW~rgBsij!a^sf-r*I9sMGPVS1)Q7DV8mx%W%1@m^wQvC3!f7L!R``R(jF&UZUPaRIJ(MEL*7Fhn5)W650vkDENUzP|oI zh`h$1Ayy5Z`EF?l76Sy7Z4!5UC9d-+A9rirfsLt2+!uiZrt-vO_O?mIUH{?utjvkQgK@pFVvGlHu(YiL6%Z zy@|mwEYHTeW?gr)ez22|0&ndvi(rWjKoVr^YrQdUAR(7t#uX!PJm$kG9)JlPGThdt zmu(DGduWPSPi=myifWVV3pgpT4|ihdX|p>+>N6%qr_=7`Jr8>kG!|Z%Yc~ z@8I;i0IN7Ctt=OYh1s>`)z={BQHIQD5||~Z;hXg2eJGP%SkOv{Cj#sTJFn*91zui_ zZSFN5Cbt;b-Q)mWzV^4bFlttD+eu{L+>$MvoXg#td#X^iHMl_`AEdvKuoj^o{q5#Y zJlTkTH)KZ4ssU>GC$6(YRKrU?!3@zVF;A+|v}e^%kZu)u+Z>Jj<0VgA>ULC41i5_K z=Bx|MBGAil0`41dQB6&fxz@(UlW<;~Zju7TWpFmZz49TG9@YY6t-67Vva*(NoB}pt zDht2`znfP%jL16M+nJtjz!?~#84bvTa6Q&;LAn(mkudjeJn=DTf@$gKkn1!j!^#4Y zJlx@oKn^GkO5oV*#KeR#Wjl3(5$^&LjGh5o208{MMMbFg3A1#NXzMyFstv)B3?$dt z+S;0nIDtB}xtMQo%meOkIX__QMO?bI*a%XWcfH33(ZLtD(?`q2{D9ZjSQ(>wa{6Ps z3m40CqbG-0?}1=!0Crj~C|f}B3-^4)LPa06M^lh(=I)j_f&daus%B(l03ZL9RLRkC zEjQqkf1AkA{~_+XNmzj6_n(%FJH3Lb5BNNhmX;vS%nl zC0k}h8JXGZey*-qFzn8Rasg}Y82{SYT+SGpVRs6l&g}Af`?__-WQUY zDB(X4)}X*a$jhR3xN>>OlP3>RuZTK7n#1qG?tA;T2$Tt%zOJCB-$f+D$Isiiw8^$D zMbA_T3>X9!CPQ5)ewk$oozAGldUUdT^sJ)LW=BLIflwFn0%o1$! 
zme>1udv6?~NV>Aqs4scGj`Z(^Et^v~y|8nv{aUg2a%7&*ke`L|dLJ5KrySi#L$oIn z@I*5I{OR}BgOpewQ{p|o30*y8sbkY-G!4h0Ph`s?P$XU5 z+?Fm~nhI_hawf$yXSgd3(cbtRE|G21rf&_IYW1{p(}l_me)w4wl2)x; zxssXL|G|Sajr>)096E1Z=J`tNvfT*z#`Nt<+WGZIitcR<@(9c)$5^M121>PiWu%?+c{J~D%no}34j*F=dX?-Rd6v3r51&sK$ zC?F;xf>21zbEgj`9X@!_X;NDH*t%F1WsWQSEu`4p6Tv;@8NE+O_Q!4KA0eBnmD;ri zlregEjCF_Qj+%Z;VX^nPNYzwQ|zI{6jY5du!qa74qP?cT7hBF`+ZQEl|9kRY& zNKjCalk+K%82B9*<<^du{YN@l8)R8%p>YAloz(_R^zRmPn|_mF0( z*o-UejHD7aIR6FfCuLfmGj9tDZdrT%PAct#$2FNwG6|cYy{i$42%~_GNq?(sd=JrB zJ?% zL8z7jvq#zBE!@ozN<(FF&q=;_=atFCy9tiAza>5oF}w{0;JFbY#J$1O>BuLy>m0Zc zuEKN_Vwb}RL&}CdV??#riOR(&Bp!7RUG;JfIZdVf%Y&7O7ZcERWg zZa$^~<%#LgF+*pYDAn_|d~^0p%(+h`6z>F*MR=tn7FSb4BW%Fd!}dgm=d1qvx9E7( zT+3-BGWAB+W_Yq2`cB(%s&m=Rq-)C;AwEQ=cYD@;adyAbuZ{~xH2^I==XP)3-iuUg z6(cHeDUH_{shCB=Do!OEKe;E{BOe_~dLCUNX86f|v<1yIrN|4rBhW+<5>QDbB{39AZ*flLwOPmklfd=%YSdqbz*aLk}EFvegEz;q0rYB8sO^p=82*t@d{-;^V+qTr8z-j-4ce%sZ@)3U2RakrNXxzzM!*=9{z|E|a^E(AfW@tcDKD+l*I8OU z=Bj&jG$SK7gC|6?>FfO+PXc_0Gqbg7GU~JYEp%QP-&6kuO`XRjL@~mf{lH6kK%M90 zHx4lt{YDlfM~Z|OVbOHf?7bsmX$<9o*SSEzg1m&Gl6vCZmf>xcrD7L^Dl$jD+>hsx z&lS$7m$vRiW(HnsFL|SzoA=L}zieA+C#{jIv(E0aDk8Bdt1586mGQ5ir(5uE- z@4Y`DpV!X|n7$Mq?0^cP2z**U`ALY+#EZA<9@#}4j4Q4hDtmv4Q)uvXKFUQ0HFw*M zC!OR@4-X(te26w&-u@VYjGI>DxxvbRMA{SF z(gzsDy~%1rxuRbpg%}Dw#O0o&s);RXmtL}TDM&zTSc1=7x}KN>f$%f71M;10Ap#zh zCpzxh`}ddfQV9(#Lf$bp$}pC3huI~-K&r@MBFNeXzM&K;yoR&<1;!AzfzA%givQz za)0u^>aWagBRZu;y2y#Fah%Q3m4FN>XgtuF8Ii+SCV5Qkb>4aFs1J(w^SoGSX?{fa zX#E(7xk7r8(LRptpcTu9CYoC(*uXl=5#Ey98HN)XCe1I+C8o;JA4=gUkjJX()kUj2LQg` zA}WzSl`^=w)k5KEo|MQ4NL7GH3u)+Otp zSRk`!;C7WMV4XdX(E<|yDJ&!JS^Gg^^+QlLhF4eKw!e(<80DjA#e4tuZCb)8sr^nu zTSq)0=1E+J2UmvILhVH1)T!`ci%T&){CeM!gf!g&#*IY~LIvh!<|#E(Zk43g=Y>@+5rbYj7=$ffzm7$O57X_CdZ#(pswC{>vK&1b(>Pfbp;u$B z1`$89r*>|&^9?3z#)Ma?O>N&3B~|r)1I@p92+?z2`Vn-|u3cN;z0vV8h{-Z{ceG&J zj3s-w%)C80IQ`}80c#00z=oS1Jb3UYq?MhGi{A(QN~(8q==|%k@h021oa+=}n^Ext8(+ zN(*35Rkp%%eI;fr;KIVsAm>nkkn1EgFyeH|+aVu;tn)j<86DY=)P-;7w1@u6;1;Sq z908()8oW*bftKOg$LPJSMXcYQ{FQmT@j()BoFQaQ?uVJwY)edrwJNWGsn0m?>gY+2 z7Q(Y5Ps0QO)?UwZThQUHz=g1^_rBa+DvhY;z494$YeJmWEf4c2#GDLRX%qyn$L@1_ z!w;8YKFVMW(mOKT@QoV~S#I2zBU5l(jhHP091p*&A}*xK`VEJ`gT;sXgXOj_6N+EZ z8<`A5qbaJiq@+wnLBYmr0&#F6s`Tv%xs^AEM?&~>3;oWSIu&y#xOye_n2KC^F`c<9 zj)Jf7{rmUOBX@zs-3kiYVCn0)B9=t9dv^9sK>=X(+hfk5zkrRYSnX^~^@0VLH16)& zJE1|Z_cR`5lz|iPYO_r#(c(`@4J2~GWT=4vVZy~TyTmmPN#(rb*ZbChSqJw)x%0E~ zY?%gZ?%&xliH;xQQkt0!zWuP8hK7ySQq@erC|B{Rk`WQ=&nQS4mL1W2PQ`0l7wkdc z_tf@`*H^O#6s`yLQx|Sf%2eqqq&IA!hTg(3;NC%%q(9#0muUlLdrJw$+fCJUw=RvF zvRoCHW6+OB5WHwQNVRvnX}ombgP(==o!d}>wBuf$r-JOJkSCM#SDLyk8#x(bcGQLR zVV$r~VC5DHw9{8I#g+mj#_#aGaf4VsOErpD7u34c7tIlzOrZ{E)fTQrOmc45Z>Yy` zyPQ7jb#E%9T=++lXoojzq)U1a&w>PP%a3nt>d)6)6Sm7Ji`LIJ*_m~IPY3g{r5e8j zg-_=m*Y02OivZ&(4E8Q?p_b6n9s++*1=ig)$8a9H@!PzMwj96Qbd`sD%$Qny5n!_i zYv26dg_o}nB|EJ@Cj8@79tff#3#@aa#GM0kK@{~LHU`xk{G7~vBWQ?rv;ZeWRl zIS|WexlBE@B2?zVfswSi0<7Evc%LCI$7ObPIlJckb`2CN?zc}bYI^*G`~8UO$lV=J z9zQ-{cVu~t60j<)aPteQ#Fgx_52QxE&Al=c_-u51`MZih!7Wk1HQ%V@2TAidCBfqc zChzLHWJFz{)&N=!z~yes?5aKdK+0%;%jIQCEB(xkZqoW$B}=ou!a65J7XOb0YL`H9 zTtZ?Y%a`Wu(h>RWjQSz|Cq|CU%h2{SPv-jzOg(#b+ux#y4731oX$xhgVIWQtYJ>oW z3q$F<*pEf-Om13JsL8~;!q3D=ofZ#dWOW4YofTR4zurb^-@dHod~ZTG3x%t~qGz(* zz`D6kJwNau3(NpJ^eFdzbW*(T&2jA6&f+E$H*fBtd5$7Rb7=jHip5{CsdnwzD_~a_ zy!Ps}SxBPWn)sXJ_tMk|wH=r^O-;?B{-Ch1EvWXus~w&EcInVcq5_r7;<$~Px|3n0 zpZ?TilT#@c)=!$UEQ@9j`Cm`{K{E9IQBHpKnn zeOG_*GyhV6Kle6J{P)e6Vr9yl)0!8%v~u-7kQ873UwfU+2w6sXp6rT?qxF9^i!A!r z*E+1PufK2KmBA-=R9pqG&vZ&#(EJPAU^5b4V2I@<9@XoVZ(lclHk2CiucU?x%;>p7 zSQpkNVM@i7@VTi8!o7zHQpmrM5AG5@YLb$X0d-rxd}wMwb=>9FPddrJd@=x07W?MS 
zAX*)L8whY|(`08uW~Rzw=GXtSBr;Z3DR=h3#P5z!My-J0J^VXs!xkn7wU{!MU_>C! zs9Ywm5^?>PH+({%G0B8jGagec*Rj?FFbNiR?h{P(m?J_~V zrDXcs3)PJ9FH02 zy`24wH$RzbWz>40dg-Xm-Rl8@!mmrZh{pA+u?Mqw%J#jp`ODY9{=fpD>*DBy9Ah=g zf=3S>T49|Txr9O>Ynm>-e$$p_hB|wb^Xq3(ct6o2@dd&yTk6sNfzaK&bm<$E_1F{_ z*xS$O1LzTTQVF)cp&vKnEjS_;^O!Ps(0E9_n_-ss(v|zzziPxONZMpjXglMlQ1?Wu!GN^KRYjH zOt2o(PnG}**LoZ&OS9bl>$wCYK#ouO9l{;ZGT)>wpZnB6#2$&sbrw4~Vgws$_}>Zc zJtZCze|$fj8bS98tP7Q4dB)Yf(4OJkj9)4sbv8K{gj#lXc3K*ma5YHrS^|-)R0#Jd zD3D6t!aKlKP50<$N0sVCBcl8t{Z#u?WV2_T!S1&5NI$x2M4i6o6no#zJOFFhVsYy+1O~-uHB}1-V&vvpiSJ|?}t}9=r|E$t(u&G^Nk>>A@z9& zY}&ke^QKLYLqc5BF;S(EP}Ktx#;H&V;3Q5v&ImQthr*_Er|&CtqKc+y4%T@b#DHPv z+vjL#Xj~se<&?*>2V;mls@6(Iei0~N0{C@B=XK-G8?R&@0S#`t@hW=~nTGPwiE9|3 zbOrPM^78WB{ABL&ALcqq-*JC3DK4Er5qh#HiJ~SfSiBR|r->0Vl|l3+9#*rL-5+b} z7Ko`88!Ra)%GS*|Co=hO-Fpf<(3JDFNQY{;C-twHpZ9l(I zl?ld*(NX0E&!i{%0Hsc%L{`>`kpQTXN7Ur{I>E$=O|;3J`;6vfJ_$zTISc|ikJFD| zy1o%wNAdUSY#>+x_2w{@J@$wf+z73g-KYRTu4`otX3;31loY?@oS)o;1hv?BZ1q_* zcEp>r2MV3qjkagOe=WPnl}Hj{H#RL|%vv2c}p zSVFYhJ#bI;4bb`=faw7QTGCl9z{(m3;*^vrXAI_dv&?PJ?kT^kdcM;geF5P-weh-s+pEzn^cpB|$m7Q#?My(7`{YujWoIl~Xr9Fy zl!m+mbMlJN?mGrodXa$dPvjGxJRFd|!Q}$DPQyIcd<`1YScpDa;CX zL8sz4dBo{l4c*={#VuR6YS$+90?Ce!I#*Ay!Y3pdecDYv<9?yy#)qaOz^bmdu3o(g z<|VP|Y-|UkK6fmN?-ceFWT2L~1kQj68hn9)mE1#Nag`HU`;8D!3qwrRvu}$teyHwI zP^>~r(7=`SWC`n=`t$eCJi*j*a}NnYhcXfr-90wLv7;@xkD#(2S5~eD85&bfTyMeH zMF1L8M=t|CJ$<|87Jo649dM#^IdK=Bj8e;i^zQ0`di421w4JT=h^!%`!ok`v?*w@C z`qitaO3(IBlvJ*4_jWob7n*T%r{I8r3`aTXhB5~+HKPA$TMePcq!=uANKb?={`mM2 zl;_D{Ebkpxy4{`>I@)Dt!C7hX+Vg@;57*D#xM5hcd=WY)v^6xE5u}fyZwhCCpnSC8 zn{k1pjvV>H=1m{rWXDFo$KtFQ)nFrk%@s&OZFJ_mJWc8q4N~$Hm2A zwSTn-U^nFpfo3U2DYFR@V&xa#(oIPBx09k}52mbKbZUlI6_H-NzOI#6N<<_+(&?|g zlXE%DIIU+}fE%dbqdXU(PxMW|94ki5b3>6}6nM|8LEKC@vwoi9B(}=14Y&%MXI_O6 z3+_6llPAk?zu@mM!z6!9R+EbrVeC`0=pyGakkqiifpYGQSr% z^X&Y`)i#8TC;gqH5l~LXx>G4-K0^i8M@ zzW&WM+1E#-SDR>{h6YKnXK;>Ao%(3b({o=}1TGvL526i-Xq~ABhz3#SQ_;pC(1^#$-hm7U7><#hqvbJnJ`Wycn9#->g^fEm=0tnu4o=l1o7@H4`=eUHVK*SzkZ>@*xmlw#9`4D_ z#bt3bwYoRHcSaMW{f7-}eK8F6ZX3-xDlXdOQkVg8*1vfPDwnHw5y=$vzSO-d!Fc}s z`EpexTG0^^a)m#9h|dqHrEfKfGUQb))dipdVguxtilK7@=wL(*)NO;ZaC{@wl!aj< z$Bzcx6WK&j86>v*cSq&xJL0LwdO|s8-Ad;V@$PF59+r`bX$uv6A0S2si-)gk+m4g zX|DSAFCU^;k8fQ%)5>SDnsA+Pl~0i^;K{%=VYHsX7GE*)n*5FGOo{HYmW9mvzn zFcJJ8f1B8v%(mQ@0MCv-6qe^UWlolm?k8C+p7=P8bA$wl`}z8!IDO1tBpD@U#HkHL zB{%~xJoiCLT6~38Ri`zJNtbI zc=)@e_MBYYGa@(|B>MG15*36;M5V1S=%?#y{8c zBK_Z3)I8Ib?1on&+ypxZrh#B9)6NJH7g1*`_CQDK4DAQ-2`sWF%+5+wYtcOmD zbf+?*NbBKY+a4a;9AW?U1G!OthD-Rg+2&Se&4NGoQUYBq#l^)&MndNOJ9(9f6a`sf3Y6Phq{96E@=~c&nYnbbk?)89Sh7^;JQt;#?0W>))2YA^Na{G39O0jVrv6ev#)Zz9o7-NbHX(*iWW zQ5-(7@8pbN#$h!GSv)CgNoI=!UH*Iwibw8lZmTc1V)!?<=-{8Lq4W6ZQ%)fvb7T+@ zo-J86j5#xGDZays^>dht-3Z-7cwfT*7%N_0W}8Iyf&9=+HTT$-^E2KxvAsCh`Hnd1 z!gYOAW#_YaX2*kb;KVTXt^@*bDJaS;n4^?wm##A3At)$_n6Tq7vI`?yF!8~!w)4_K zmS`C1(DIh58Qz;BsT>W&Bb!K-e;OoWe&rKnLSMgrEzDN4UHzmFu%=n1&K~J=e6X5-Jir!^&*l_L}d97W2pYTW+UdP2KM*&qxQV( z=)9e0F{%pTy05jmVQbE!1;^Z(v}Po=Dw?274`BWwh})bOYfxzITer@x_wb}ZJQ{#7 zpy+6SB0);FeRk|m(>wy!gBnl^NRqr6Iz1KLkXM&d39d+u3M3-m$ein&C z&y2q9^_o7pFHK)Oz9&+*uw81ZxN>b+vFqNkC4U&-?@t~*T92AiM(UC>7r!%B;m@DT zb=?*?z#_5e`Gs$Lv#0OUr%{4VhN!67Meukc{1aFPnZr#>lO2RN|6zFE`C%8)ZYi4) zkEcSm5-L=M51(Jd3hzsBXF4=ag(2RWw$OvF=9kH}qt|In}Z56@y=Uq9{^5qLu zt<*Atmy`vf0sm5Ml5E$#Il>t|>P;RbMG$?Ug)=>cGIh^4-U>@Wi?#< zW&bLD?ltmvAFjz%>(XJ5XEnl^Z zI>Zwk*#T(TiD9huv0W{HVl~1fa5I=oWz`!MT%)r@tPsN&ue43Zj;wL=qRZ9 zTRThLTXOJ?`_!7)TgQalKJ z8kXZ+`SvyrelhJa3X=X73KM;ilWl?RPM(Bq$H1eVhG8Lu)}0T@;kvZ+dx;B;YNI87 z@6ipTA^*92b5%xrz89?or5OlSlA%KY?xg%~RMgZkJ{%vGdT3Uy?NHu+c$9;XsYDi_ 
z&u}Z6#;3u@(siM}SeTmgh<-=^IRXeN8XtkeXS3?6|#Tjj;R64 zFR!~+e^7s6rPlUd&TikBU1^OsSu*`Pp-!#S?zo#jJj*|4k?!C+!PJSfIjI`@z9R(= z2QJE9=|5-?>&5<~hm@%>_O$%Kd3CHe5PnNc-EwH@Jq|E;C>c#T0-nRf9F~CLSL{T4 zGDv!fnh#AY6f^pKED8w;WS}4b_1y(`LWB3SZK-%xyxEblsytCMw7?Gx4H45JQTp~d z9KMkHV%H6L91L_)CFY@)IBilR`k-3fFtpSrdojC;Ws8qb3mMbX>0zBsE-rKqxx9P4 zCkLWZHy=AH#kEk14QGLraq!@R$W_IPX&tKFqxmv+t86`~!V?pP&YT(e)ZrdbDVCG< zwV!9zgO?b(ASRlFW_Y65xos$l;Y*}dT)bVpaNnyJ?UjuYq0!jN@%#Y%3% zBZf35ivyqc0NNwIwl^Xq;%HRl>KG~V;!B9nW6}qe`wdQJm9WjfdOq@m4AHp+*C&QT zXPZ-eM&!*CCsP;`+GzzWZkQgs)RX+aSi%2mZcDOx*yHfyj&G-?H}VDzTE}hYwt;!T zWP@R-^U`&Fz`SSKCm%0YeH(OG-hC_i)E5rwRd)|AxawB_#^=WrUVG0ARZ$V6&ti#I zfo`kH)a)k)c}B$W19B2>9zlh|EwxXHFbo?{5U5^7Y+t)RZ3%zVG_h%wk zC}X7oABu(=mQ5!DM#~WD*nGU;XWP7y#S(3{!ED5tK#AgReMn49H?H0giZ<_{bGz%5 zQpYDoda)R*YYs(iYMXXm$>(8y*w{=f(|sDuC(}52QvB$T{U|u0*s(va&|6ItzWeBoxM1g6lapjy;Y=r z-hl|&pV_c;qKhLk;B#Cyj8o7a%?UddDsi9T<*0EvKL+t1`LbOy&qtgdG< zXztiKY*ARSc(8PvdHASX=+Yx+ly0mkDdV6nIar=@#+3W09A*2DCY#?IiJxKw4j#C( z%Y8$gFH<_Nx>0=5Ijs}zqIU2jj4`8FcA^$zBwqjBe`x+##mDWv!~m+#K2WTW5!MR0 zbUgCyqX$Y>4Fx<>J9OImD4iGP$iOYO=73{N$mRA&WCePgfh&)hR!bek>GQ~=YGhp! ziKM7l$yX$@`Vt|mKUvH&A#36Y{&SxU1WK~1Db*~Oxr}NiRebIDDt3OWCPIJ3pr=oM zQfHvWMYB96$0szp!YNwCe?-!`AiMO1prE{-j_SkV%PsjbYb#c*QHu9Jkl9U`SFjAz zjbkDj%HM2_h(B6>i&D2TXl1H8U#3><;6AE~ozL1%(3135; z8U*)UU0uW!g{C4X=9he`$@{rQ&vm?>8DE`xTS-ih4zE_b&8N zeWa}U{CQqdRQ3EZZJ*SW6R$tVXJK2m@t8j~zBt(|9@Qb=QNn+0YOX1Tu&;VzM0;NL z2B>R(!u)~3P6LuPic|CE%_F9Hh@z~jNHy*cIydbZ3-$4aVbb^e4TQ>OuQNEq{ey!< z@rzc)aGyv}DiBvK^&N9nO!Hoz(tJo$#2JeUC6b7fRMkSpyf=xSM`DH?j3#(2@%S&x z)xjlW%Sgij;ZLAQNg3(G;!*&ym@h<+LGxgUKv}2wZX@L(hEBKsS;c=`dP@m zEWvy)P4s5- zlf$^j+m^CCHJahKhAVB@*dlV}c}$woOfy~TR*@zq4i0PRLY zk!J^A{D?$k3|?%daflpMZF7?EIQ%?|2zFkrlzgfjzb!qs!d$@ERmz+QJ1(%wQ$KHa zi83v{z^peaBE(&kF3xt;X@kbpI9~ge0`6MpqqM6y{4^8Vyt7GTH4PFIksx^5#OAv+ z>Dp?SA5$!O7Z$JhsGWmun`U;*C{0_@eVsJspg!KKi#xrk<=LxbB5R+t%f6Oel@`06 zE2-9pJ?;6pa%E&+Vx-lrjz5fK71)YRH#;3Lk{hOP98;%hmW@+Uy>lC^Bu!LexWa+B zcNmI?M*c4rBZ`@$<%lvNBH5eL5iKoYLNtR)K4HRi8=UV*1e#C=DBAhiJ?sIr`|Kt< z)MEIwSR&;ws;M;)^CDU!A+i4E}A}YoJ6__AyHHM-kF^(Qo0*CSfmUY7H z2$e>NNQKjD0{`by;6j-w*6SiO4hFVYL<7R?lA3Xlsr|vq?N=@CHbs0hUMeIr(4VC{w4rA|{>Zq2HdKsR$!0*T}4;b6G3#RR{fMI80H>7UjWF1Cix7 z+5P1+sSFsF=!Zm|Nl!uaLP(9FnbS(8)R#xi4r=vXr-kLnbesh|5uA@J_9^&R zWtHsM-JHIEwLcJ&ZYFz&rc^QifBk|FSQPQP5fAkZ# zD*Z^Uf_;hE4={&5pR1P1AQInaIt>3}H}2=Rc3W6AA!m;U`Nona8Y9;wav$!pEbpt` zeX``_mi-s`Wnnz%WS2`YN}lZ_ztIkTXjU$-xLlA=cKj^gi5B+7nOO^|;@@-7UA^0* zYkN@m_|A~(4%US)Hpj{MsqD(KQ*Pmw_u6*qj-C2GWviG7_B5Rl`IySH9<$$Z+m^Kz zgWN#iGKbwV7y0c_!O>Y7c`-*>$r-0-c=)pyH5FT@0R zx8eHJ^GO7Gh@Y6u4376EhX#9)Co;)HVapkOD#{M@ddT<$Y#Lw4xU)<+=Io-2*)an& z`yR!%cd$nI=SNVg{eItcUQ6|?ZdVKI!hV`?kw*E#7}NBKGxu_T^waLm+wo1I9DN2k zS5CH9qnM8Je~3)LBqWQkbftnE+!ZZ97v`{SNxZqu;E___Ni)ro*P@x_59`xl}R%Kk9=Kj)yN?xDbfQ&Bu)1tAf~y`?NZD-=A8S zoE`30Qs4KYp@By^P74u9W74)YUo?@fA-Jcnc-<)H+_o+%-TYAL^xe`1p(q1DclI^G zZ(=YwnLsItuW2;3CWhD`e93FeO>%6#EF)8d@wKbhoF1lNM0LI{~m=aPQB>#+piRxm>z!ku+VN@tRw=AaS z1+Wl=eE$Ca_4V~(j;#&oT`@N`jZ#e&tX)HVmsWkx!Y<(gY=se$l^A%ub!VL_FikTD zi6Jva564c%Ull&W$kAWV8uB|&i)Hx+7!xiTt3S0m<0KZX}NqOB*7uO6uZp&Q#2n$)NNr7iD45S&~%(`;{wK zh}5t+_YhE(xJ|Xhs9*{@Uv=u*eYO#6A0C+qeHd2s3pE=fAWZ^~OYKS_y7|<*mMtN| z;s%5#=$!P*VK|j0Qs*ztovcCh?8Nmxeer&Y;#A*4j*sRis`lgYts3oOod;~Wf12_t zGR}c92>A{84d@NFxUbs4Q{akB*ObUeT|{t=I>c+9 zP3l`#yN~HFuNnLr(o>e`G~_`mKmBsI>JN<@blh|*5qnHR)KlQ8&BiU7Q#&q8@Pd~s z5)Y%;)x0jrLHQ(?@ZC|F-Le60KS8!F+x1#Y@-6cfo+_>MF~s&eaS(rPzs{Jdb|2SY zzWeqaL6f%TGz~OxC`bc*xKNYyT*DnCJpy)N3~e#a)IeKV)d>-w`*9#_MZO?C_fm<5T_ z{!GG-pR4}x#G8Dp+XQQDt9e3ece|M0k>B>oe#dri(#MvW zaA~m9dzd>|rJZ#&icziKK`kRdOT(#;c1r#8N1;7mQ(i6rAx6Tc=^P{v_V=5Zn;TTc 
zvevGt_8gT-osMoE9eLZVW;P3t)`~)W%Y6Cmc37tC5ehOgGD!F_<|o_Z(a~^n+;luo zA@!y9a#7TSd=fGfv{+ILUFgrB=_Vy~RbgLbp9l|DofqS6MkJ&5?A*bmm2q1K0urV{ zt-bjK`VD(1>&$yMpM*J?#jE&6{Ps`k%S<0bW!+b)O$`Hz?*WOD#4A;H2=N;~u85V;s_|=xYff zlcjdkMr=z!A$KaL&Yl1u2F00mb#)Mj5%MOTngwVFxBOVXEK!~-Mm2~JyF&jv;-HXf%c5j)5B5MS0-rZo zD*;o!aSodEOcL3rf`LmnJf}3^;~uCd3KE07>q+DZgifg!%9Jom_hft!DTih(|2Zp) zZ6idY)YJbgz&+clW?OL7WTNX>*!dZ^M8TU~#2MJ9O(-}oJoP+Tx2aKo2JCNJ=kCdYw$=OQ?$S#8_Z*0u`{C+8^Rqt8<& ziA%F8>M@Yx%LvPAQL@2aEc2EugARBCA;cvqHY#gzY^%RjjbN=L)4|`jX?=`E#CVH% z)2=FddeV0x((3rY7&srp1qWRttxoVTKs;9*cL6D*m9=%S^x6r$cAkVZ(a2+1y0CR> zBEjB5dNYpT8D6mZ`n5VN@aE%m>g{@CMl7a_es7h1V-1o$6naS*-2_dCHe#G5W8az> z56wQ7cSIUK&{5f7T*w8nNrRq)^RUY8U0qX0TfYdLIGLS|jzEx|83iQrrUdYn-^ZSA z>mM3|(B&}ZS}vwE)Gk49CV8@(EGm+@Mm$|lu`Br4qu*f)?>dOPpMq0FKlu?K8TK&( z6~Sza38?r|b>CQ0{B8%V2;!r#n#(}X5HDanTcdd+Z9UYH#WsDU%uRn(2lacqP#pF`Lbbf?u=#h zgG1hfEVWUIYZvF4Gz)-cjV>Wi%4!n6is~3lhr;g=NM|CfoABFlh>*WRb9Psc{>Mt( zAB?eEA;~C%vG<0iIVMv|{*PLgrTV-ulFis4R&7{3rn9q z;QZ>~tDz<1mad}|j#v@ed$O%JUjV}s*UK$m@(bLRnReH8um zETXlU1`@=RysGa^uf}TJ0sRP@uomPbPsu@7h{{BrKe3k__X4NSFelNxjzw^CcoCNk zh19yrN?Ly-<$dX$>$-wxIGzt%PPB;I67e&BdiHMi3s4l3P5k=2kb9{jzxedoI7J{@ z=$KkYsj2M*Ma3_~$V7`=?F9n~muJ6VFrmChJaj;fpO=?+g{gJW*cO%IWlU3J8MJog z=wDE8PugNe(4b_5l{8C6y_xCij7IdTLD2_(j9Pnyv~1|ZORhQ z!6S@_v?=HLSdJL^yBIM?xUr-!75w{bzGEC=i36w^v^9{&NGupmH6%O3YiYfiM=jC= zf8wln`K%f-Q^P#t`@^2e!?WCv!dj05uO^4clMU!AL0RYWVnnJDU`Mwa-brqLU5#W{>=7@qBJ?2arcE%`$198m;lV1!( z5qGmQqLYdYoX}GgbB-p0+_;6m0k;MCRb=ypkH|WnALXQ%@YFfBBe*8mOH|o3&*C#^ zg`L+odS0jcL||or4}y!}Sc|_tYJT(*y1V|ER7*gPC%?cW%iXPwSodMF#4p2yz?`G4 z!2WMv=APH?Wx#i-T`$&F1KtX!_hQ@x0Pockp1o?1ns{c^lL36QVYQE%x!ApH^|KAV z&YH#DrV}u(&M~4&B{tsEI?Q6#8ffbpmUL=io#e^j87ukrnBs+CF|VY!$A#OK&NEL| z*l92IsGb+O@%x=pzu-l8DOYHjjb&Xk4^KID;o;LKc76h+M0V1Y@!k6}_Vg)@$@p`~ zRq)&J0u#jOA{e~>h@ZvI;)Q^avoMR9o@)g3Y5}11M4fAN93vtpoXI^bc7rbh4h94- z7X=)6ppXDa{Y@)Qsf`pUmb5Gjxpvc`w>e_BK7&lo&kZ=Y z^qglIgQn4U*xTTS76e2|u#6{aCdGW_Rio!X6#5+w^4O&U+Ke72vB}S3#|%z|PYw&_ zgw%&5*lOPY{(d)4H346u@%-41>&1R{U6%u_i9(&!*!_m_&7()hMn@MqU(^u!Szdkg z;H+Q&%}u0Im&8-(2s9Zl&V-|?4!!RBnZ<-f)??c0iSO90>f63LAV1S{)LFG7YQ9f5&!PgC#F$ZG`-M~&p{9t0*#B$3b>-yC^X&C-M;OMP6q3CAgh)&utO3bX>ad^i;Dimji zGKSI(Xh(MY1iy2LH$P(F&8!hE{OLjcFa{`CRHoMZJe7K`pxxGa-t^*qx3A-o!9ULK zX0hCDiHCPw-)EaaC~eOyWTKu!nREzPEOlyoc3eTXDU0mk-!b1K*1Hd*3na*AI~RW? 
zH_J>m7m>${f?J5enHbTo^3iEg**#~mkrEg%J-`eE6l2vG5=fRb1=%6*#m4}PCKtp9 z5t$ax^X0;XUD{5AgoXCkDP`Fw!M0;}2a74_!LvGxXWh3q{TwvPe3=9nV z1KXF*-9c(luDlQ{aWKN_UE7LMKMsDz^JkUN1B7UJ}++9IehIV zsn<gdKZGpU(NZ%U}Pe$LHm6x$J zT&yaqF&AW7^|smqV4M8K73Yzc$fm1e`~YIPJGbwy{kE#ecU95DXJFhFoQuR9_n{Q8 z8Qg^rH>QkhW`*PT-ad_sW8lEco3U<_f9NbWSrTL0sf%H8HqZe3%;mlDBmZsX$ZxyL zPx;qxzvGv2Gv}D!jRzjS$W^hb%ufduuEsS_EbyUJ>oBFz2Kj`jg%IQTa@+C`wESGv zCkn)(Qm!+683UGrMWGamgnP7ms~?>ai2ggf-2^VpSaoH_TI#Jh>#8Qj2U##Oh0=La zW)c}ZB7Q>RVJUag`aQL_x%8Sg;4h4TfN_U&7i2NH;lhW-dL(xfb6xTylyM&x^lu_$ivPwjV0WVX@UdS@yHmzUSlmUAHXso{Q|V7cO+D zJw3ilG);mkRcA35QgXwFj{~IQ?MrOyRge+ya1$tS7M{VVy<>EP`)0) zb=FRc?OQM>o{DEAEj-sWT*O_ulB4Rm+*BPz^w$4ec?5^|Fb5jB6{v3wD45|V35`&X z6J5cX2q}51LsC=6 zyfJGTBOM*&d#lNPYLn7aq2CX)E?8@|Ww|AG|3GFnF?s`{VeF(whcozzL*FVyl=D#4 zlkYOdc>8sTF(BuymSUoP;hDTwgYAKh(-;H8t&eRH-*&;IXWVR~?vVb{=#|OezHuUp zWju?pfEIejn~XF!?y?7!JqCjYCi{eq5+$g1e|*-$H&Q7}j-`xD_j%Mlwsc!z_<*o| z1R{x*0|CLF=f=x^El z#4sQ!9`|5lqIzyZK^RtN(f&Yy#1(31*t-O)UaGV2PO#Zb7T!T^9orN&U%o-@c>IGY zgK3vX8qXuS7ht}^5F~Mh;odald#>6mJNwy0n_^mDTJodzIYiHp7%J{TlbvWV2Q0(L zbL1o}XxJ^MHmmZaQPqmsS;sTzIT{}Etq2#(co!RU_&4`@``>S}n3M>f1 z|2HbkYwo|QL*4CLzb6+wukYI*7L0HVT%qk96eUEwU35=?L)fEOko|#Sw%A0e7+d63 z+WBR|Y0a0TI(F>j@71_KSTmS9Vi0&RAHf5eY*CxMkj5)ZY^oH`k@oU#SFODh+`2Qd zro`t;C=u=4EZsrWv8GbB-6Ci!WkH~UW>})H-4Hq=qB@Gx>Gbk^l;UcL0>f~64woNQ zn85S2t*@EPYPQrZovZv)b6+YC(JsLo0WD*l?-Lq`m*f*xxRE}TrmW_Dqo~gL8Bf&i zJUK$lQvd;!(z(O{4Ta-=P!KkBQwXC`e;O}R`exnVJRUWa(4=HK;o63Yx`uQRF#h+$ ziQ2p4;?+%w(rOlsPVh{PYom^HWGm@fhTPosP4IXL?ff{Y3EJheFg%W%h{;Spja_wx z{z8{-bn<4A$~aG}n#DK;M`uI`0KzPR^klCUj*WTymuC=39cEfBq14PL*k{PXsfz@n zg6qxZ4#`?i*SfUc5akSH#j#FQ-#ES>oHyI8w*ZtXh6!5XS+qk1+pGpxl>OpJWO!od zJKM6s85htLNpl*UwEAu)i<+*wRLIn}_q_k8IBVNWl2KJ7u7YPtNg9~WTtI{fbV+9W zg5w4?K4W34XT+E7C^DvEGHXtVa8Cl<^1$HWHS_lwvrto?;yvQoSCsD+S5_vX&Azpv zr&*3}AYlRs-3&jonvl^W@I-Y7Cc5?{tW134o)fIIa-_pVS`XZ#7HHbebJFvmjAsf8 za|T|ZQTxf+cD>yI$*875vM&UJN02_VFYY!;AbyK-T-eP&w;={5q8??yN|np+fRs$Y zE;$A#p0offlc49ex??;ZtH^!#H3JFX0Qw!8-d=5U>c#haK7@~sj;`fWF8k!RaMllx zzXI-4+wO*u_a&@C9t^VOOK3aAiti>v(2$%*9%dm_f-Od?WF$1x^?YRI$){i2mOY!^ zu4|tmPl5IZ5CYY4I`Dc&Kl3omswPY9`%4|@nl`X5BHf6_(@(UNULmd*7;$?d$m!|n zjp?$pcKdH?G&d?iIT;3=H%5$GySb7!J}~Ft8cw^qm*$ycb&6*!+0y74X2BKz?`t@n zk`4|Ipxk**E?4B0`wu0ivBFQEKG7(u^!As^pvHVC#fT*P@71eLTBB&9ORm@%x%R35 z_8E^flj!Z6>yk3V$Z@L&l7AMG?N^mv<9;~q$N4+qXIvl~FQIfHS~kTT#`glb3`c|{8pcM7NOqVhQhd{~L(Bbd zN|vPj=>m)F3V9AMXd4i25I_j%H|@ZeBaSugtkQ_yuE1VLXDi*W$};-FIVB}!^N~C2 zEAKe}hew9)tg|SSl|L-N0tXn9r24_z%&aHxtC-v}hQ{@dej?7Shl4-jOx zqtWZl2iY8*O5_^3<~(`w59Lq?*~rAiA&`-e1sQ_~KyDM82+bTz&9p`@0x>B}X93X0Ig`)X54WTfFu@I*iukio2krYx#7*kC8)SgdctGvQ&$+_T9$lT#= z*i3;j&*c(lhco26`jFCf8ktAtSS0KH?lr4lt9U;X(d(?;t@8G7cPW^E~}sj=+!?yMXR)?2D&e0XYb`k+&d~% zkuMMe^sfsM_v2*7E7Jh^-_)V$h*~X1Se-J(XBlLK1n=Oh^Hwk&HW6uD@+fE8o&B|P z>+oE6Q(e`i=#}fILDRq1YL{2DE43ES zI4&0v(xTuB$=pjoBFe6LPCndmK`KrIb&bD1V*+F5cc;X+FkJDS#=IZd;HDXac?jw} zpN@E5axMrj&Wzi)&8lNNBs5nnRDS*a@h=P7M?i#SR-$c>y@N=psi~T8?>qxP#Big; zlUYIf2jAN2y^$9Fzc}4f_0(_e?V~Sipf&*t(qGED`%W9{&WmcYl>5a}*!mDnJTUz~ zi_?mH%K!Xoa?0serXoh4A+t}=KcX+j{jq!bQOo08zaIWYmAD{qq2PAda$-KR;eueL znc-U(HinA~Cx2fL<%;H;x>1uV)1qe|hn{1VV;rISq2|1PuTSE2-D& zeN1@k=sGSJUrF5@Cb|p%{0Qx*xK%~hYC#?%26aFgWD*@6l4Nuqu=Pf0-8U4HKx6WZ zQp{i0doChkRcO+J^f>rIoZ6{FE-1cOu-LbFuu)jT`5!;aTipqT-pc`FwFLPKETWAn zimcU}t9MkNNiq8KO8l{?!F7<@5k;^k$Sm)k;GW<$h`REUSyB66&XlYkbd6Cpug1JQ z|1(MGMU1g&;UHQPfI3bgftTYprSFTgjZ=&>uwo6~_m5jemrgL8&=w|&Mzs7}2rCJa z6v5m^wzGNpE`rMdV429Ah+xj@VxCrvRzsMh#^Qf{Fob1?LrJ5dZEOg@S{AcESmDf2 z3i2egp5;4%%Ev;Uz6aPI`^7f=^EoZKkt#%LzG;I!cO03P0{and5e-_h++WxgEAjnV(Db`xiS7mYgT}`y!zH@MlG5m 
z@1JyT3W*qC@2EDji%k-Na1QV14A_JnxDOg*|M4X-)_4N-Ir9DSow!WAZ?|6%V<1F2lUu<_kFrIV7=DJ5g2!H}U$nU$hJiAWieRCa_y zRK}yC!O$R4rU;1)AybhQC1Ilsg=C1#h0OcC?!8sc@H_9v|NG&8KAqz{&wif!e(rnS zYhCMF*BX15JN^#!OU81J#C+vo%lFh}%hs%|6(R4l_v!!bOQxX)fK(MG=1!bG-AFj0 zBvC<9+^a0do~=?&hTu>?3vH0LkCxMZ?*H~-QVn4^$lmYw_8!<|sQymF>6{4XjQ<`H zH67|dr7lP|jdz+8j^8okbJSbZSJcsh$jjb${&O|&{9o3})+ciJe=Z0?AB=pw<}zZd zae_#ozhUG#S}yGqb;~2++LiBpXv_ceOZvY=Wd`bo3ka_SMIUmg=3BM>=7Mo<7wqO& zk!g){z1RNdGN!2S_|G?VzjE!bp2y#I;r{0TT(1;Px&Qz1KWeSR|7Jt}pPPJsL7kyP zs)lw|-+7c2fmMH5fMpBV+1jMKN4^{3zkU8+Z+^TzEu_HMWYPoUi4zI=W=xU?&Pc(oVHc)(Z%gWKe+EKQqR?XmG{Zc`R>voDDGAjW2l#d zQYITN#K%>3RTStP;nYJ{d6G@mi)3*1Ox$kI75TCT-;m=ZUD_p+B&|wE{i|rpT z5Y1Nirx8OKp0G~-{{AFVncEyeSigjM_4nZuFW+&=B3{Bc0Xd5M@3TkZre8DL$p0p! zI``<4RMnW{KPG?YN+$PC#y%P*JS!T|J(Ap3?y_cPUVai@{M@;@}qQ9eAP%PUf>#*RzRt*Eh zs@DFGr)vt^`IP&F{NxY*bBjdoCQw-Q(*O7s9+r+)E;Ia&>JP5jKYwR`(w|0V@iq2M zApHTV8XXH94ipny5miYa)+KTmOP9~Oq3Zt|KIPjUzhvn+A7Yt zYt}rRoO=*k3dIDM*VQSZt{u_t>h~VB?f?57)f4kfTe!@ww-=neSpYU3k}@54BYox3 zzomui)#m@WF*c7UvtUoKb!SJ%rI;gQ$P1r@OX#xyyb*+s|EDE*BA9lp@hReSVQ&$c zcF0@se_SolwLG3d`d3x{r$Lg~io$G5Qmh=hmpjsB(ds?DASJpWqu<%yzQyA>DOv_G z6!i@Gi3}!e+vYEm z>r*~+Gs*8?^shJHhHM8y*M^z}RE;UG;P5({yQ7(_0!e@YbGJeBcGq>}xvJ(@^(kvf z%l072Wq5GdsvW^Y=fs^sV5chlyxm{5sl9;3&gTLOOYoMY$c!;{6nW1_zg#}@7m%aB z?EF-aL-if)N4ZQm57!o1nOl~O8T&xLrcdFLgr!7?WessGnwmg~G*J|K|J&48_g812 zMdWRt^u7?G_@Iz*eSgNfe;^y-yg%urJ6EQa1Ii&cQSw(NQlto33_uM3NkhuBFJm6rr&F4bEVx z_5Y4ljnO;nW(DfL36Y(c=2KL@|2NL8nvL2U7o1T)mcK+>YKK(!@#jISxFYku4P2Og zS#agu^A7Xp^Q=u?agEK~Xs5uH{nI7QF0*WUzex9Y>chiFZt4uPtE)}OzOUMPJo}!F zV}n@R=LzS~(Mr3|Se!~H;;!Bs=}Y6H2yQmb>O&g$Ds!SYZ{9cUF4!X@Gw3mEpkLcI zH@xMG_EP=>40RI9ISW8sdFIqiMtPF^*=7#+k!&C$9`p*x=lgY248-_3zATktl>F~} z;_S&nM!tE+eU7`i$j{EC$7t`}ySQ#zs*$pAv)WNcEoZp<@EkUB+2vpZk)2wxx}T99 z8G%r(loCY0SN5}T#g;kKVHXol?sQas(&u#f~pS-;ucD#g(bPC848kdi? z{T9CX>(B4~m!C=-CNwN4d_fPWvl5mGnoY}8J_cIoCalp_sN^$~Az6tf*Q+u;s6*>gQgHh3eRDu1FfG@;aeK0osFA1$7NgtBHht_#k&yuR& zW)x|hb3X6GkZxKa5q6s)aej5SfCl9@;Q`Vz4*bff+up&tAfqj2<{y{-b}5DHHU#d_ z0jiN*mIYdgWKKcIGlK{esQxZjO|0hlKE_IhAlqXy*HB>D{wcy&t~C$}$t<6I#Abx+ zUHtx=lg*LcJz4F1pzdsXw-4e$C&7y?cY;#y`!;*CK+8E??KLRLJdQf?v!9@yd)Zz< zRq*@9*vQS4Xy>|vmqLgQq8PhdbaHg`O8JQ7_JZ2Xf>dZ0_G<84x^(G`VY|Bf_hmib zieV2acXJ_pdj`@+UnG6@N=w!4(MVWR0rxHk}Si{>M^j7*WdIgfs_M5V4WH*t?iwbe zkU}}7=a{9PFfaW0-ygE1!8VG!g=-p@=-T&+0K&zX)#_KQF zfI!jf#f@&qxo~&dteMw*QA?e-By)1iw?BhU+xz+y^gwBhXpKT?_L=e<8&2f2$xd$w zMsMp#1?K%PxM}L{&CD;XnnxsdAr7X!d-v|zqOmp&71kU%A`wpG6*Z`t3oi2n$^|0D znNs)eV_4jq#hA-x0g>+@WwuCz?MtO<-_tt{(m0gC+*MkKnYM>e0)51%*cbu zN3WXl==HVW3qu$E)Tv?2SVW$zfe(_On$VFzs@=GMVDdZ2{`?N^Ptmnd8ao!GOYxib zk$R_LE}?51naHTRq_~ek$l}NA1!5}!V7hOTtITyQl4G>-- zpmRi(4)PLLEblxt8T+^h%+!lH2e|IWcP5lSFtmDjIGfaBTUO=pyEHP2wv#MT*1voa za~H5~IdY^O#TwjfX?b(eZDAC7_DJ!Yo`NFTcB2geHAi1NaDN-`+2>f7eursQ9K@$g zMBW}~P@)ECkxxpV`9ip+Ab@Q&k!4$2OSNn@JW|8{@Xo$T=dk+ml$V|XN2|L!^~f%% z5|+i*j|bkibwn@S$Zhr7SwwVn{V|a`MwxJu)6_T3NA?m;CL-3$RM!CUg!{2}w%oMYab)Qb%<9-|Vu9@OgMR=a4!#f-lAlcCMWIVRs7B3LyIE__Gt5Qo@9b6szDdd`VFC6&x5hiu=jL4|~#@xd-Kc`1p0zT^8emmUPsJvN9s}ChLKtrX@@D+JsVN^W0~-*ieRnh-vGSgJ zP)0cG`hpJUOw>iqqZN>%dI&CLkM?xm)Tpm^OY%eE223nLr3EA|#-ci*&!vXbIR zwN1mR^aG;}yDR7VSe0t2iaJ<6+tP7rb7+JQziaY<4;VC6eTU)BGZjlZGCXaN5=XFGxn)Y zo3gym-CmE*u5GkP!m3Vs3}Iy`-aJCJBgztUIv>13`5C`Gi-`jLNFlot3+mEtoX}j? 
z1(~&XDAzz!8ZSpQz6`}}1D!QrLF$GSyH1&ykkG8!_P%y?i3d<1&#OladC&LyL00zo zyLXB9LdGNH3u6b}b=BUQ6ndnT@Iaw_cENN@K{2r$6h}DnandCOEJ|e%7DJ0rKERlF;w&lG{hgCjj~bocqxa_P zfa0cg6O9@ZqI-^EBl*gIPSQ6S&2P%h?m^+v+{2uTLuFMBCxtl&i{er|OmE#=q_JCi zz5s_;4_;Zki(Rhy$^KAKQ#m!cePdm&+uj$g-klh`Zn(MW z%()pI!|nct^dlOneTnaGuz0B`eUc3l$;`Iz+_H){MX7N9NOhBnmdRyt*JR7LpH>8B zPQ04<`e>$r{yMvodjsOZE`u*3BIcS5Jj5*-#&F&bIr2~#<}mur}T6Xtqw@D^hA^ef?P^cnK{X*18$%q=)XA4AyTpCC_gH_uCv(fR8L+a#iRmmv`su&P4i1$DU)+ z{b#ZKtdQObbTLY^(1TQQ!j(+E!_o06kFhxLNSt??T|;uk;L{>smXdi`!OZJJ_5Nm9 zGRfAGW!^F?U#?Yu7{4XsnA7PJ8~GEThguEY#{5j`$DfNk7-w6XR=ag)#z5Ck$WSLS z8Ve)NM5(*hQsdmXd6<%IbyB2={%iKyo}6zn*RPkp;l2Gz@koN4rA@n8gUQ6vvOvji z>F*rJ?lp)cmK41=EZucBAN=}`H@8ei8mw$gT!-aQm739f)#gqQsvETPlKdX#&|qB? z-CUkwQP{k$ozX6LrbgbLN*|l;=jB-FlRbhULQ@Oe8guJ}Q*wbQjVyr^l-C$ot1nbl z>tXfmyxEyPVgs`7eezR2z4PRNhnh#hXK9Z1L(|`N#<&8g)IO|r@YSOwCsf1-j2zx> zd77Q4wprY%EL^R?J5yL`Nk{X*`>n66eI`s8C6bg=OQs$@KIdh`_9TU?dCiUGRq@Y2 ziAoOiPfh8lt4FsVUb#EIYw`+jlTy#|#yUV5EM0KAa|>g3C(b2G(m{5IeS^V}M=4Lo zx^2-$FQ2Ez|7!c1C;NkD`z#R=k#8sVC`jej)vi2wFiPdAbkOc<#E< zCt`V0X3Vxmvmq>#E2oa~lIp%Fao#l+m7BCmPKO`gTkv8bONqRr*j5|ahOxT-dPAFW zm6^P$Zra*jnWdWS5XEp#hGzY?`XKK5kO41xTkXKt^=9Lqxhh#VOin!*Y|dO}-#%ev zcet*+Fkx@@l9M;P$J6=W9z7K`SbsWn8PCH5Esv7urC<3*M&sI2D)%2FqA0(;%S&z; zc{^jYr$Lfa;^2#UHjjo2lT_LseZF_e1AWzV40XOd*Ep3o*$G z`V~56P_ONE2oveVf17qI3{?Ch12iR(8hw@#fZMjDxv)vNL%mj&3j1I()5j zbfeP>T_;MiODlan-k4vSUb*aD?TOjnYCG&!g|1s(_w|d2*2FIL1rK(`_+p=ai*a)i z@1*xehJ10@TVazH>66*&y*hd@SdwqNrngZI(x`5o$!F;O4fLSQ^w5|WX;0%F^R3=| zzL-)X-_o0pr-r#T_U(U&4b}Io_{;d<=+i>CQ>z}lC_eRIUzCxhQ+rE;`P_s(z2AC2 ziVeLfs2$&XBjm)fW9fqr&gh>^=(}QR_-!(#OR_va#j{isi0~mqX`E!YYWq^$@)CNh zSS24rjk;aU*5`ueEorA4H3F$Q)QYb>7t(PrG%+OZ-9!!h=s7nI`Nxk(YWY1%*tkvR^h`{=JYss&igrosV5n48 z@ZR*=*%L52P>i7uKV3BVnwX=?kn~p^xlV(n>@1}QBi$eJye7WrN1@8?rEFb)S60mH z)8tgCN|c#6F$H z2id)S%v9N;baGAgD$ByDO+;9JM=&}h=^`c^3`Qwlq`J?&`1m=sC_gFz=zFFBWwmGg z3uy@a#J+X$f2f2&6DR!=&(86^R+qp3HWXoAKhjm-(SF;=soAW=Dt!K+l(-wYHF0`30Lz>sVI^% zJ*Z*YvgIXF3Pi`O0+l%+C1*bo*t&)S2VkF9u@W+{tqZ-ju9sor{1WfyM>1G#-9fd)EtrnJnXWVL|(J zD;o!7Ls`AHXLLECTx$Q2zGwQKeUHBs&CYfhcP{kpVpq=Sfm61G=i11JG<*7OuR*8b z>#NR`tU4U802v>Jv*&C~0Xs){D6~j(cwU6!IDODl}z3b_FX+zT~zcb%f2T4@GFt_ z8I|?RwiS&$lCrM}BA<^O6gLDX1Zn*hraXI!fyv(-hY_% z?`wjDj%R(II-BvTL}>&%j}eKXr{`Oxa`gYJrOtnu*6r4~Js9un%WyNg_ zOt2d5<$qufGnybiw-0Wl)GL-SD0UOgl|NkSq|PE35C7MLRsy2kgtAHvW>b~~T0yvx zOq@WZ?O!MIDj}+pRLn;1J32sa)9X)L_H$IAWza*9db|o5wgcM~DB{my5Y|a;+D1F8 zRBMEVL~;`q-4Wt9ScFBgv*SDG-nzd(T_G~@2)UQ?MRkfReloi2_wDI419aH)T`ajW z!zX7a@W=BxYbPJIWy%ZnLU?L84#kiqyel8B&6E8^)!(*A z@zu4J+fU=o1riOxOiapQ4PEuuA1I>x zaO)h#fY%& z>}of6a6Ej)ILMG%w#WGE&kHrFkC%}^`Pd%lw&{#;-B^xW)7HRjw= zEq)%*6jm+dv5=`qQe#(=!sV&J($SBIine&x3q&!gN7sXyIkg<`iAA8a1eyw zri2v5l7VD12l9gb=f0twyN9#|78n7L9hvWQc?xhzVSNKtKu%{nF-8WCalsOsDIZK~ zkMQ-I5~ds4yBEeU9cpz3LbJ)S(;#oH$Lv2IGe9Z|Mbv-FxAWW*r2|+BRb5v17L#JF zNaO7H@HYUpMEf&AJW84Tl#fVt*R?B>mf@dFP^)$<{TXJ3=jhn_WNu;Na+LdLe?BMt z5a|h(nAv(W$yiS>t-*B^oMCCI4Z5nL;RBBpwG3eUY4m&`&5n5_p_@*0U*OWSkDM{9 z_On_DoIgR1TD0f{{?4=Jz%v+a2W7?~LkX{$0wtzu z!VnQume`1J!bm`ko>-?!i~lZE4|6<(8%Ekr%=AHi0Z@@0@}H(|SyuwCTE>SVfrdI- zR^>fVj+CUTKrfBtv8q+jxLAYl)N<;32nF3Rvfa|Ul=;daf2~;!33jY z*J*%Z^oIhsGhkME`hxNue;WZqTp8LkR|GTy0s?mH>TYb^@={u6V}gAXnh*nu2upel znm&BlaVw2qPQZ~G>oB2nr8eNP^^cv*`6f5-oGDw#Af7n3$!4nYm zyHg(;h}0zd^)ke%kVJscnA`SmG0j}uVwPEJ#?Dl>4-$@3?xaIZnX&n&MJ&4tPx=wq zI)uQS^$FUud@8PE;BKB7B=tNfE}ZUz4#&q(aM$RLOgT|fH&s|sRlO&z!e?)!kK4Px znX&v(?+Tx%C{8}!X+_4gseWDlb2U+4B7s$pl9^tOX=-R{iWYm7?ns!haV>a{Nn#0s zt3W)uCx8BWrUWDrluU+{^l@<8 zBOVW2zW7Qy`ZKSUre)jEVM}!K-WITW78hwli~jbJ*^-qLUCeFgz5xRD-)%iL2wjCF zv3BO~Sl0-No{Qa=uH8dgFU?N3kJOv*iM0) 
z9r2&6Zr78n+Yg!^I@Cf0M^J#1q*8Z&&ZRMnrY);N`teR@yNLEq^NTBufgdWWNP%rC zalIc}R&`^`v)1M!iT>P=@JjAqN~<0#>q zwnpi|_&346MJ|3E5_Rny1n&r3pzh5{z-&U`uSzPWnn?^p5$|}lIZOBNzSw&%9<$C2 z0=1#x5IE*s)?J&{LBG`}86-JV<6OvIjl;%`sPUhGHg^UJ>NNcLh`@+&p&}e5)Zgk$ zbf&zVuqI>*ZKBS0*nD@M0Kp9jKR|ymMbMd*zC>jb#X#m9iKxO)=x|JW27%|l z2~((fI>*}!?0-K7Kytl8r!#CLQ7n$n2lk(gwG$)hfo{tDl7}^ienqb;>tOVoZvBuH zvb;7>sEU<{;{g+;k0jDrds=UP3N-V*Ft+|P{0;aNUP_rSq<>Mp6eB?lSL%*?%O*XQ z`F;ClP|(b^#NJH)x1kb+9_Qg#CVUe6pI`{2N&xv1v%Ow3#Bzo{;$!fgsMKEXguMPD zV|#6ExHFL+?;sQ-x;A4LpqJezp$hvA=Pojbk8W9ph7tsD`U}CKQ^7MEbC? zJ|6kyaOs_#v*8%0Z)v)VVd^8zTxf_E9wGo zfv1$Z;qPlH*_7$v)yFM!n&uv`3K#m23;$qTNO6nPlLV>@)U&^?_}jg3 zmFinLvni>%LhS`1WW1Aa-;034J(|I+IT|>bhrcw`jvH3BeG?y{lx@AE6HCe-4|+kC zweAb}tF4!sX%a-Oj=^f)vJrm0C2Hh8af}61oDS;jG?RP+hi3fEC zKOzT>880^Og}gKIvSqeE&fKHkzy&5M$!%o4sWSK&!cVy zlt-Be!G)VkRzEfK=YZ6=Z{H3V=;;@iCDfTp%#6P%4P1qr2^Y~1m&d+rJ@X64E&G0& zBws<_t_b^iDpaQ9nQC-~g^e|C899iN^kTZ122wwqOrKP60N^vs?9t1!D)a4TVY~uh zC?0k0ne2S=%w;X}!*}HtA9L~eAd^L$+VD-Iu+HW97KnS{mAB5p9FJx z78!aQVU)WEbfd8bO6i@@xY(Y|mg=_2V=x&_o*}82(faj!Z%1w(7a}VJUl=s zG)|4s=f{OFxHtK2xNS>5cI6~7<5}w;R;$Iu!xS8?Q-d}6;?YtDLO?lOO1P$z>muK| zO)VsLIX{^{Pu{zcn+R(lZrwEdW%Fyuv=$6_C zNg#+W9!))~848)mR5TkMIf!d0+cjvB$6^^$oF^tXW5y^$N%MopjM)(bW?y=+DT!=O z|1i1!=J^PX^}FPVX|$a@{xfbx`VbKl6<6m}+hQ?oNT~x(Xxl|bC^Z9vjbRM7d2*2+ zm(}3bkKeNftO;sMpz$G;kHhWoQ6^kb)dLs#MBN!*^zwD4&-|>8#|jlxZkf;#q}Jei zc&VTcc>mY+!RwA6NIhIE)(>PA%V|>>2Y!3WnoWKSRT&fcVk^^o{b=ngQ;&Lc+RfvQ zhR9Ph{KVJgV48%hlC?Zsw(Jt$_6>}C1ns{sj1#HHX%U-nQfQ>6CqJ$6ct=tZF0v`K z0~`5{)GcVk+$`Z=pybd~(-F=X`SKrrZ1RQDavs4l?4BpqF>T3CrTbEBNQ3Jrz^`$`hOP~wa?;e+s^%;U2I}&PyMMT6iK6va;o6JS)r2QPP!7*B- z!--`bMFQr2nmUC?=P@^hodDA^CHF@75D^tpL5nj8N9|lb#goV;n9_-(PDr0^@K?Y2 zftpucYk$W(w43xuEJeN(^qqL)?7`HE9iaYnJYAD_BIDoltnFrM#0jfAmd}>=;)BDB z%S*3ZiRGoQqha6nhOvT`qK9)q3f=5NFi_x0EL~97``4su!lD_c@%>+T1sL?dFbH{( z^(EeKO^Qm@(IDL&_`Cbz$?)gEpPRT*cUXrfpqs7aqfr#+^5UdwpMamL(1}<}z3JW< z=KmdF<%+M!3&h~`g7YgMZ85Z47ys|$B82@tZ0p4k~d;EqSO*pD0BW7Vl0}~wuzoTcz?%1<5OLQ zoH~qc${I4ck$$-(lQZ_^753eh24m2pfr&gAluan2lL*Z(y}}&F{UCB)F9J6$c1sXU z2?;pST70F^nb`E=5_2OrzvJ2_LLZqM9|EQ~2=1%_{qQA-on{`bY8n)O`L` zFZ|Qi@g0eIzOXH&_D8;nqy9d#={L+$Ev$`leS8W!=td*S)IPiXH`aa(|1!I1cH6v% z`O`9Yhn^bLWG=zmljqSM3awyZv4w@j4(Jbqa+7=So&^9>4c-%#FwgmUh@MD4)-!9M z@2t{zcK+uC&zQBJ>yVpg<1-sE`OV{{h7Xq>Sj?$pgFx@?@Z7oY2I9Ybk{8}}teyY> zz-AFb1Sy`fg+4@iw#T74|Czl>nV7T{1`afVqcJn_(V9Dt{Pm@)-=l9#Y6_?aATf4= zY}!_x+=NzT!`PJ;pbRvA058b#+4*_!$`yb^fQ41=mMVCo`1H5*-0?2h5KwYmc+!n}7))(KHt7wJ18n*(@aagR?%utNzJfg1mmQc+ zG-3S$O%IK4(y<8DH~-0pmGosjO~8%0*Xy?ZJS@;NGxxP4+f8|}KaWAuoMukY%FUXz z$Qe5so9)3QGCwI26Z7TOE)X|M!uLBN`h2#J;4S)STFkUc(3}X`hH&MqPH1Zw7M~UO*mgH$&^pLqx~s;@a&8!5ag~k?5^N-XLG##i~ty9?bKz^4g>YJG{VZ zj-IYAf!7dm_CR5PPgJ5p5aBE2TWb--kxrjD=cG|HE5kG_m(v>s~nN9Tdl#scws4 zIQ6~h272=-T#S;9YKb3&Yv)APAIQF__frC#K54*Sy19YSo0CmpKUwB*UKaZp(-7wo ziokHx?>4TXh;B=G`T~dCWDL(Y=`3yH<_4)w>>bYb{AI2~`#2Cyg{v@l)8U8YP(aAO z?t^>rQ=-oa=8Y2g`E4Y<veyGcSKx~gm>~C@!bk@0FFL-q{w_;%MawR zniDK;yc6rIZ+}jS4AEptJxf>zMH^5v7V;)jd;cPH`EP_Jg~hv6zG*C+w~kcKl-cq^ z($Q3}8N$+>N~i7ZW${PVOUNWLv7_WG3&o5G8~np#;S9D~CLdh1ip-cq?Mc*!2s0n> zgWFwDz%51H$qc;6qf@C+*4P5keM}2Mq}!D7NB4jq3{bPC?Lli-oJ?f3X7a!dvOrc0 zM_k;}(!ykpi+=Y$#{LrF1q9UBebfw;SjxPY)uh*XMrkc5=LaaAx@XYddS8}K?edF0xme- zX{*SdcDTP=$3k$mXtsooYUMHfVgI5@uORRSwF#!s6+)4Oc57fBMS+79i!YX%Y7dx| zW=_h99A-taoR9J(<~7@U(ilbi$qZxAP>PkhTwT#J!CNw`xi~a{;Hi8?giTU#LhjMTvXKuX$3It2cex$TPrMDf{GUX-MbgMjNqd1 zSRrZEKP*T+-({8Wk<98^va3LaTVkLVwIYi?F?vJHn9GHnX{y(BEZomBUVn+!A4*Blk7X+nMukB^gcEY1-nl7u~|EK6|YayQ;y9m zfeb4kA>{`mWytjb#GRp}z#nq%0r^_PyBuSe_Jc4%NK$KuRAl4i~RB{TXotR{Ap0`V&? 
zrl%ud_4TdZtE6AQB=c>6P1SX?AdfNw_GpJQ?9B#Sc z*j^q*csW2FfuE=-Sbc_f9SB{w(@LgIKq%YMss+ge_>LTN!Regc%@Q+7*Um4Auke(7 zkKG0H&86&o^5BZFkU@U^aOG{=6Yl}b$P;h@I!=T~QqQcI(bXS(CCE-GpjMzl-H*=& z#bR&k2Rc(q-+s~(5V{1Da#`iX4X$6%0QDJZ+Dx?(PIDgqV&mMz4XSBgRToi0ozwf= zwJIS}RXh9ld9f^aY!|StJ>)u2lJ!&-xt1A=jD9cv-C0DdP|7Yu>HV!E*mOuWRKWuB z)?K+BD&qIpD3-@BLivQBrjN3=utuX`a8g9XvWx?bTM3j|EV8@wQt+@U5^Vdv*)1}> z_EO%^h0iHrc3_&V-b}_GrP@xK3_wY10_h&J#qQp_XMk2U+8Vs1bmmxb6&iQLRafcc z2hmr)b_gwASEB_RHb2Sk$(y3Cq7&T&M|$wy|1x`(4-Lo&rDbIoHI)v z#;6vNb<+7qD2PUIDj&^eo`LoX5Cls?rE0C8l>e!M>L^dUeQx6edH0A!GS1B=A6d^X zGhextE=TWn7hXMauohBLa4hE4a422`csk<0YsX8TpzA+&*X;)n)($Nk1I(r2*m-vq z(T#z}l(%Rs@61d;Zdr57W!ThiJ&fZqvq;QV$KT& z;$I$2kQhuhf9#$qtbc-O<5qqLMgjGa{96(ool1ysG-%Lo;@+hm`3ibe)TkwW{b$Jb zYZ+(RZ|PE`BiB8MBHwN_Nb*w`DQnlfSZ|Q%ENh$e3FcHbr;Ot5jUEAkT>WdSafrF<)|=+m_P{24bBKTJ&GAWKzQ1YFBzgsGPFS-W_2dNyFVD{hU8|oQU`y;2qy1 zlIC|^(jCYQTIvOLrN&EEDXqQli!xKUh;Zx4@UOyTyls%DtKmOz)_O4tC!D;E{?B*4Wvxyo(FLU3!cdHY4LCdaDHG zGuK#$_CdZt%P(Y$cTIr*$nAK=jyH5b+g$l#YJ5tIyl`DY(=TrmjNWW#BiQ@R;7LEm zc%BtuQXr?kmVRr9(9=sB-MhX|%xmfN5=yQX8#rDRfq+~=E4H3fK z;E)pX+@3cspF$ix`Zzc^h#HD7s{--6?4#+brD(dWVo`Y)aLchVY~uWDgY6Zocy>C@ zLzx87Y_?X?k-GWiwF-?xA(&L%``7P8dA%`@-K)=WB<9Wdp0$VO?m^}qqAZs#Ui1)I zTtnoraAdOcDGsGH+3GO&>49x*xu_S*wr~FeCY$ikO2b{dzM{U+#P;QiSl)rNA37ZN zXsu&59lBLCeqXL`pMRw4l{Dt*du>{B!z9W8ry za1*=m&FhQwKdyMm>V4+oP0j`L{BKFvavOY<8}j@x`l!+}CML_tBCtCzsJj$ZA#b#l z2JBC0g?w(L<6PdNyo}*8f(2 z=LkyAPFkX{Z34~K2TAB& z?c?~QB#I5JcSXuQ2R0qZ;msly%N^nxytezoU*!CL#YK9_r|BxYT35<_lh~7PZ5(*t zE4>07Oh&)Pqs4I=U^&-G9PMvdXac$|X$T@LEDU~4P|Yf*cfn4fL&u}OUQuV-|}Az)OU!>eo@Fm`}RDoD`XHjA1=m zE1I{lZBJRKO!$YM=XG175I@LQt*!(T;YO$`{CmeJ}|;rK(?B0><}_WlIwk%rT!RM zh3?Ca$4t7*nAw`a9>=Ee*{<;gpbDPFRiMbSxK2;%e3?tCda+$ zl8o5i2M57R>YIJnKLX4PSbC-Zyq<-WVdLA5aS2Bq%JY~(^h=xEobXCi7BNI*lVjGH z9PT{wNs>uM3mu~fJqmKt+~0UnQE2 z!wczVm|NAacCpjAUNWYtnoxdOZ*Xp3?V7qqhnbfIl#Zfi!@f)Lseg#(%?*Dl&iwzQ_W!9jVoQAijL_K5hH{44fBwVDFO}_vEi2@fa*kHKA{T0PM_saO* zMu!ow+n;k0!9yv^2;>AkpFyOhDJM2At^D3!p&S zmVvDb62>gCyccDqo*^E~VKvp0do-m{$e8s#sW)`3g zO*ZrM5MIdB6-2M$7SRxa$&U%P-8z)O>SrpK`b>emwSO=c4saIIH4lZq7KqvqJ^=nB zmbQnuT066TyvfmvSbs=8qGW25es8`l^@1aL1;I@uD{=Sk5RneO_}FvKVj|pnauxoU zdpWZHWc2_F-BtPK&8EzP&3opG_K`d!C8`a+=nfMOIdCTc(RDNM+MT<9J}v@Be~kD? 
zua+ST#&5mB!O?c_#fQ8zgnrBX<5j>-lMk5xrm~XEv$OuW!XzFOf-&yQBBkqswd?jj zp)fwByDf6Mlh;=Ok`gBu50M#nze+lHz;$WU=Wb%aJX^n;+z6lTvP5^nn1ip0B>E{^ z>+WhtlJfu}Zzg0KcYgly*6(F8_`RS_0Qe%UoYtRVIoETqmoq?Z8{IV+;q6eKnKSU%%xSh}% z8DZ+)jAN$G#-2$s{j-$R=tD$#G2A|?NW5x~^&L_R;ZDdl%q4WKtN!sQ>0JSWU=GaK zP}b_(T5XF*7}^D@L7QJn2h3z}E0-V-HaWu&Kq4{%fZ^jO>=}C7ty(yTte2q(DAYC) zx_p48rKKNuaKQbRueO1IS$S8Rlr_JJt3uE%3+e0u;tLUWeSiD>+Wd#@$IHRXZn@KF z-vpaQI%0xfO#%uSVStSrDv?}I{MOTui$paFWb2>=F-}TZ+}}_dRwlgCj?KuK!6Iq6w&b2cnxbB=?U7 z`x}{M7TsXHo~ak_LQ&L)L&U{*_^8dzhbYPgK{G7>3p)6L^s`o$tZoB=AFF`-0d`pK z@U7gJM;R&>W{+($0bC#-9nJ(hIoPWs(9nfQQpgzZF)`mwE#z96&~qCh_0k2TW` z)1+7$xHO*4rF=a9q3PNFUAR8{T%e@+4j9mWwbNH%*irKn4e|<>G@Bm|m^p-6I`&yV zw-)BEY#rQHgz~O$&l@#?N>>5!^rYpk79P<@Bc$v}G|?dKGw9v%xj8lG9?L_uYKPK5 zC7;siIh+#VSy}S+k^6&slGj`u4I+vEMi=@IMpZ84 z%>hP5kkc%DV9KnL-ltYpRwGC9RcbhA{dFvy%*IYl_kAvJEs~D@a(3RW)t{(x+?QI$ zzcu*D9(wuyPjdD=;W6DYI>PrY%`G3^_E#V>4DU67cHU5!y3k*rF9S z&L#yjoVQ^ukboxX3;qDCatO@)ZZ!l5&F$;0hB@L@AO&LKp56Z0CtJT-CjO?9lfzK_ z#}4HinfgT4nh>p8f}3Oy7d}u4EB+Q})2#WrKd3lFAzdp>IY_jIVL8L!1577wpI~kE z0Shh0@#19h17&-%S&*=z9Anq zPgDi`PVD{j0g_z+&XA^t*QogT18Oo0enV7o5v@HdK+oBC4Jl}W;J12oj#v< z6*Bd~%*&gyMg3$QcAPY>kUcnd2jPFUkFlv_pFQ9ER?3BLInwLcmwBJ5J$qD@H~gq3NNrW4C7TNsb&gA z^$O{RK#MGdY|O19X@mGRk_C>{rd}9^z-l`37(|++a_MU1PH3@@Rm6xZ95#|!?=()I zXE#fMxWq6xt?&PATlELpge-VF(e0SKt%YCZc%iMUa!}8jh8IB>CxS@ZVW>IL!O>f+ zWTncvoQG2=7w6YS&2A)jw%q=HlBr=~IiObMP8bO%0i&JwS}b`s?H}XRjp72;eXc;O zclRL;ZX9nIQZ|(qkwhvdez7Q4Ux;8Vf%|8VhRY>i4zd^ z10<^L^oG|*+N)Sc>00!Dt=s0p^q~t10bjqqd@sy=buiZ>U|IU!u7(O^)a&NC6!+i- z7bl%gnS;m>kjC@ZYHZOEz`mzmiSNjJkyNj=pb%oNiWgESMQ1N@QC6(J?r&YZSRGdR z6BHz#K81}V&7Kr2P|CEk)ot@*fYoYq<(k10oUKLtE|v%%I77qesl4F%GH(8+*(F4b z=2u!GC;0t`faz&FMI(^LcO{t{a=hbUZHoI=C?XlT)s=%h$_mCU;6xYq0 zH8;^UEi5e~jV;`b1XN#2X8uZ9vg+J2MvxWZ!v=@D6q$k~@O|xH!Dx`Y<>44^hD%kU z^y5Ty|I!%2qIzT-gkGJ2QO!k<@{%wGY?=*CNQaH!e8%qp7*7O$t7=s*D17or zxt4uN1i_#9*+}e@43Nz4R&z+s1IhKdphATma3+L%m9c!4VAvbjr@4M`yM1aUQFNz9 zLh!dE2b`qAscy4s<;h(`zSWl8Lfz0SX0XxmOk?gHU*j5!ZZY=j4&UCJ)V* zL1Fp=%5EVSAqMS3|B#NiqKcj^WmjvK_Oly4=bAKqc3Y>e9wHgp)?EI+w@ zD{faLw=ezM?KZ@TfFlA9u`w(Ygavj3;dyF?(BigIybt_m)l+xx-+%4Qm3?=f;cV6R zEn8#*{(9#}PSlsc_C|2;r)}~{;xX9Z z^oWELqSs$Sr|!bkhkU#n8{3Y0e$qD>nyz4e@*&)s)zz1P>r-{f@>%}l6vz2`G5TkG zhY*h8#@2&c?jFFf3!Xn>2{K?wY+uAVghzJqA|r};M0`-uTjVv^NT|zjL>WJen$fHQ zj#-E||JTnwmqL1~zb=s(aj9DFCo}x()WICMhrxhgzfE@7gc0s<^NGoGp7NydJE+)T(6Sa;AiiN< zdLVc9=s{mbTDEnWNLdXKCB!7YOq6{4E7so?G1_$Neq}f z^<3kH5gu2rGEXeafouKO*v5H@Z1tda3FGN1ok{r-Eu?oGAn&lQ?cwNa(@Ki$_UuC& zQ=GA_D>3-t%@}-K@UG1i%Ca9G>MxX}+;KREJryyf$ho7WZeAMP6ph2p zXvP{DaD=ZR){BuAhWS9^e|`X{?iGZPy_%@~;ZtgRt=JY|kK>?DrG>?C*K_ObE#5M& z55KA0aRvST>4u?fD3%=37Qp&e#e`Rg9SC_lr6zQRI>HhLi(N zLs>O{h+ThCJtq9nvSybV1^=9juPjW8iazAUZPrD)9p6Fv#uV z&zxd(P)98PwOF$0{+6d7KtA9j6(@#J9lZ_Dd|e ze9BVTB()C$5(v}XhTeP5<;{W}zYB9CI6wBz5q+t5pAlRIdJ-~+(}LB8V1WkcmoBs8ULN<+*b!PmvEh0&5@>s$`4y#A^zt+cn+CUC z{kbCl>f5va8K?|oN+X19zGl&%Dw2T;nAyeEEvbpuqFuUjwMZAzle+>wcB_`qY-MDE z$kvCdBmupt6R?YPC)EL91Xs`sFcEAB@362<^HGvkwia8xZIvd=T%N!bwSI$!@ovZb zn0dR;#SDA05_wBBt24nmng6hSb?lQeLfqHSMKxAT3g*+X0K z2PJ&g&k3VCa8MS!Aqa28<#GyVt4<>d+U4Yxg;@COfDu4{WFyMEXirgL zi+)=d?zWIZ@lA%S& z$C#svFp@=H?X_hmTzOLHoiujIWNg_*Q5I|=O|3LS6r#oz@6{{0B6(=g@AVfh*Y;;u z&cObHX5D*HB%!T5>J?P@I9n~5;a2FM(YB8hKY*~40o}I97V5Xcm1GQMsDUm+#$${M&12Wh{h!7D4#kaqlc#-q`dWyM)O%|z-AHiN# z#1aSKa}@PQK&dtl+MJz^%nB7p?IY<$vQh32qLgR4Z}bUsI2VdIj4p-zc4d9pzLMCj zIkCG>%b8D?u#0mRFfUp(ik-nNX?qaEAX)+^W4_06CZ$KPLxOum6lw$Q{%_;jRMl(s zFj)4a2N5$!h>v#$OS;Q5p)IBIe?ZRcXGgM#R69oJrfao?@5w zZ6+73ceix*mekYNuIBm-ev0>m*@IvuoTi-e8i{-kIe-&P8^N?;Gu))|Dj7Qu$IC`u 
z(W!uU2}~cvNfP?AM-a~aVpyQ#DtQiE>DIe#qCq!hfa#-_ueyfD)tm+vR!*yTTLm=s z?xhQm@6Go`Jcc~lcsDk}FVG}JQlNEGB{m5TUT~bs`Q55XpirQ^*dHKEkl>G8N*)x0VUa`gVr#Rg>mie|_leJ5pubpG~I(ce8 zdu4UCuaD2|z`#as{7ktLh*T%?sWFn~>2-W+n^I*ge$C$>k{=%yw5xkKp6D7ASi5rR z+jVy<+D3;P5Rry-DYEkF>J|rf@LtW^oBI)(!Z;22kXc1Oo>!M{jYQ`E(c7v5H96-k za$gwbHWBSRY@!p-`eWOrkDoq;mzlA#oOW*a3h;Ox;7`tQ$_lZGhy3~e=;Ir28r!#U z7_MzJ6s#e)-w_{Ey1-T25-XyMj#^q;mK{v5QBY9O*U^dGv#P_pFWhL#LHUG(Qaa>e zcVxd^|H4YTvZA8pgpA3ut(UMFf^~oT^l2X-pKbSdMej{aNSHT&en{ZRBSW*77L^zF zk}q4q{;eeiE#TWbCRp3p6d8*{^g>X7wUCgmp59@1Ln(__;}=i+ez8TOrG>>i!}J=o z>E`X*Z^F13;!;xnBVsnK^ckOtQiEIPm{0`TH*;ucNKH-cb4P1ylwNF8m-mAD`g(VF z_s`But_5d4Ha7P2F$V_+oE$+%h9l8KZ?n)|UNXRcw^vk7Zlt5*B7EKci)w3Kq5OG~ z*<}}pj@D;nY>H5)oyA0^w9UUEbs}u6J_2;_D{-x+Jmb{O9&g;c$ z#+JliMdmuS->Ux07YkcyTxCXdUEMBy?~}|rD@)7R!q4|GjePz51Ox=S9?-WPI)1!s zWJK<^cXX)__dBw?P#VVMTks9)$yFB7d!3w|a^`{FZ)a;OuTUc`DjJuZ>}Y4_zwrD( zsb`hh&podgy*IMVZ28tpmcu>)-UFrCQsjv%W_%mQD@$J&6sX6S@vyQCRL>Wkb6J!z zq^?Y^8Kr)kRC_Ni#Z&f1+Q{5?7U56EMb=|oxJ+QY3!mA2vQ1H-CZ(mJsI{wugU3PH zdbw3W(E+bx$$;&YpXfv9%XL Remote time?) then (yes) + if (ID in database?) then (yes) + :Log "Local file is newer & ID in DB"; + :Fetch latest DB record; + if (Times equal?) then (yes) + :Log "Times match, keeping local file"; + else (no) + :Log "Local time newer, keeping file"; + note right: Online item has an 'older' modified timestamp wise than the local file\nIt is assumed that the local file is the file to keep + endif + stop + else (no) + :Log "Local item not in DB"; + if (Bypass data protection?) then (yes) + :Log "WARNING: Data protection disabled"; + else (no) + :Safe backup local file; + note right: Local data loss prevention + endif + stop + endif + else (no) + if (Remote time > Local time?) then (yes) + :Log "Remote item is newer"; + if (Bypass data protection?) then (yes) + :Log "WARNING: Data protection disabled"; + else (no) + :Safe backup local file; + note right: Local data loss prevention + endif + endif + + if (Times equal?) then (yes) + note left: Specific handling if timestamp was\nadjusted by isItemSynced() + :Log "Times equal, no action required"; + :Update/Insert item in DB; + stop + endif + endif + endif + + else (no) + :Handle as potentially new item; + switch (Item type) + case (File) + :Add to download queue; + case (Directory) + :Log "Creating local directory"; + if (Dry run?) 
then (no) + :Create directory & set attributes; + :Save item to DB; + else + :Log "Dry run, faking directory creation"; + :Save item to dry-run DB; + endif + case (Unknown) + :Log "Unknown type, no action"; + endswitch + endif +} +stop +@enduml diff --git a/docs/puml/client_side_filtering_rules.png b/docs/puml/client_side_filtering_rules.png new file mode 100644 index 0000000000000000000000000000000000000000..2a71d76ae01c7bcf3ed4b96561a9387abce72336 GIT binary patch literal 94791 zcmdSBcUV(f_brSK5eqg@Y80d^h#;LHHkyEf3W79|CQTvI14l$qKqAsXnlyV50i}zD zUIWq*k(Nk@Pyzw&+yT_{-rs$m_q)&Y{c+D9=Nyvkz1LoQtvSaWW6b^Fyr%MYMm9z| zI=bzuDyOyS=+#h3KsNwLdPpV!=X zS^KLwk6-C!93Xoe=A@xuFE zJ5qu_q--a18D&5!>d8&+#@rhMIKECRKU7xPU88xoI zbojCC`MTbZ4+)aG7nu2!4NBWa`P+vj9__4=d^(GNC95;B)rIkBli*F}yKU=bLc_l^ zU#P)+VG;Stc5Q_EVCFEppt^>}^w&|_zPedH-kSOV67M(hMzFN^oVmxr!-e zQQ=ShveE@bzqoT+$qb+`T&7AVG+d9=#6O+K`@U4&{EZ&F`I_x}AMXlx41V$2y}9_k zO9XSXityNnei=%=jSM5P9nE%w0~%&|KdVC$_ny_Sp8@c8jQ_SJ+mm(RE8 z2ajS(WqAgoi{Hc!KX8yfcdY*l)TI!iNIIg4nM@ni!oR03}cI-yF@g4uwzdlpvhw$uD z_MZLWx_K^R->V<;5&3FT&QROw@=l>j_9tLxuC%-{O!iDXleOL(MTbqoCBEb@(>4GjJ>C)6qJpL zBCh%L+wP!!yd>>A%geM_^LP|lPW*s#CNv}~Pxb5gjh5p!?R14>(v@*so|Ld8xkXM$QYx|iNRDox;y@GihAV?7V<9g-65Nm|IS)|)UQ{rXYwlA{Av?+Vh#70+NK?+Ut8jN3W@r!pO?;qUzMwq`6=N?7ueq3zP{3~0J$uXQ>+8qI$A^b)y)MypCE-jmh(xQ1a#{OAk6BF> zl^?q6kFK9*ppQLf&M3??RV`kZl9o0pyl($G?8dt?96Rz7EpB=KxU-pv**#YMk1AZc z04|+@MlVyRTrKIVH()pRVk!NRrlzJ%Wg>=oKjK)AGLd(kyHlus*Tv%FMYxSkGa}o+ zN3YXSLLEF`o1j>1+m=bPui$?0;6an}e5CPtHg>y;$Gqv9aX(Q^Cv1p3R-!ySz1b~B zv2re>F5TF{i?*Nrwo?zI93355j_6CcP4?l?^ThUS8|vo`R`9frTqxkvh{;LXXTi+O93B=n-1^pRYM`bzlN`Kv-#%eu6n5ilxOl`e+G}Cty;K)Q z$JF$oRIz59tVD5u!P&E=!)@<|nqR9AM%i`bDBS$<&(zrygSBz&yLL5X+vwV3?ImQ$ zyRu*#>n|;+q(6jJfL)j|%yl%kw|^s%Acwm8-oC3Ml;65v=`Q{BuWx~`PbhIn+V0QJ zPE4d5#34(+X|h{+GGUoIi1%K!xl6y@zKAjam#@%bqFjp8%%j6!d6zbI7Nzw>x@2HM zzkAO-gFb|iBc>?cG91c#_-$GHdxtSHv8%(On3I|L(zn-+{gv^;a8Jm&wZAJr4hgyN zTK{-V>wAZUCb_)KG}1$MDWg0LDNwZN*~RCN8+q%*eP|pQvQCAYj}Lms*_z*MaKj zbsOk?{rvoVeLHWJal&*cc+MR)AXBI68gsA19EgjH!v;0iPckZWZ%oseYFD<$QapF8 z-q$j=3AOsaI@7y~jTzWZ!rfA_j2XgkKcBj#Hsr5-a25GL;;1#@kho{jr(CB&h)jtG z;iE^<(bj%-gKDa(C4)|P{9rxPqq~`hH)A*2olhza<}|o^wc15}U&ng~;|!B^#Ts3c*c5-wi`rd+F_VV)UJm(R%<#9T;fA4ihSgrmE{dXSbT(5Ynldd^|+njhD zBBsxLdENS>d*>M}Akeo#Y+-@Oy8XZUlVP5CQ@V(*e$#YFg1N>}3qC>Ia>rtoD4h;G z+TExk_KnySx$cdLTYvE=ZapwwM4ePedrArX>Q!8()QPTJ9|V`q1Ze*^c+0oj*fUq{!F6Q zixlI^Ss+k)m26*H*#BWLb;L@9+#uU3+@J2iGQ?x>rPFONPD^3>B5bT;ZR=r?PE*q0 z!KlTB*+#~Ri3zGtX1@FMuX|hfy)iDX6+QoPs5DgEvQ{3#>#kk9-hX$u8EMZ(c~d7K zqGg*nURFf$T+)bwNm8r$bGG1XN0 z#c9J*_ws`G?V;w>uK2h(?96c6hRr*uL&nSV5;NbHr|J|HXIjl(>t_oI2>f6Px%BdK zgJ892G4*8Q^3rVaSmCUZJ6Ue@LvOTYe8`TdShY|^U;Or^G4Evqm@xPWIAs?fwR3RH{fPruPRqd)=hn%+v=s6IUD)p>fOBt+5IolCf9ZJCaoLj4Jqf4s*btITatF|r>DBk*cnm4(j zML13xGxj}Br2B~`|NHx0NQADD>OQwUm6WO;Y2RHG=lArrUGuB9A0>C&WNn&XCFOG{ zxQE3Bm6uKAmDmcXG55#LtLx%l6<0$9RS`_b!~-B$ob4_ijQ2>n`=y1x{C!(XueLq$ zL3NZ^FYNp*VM1eL8&*N8=-IRT%_$qAUjL|=>S@hN*!QXBa&Cbgo!vpZ6KZM!E#jgS z9#}_3f>{v2hVn0rqI)mv3&-ImE)c#eG4YG++Y;V7e`sDl6Y z$(4moNW)!xsTMAD7f zMsWW_5!;R|>r;cA7^Si!*NiL4V{ju{#wFh0r~I;QTE0V4oE*F4R#-ICY2qB}m%1T{ z?^zJW7q(-WE!sS$R$72K%*U$eRWSHN#DV>QB2^w08KW{)yf|Y(IRKw%>1Ip#@9=2T zPjy9eXe_uLO!+4NUQPdtf|BgUn5$@IQ>__2v6}U%)WtX2Wi>I71D-AL| zMQPvfhH{8Z;;K-dkv-CV(zy4Xbxsjk@sUK?^41h;s!P}YzOCd2a;tEv1`6=FM&oc` z_~*-Jksk>)>X9NNZSVXS^zm17kJaQl_6OX|%DX)@Gt`2UQ#B22J&$2I6{(2%`H{uk z!U8@1aRVk#x$KC?-Wja0^FBP5+-VYn_M%D~_NlR-<#)iIFSo+;6Z;+D1v*>EpN*i1(r( zJjaSEJRKYo9uYQX=NaR9?L?(_dmc`1XqlVj_ZaPzvBuUl$h+KIj*J&~>{HPX3kfL^ zr3*F1<2A9@jZFy}^8{1lOXPdp*w^A1k`Tv35liAS6~c_G$kPo6q@*I>NTgE8dH{*o 
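The activity diagram above describes how an item that already exists locally is reconciled against the copy reported by Microsoft OneDrive. As a reading aid, the sketch below expresses the same timestamp/database decision flow in D. It is illustrative only: the names used here (`RemoteItem`, `handleExistingLocalItem`) are hypothetical placeholders rather than the actual symbols in src/sync.d, and the "safe backup" step is represented by a log line.

```d
// Minimal illustrative sketch of the decision flow shown in the activity
// diagram above. All names here are hypothetical examples, not the real
// symbols used in src/sync.d.
import core.time : hours;
import std.datetime : Clock, SysTime;
import std.stdio : writeln;

struct RemoteItem
{
    string id;
    string name;
    bool isFile;
    SysTime mtime; // last modified time reported by OneDrive
}

void handleExistingLocalItem(RemoteItem remote, SysTime localMtime,
                             bool inDatabase, bool bypassDataProtection)
{
    if (localMtime > remote.mtime)
    {
        // Local copy is newer than the online copy
        if (inDatabase)
            writeln("Local file is newer & ID in DB - keeping local file");
        else
        {
            writeln("Local item not in DB");
            if (bypassDataProtection)
                writeln("WARNING: Data protection disabled");
            else
                writeln("Safe backup of local file (data loss prevention)");
        }
    }
    else if (remote.mtime > localMtime)
    {
        // Online copy is newer; protect the local copy before it is replaced
        writeln("Remote item is newer");
        if (bypassDataProtection)
            writeln("WARNING: Data protection disabled");
        else
            writeln("Safe backup of local file (data loss prevention)");
    }
    else
    {
        // Timestamps match - nothing to transfer, just record the item
        writeln("Times equal, no action required - update/insert DB record");
    }
}

void main()
{
    auto now = Clock.currTime();
    auto remote = RemoteItem("item-id", "report.docx", true, now - 1.hours);
    handleExistingLocalItem(remote, now, true, false);
}
```

As in the diagram, the only path that can overwrite local data (a newer online copy replacing a local file) is preceded by a safe backup unless data protection has been explicitly bypassed.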
zWibMA$2s8n-(^BSDsoPJNO9WR&3>-2xAb8$iBYQaXX%b@b&?It9QnyNH^?*Dj@}|& zy}dX;y}VGo>@gVU0`M|9UifH#e$mX-$(t@A_VaBXPf}72&5Pb)4=5}$y8F|{tkMNL zl6$YHbawuFVemk%Ftvb8;IJSQ8zd5z`UmcAdE25Ji)O^%S15i#^tPSHdr&6(Q+1c) z9yjo5J}KvY>~Wx?HcrmrG3Uc~7B!EHoQs#{ZDK@3L?ZBGHg`!E?*$WB#VtJf8#B$- zXTI->Q?5VLTp2E$dW-K|)W=bx<%T|7|IjUmpjkOt@094}#W8O~v}BQGU3~mlyZT*M zthA!2Z`Igkw=TNc^aL*<>^?oV07GL@gPe2vb^rox^^DndiqP)%N%#rYx(V#V!(Ay5 z)d=cNSMMi(%Rt7S*SRlt@8Nl&iRs_~D&GPj8Y+Y?a}psHkowc*&!i&4LX+CrWc;W6 z(YzrD7V&|7M8^lU(?kY&fan0vdAPY#wkPUbj7i(MkA=n2fS6l?c?MDIf|rGwwzl@K z7JOY%w2EamfJV&diH>vZ)k`VsIpw`1Pb}#baYfeV4AjPX^p)>wKU0TZuyu8n_K?Q2 zYYnDjm_-*o>!i9p#oZ>a;}yGk65uJ#wF^bQ-(Fr_%^_h`A9LKsYNtCUuQE6|cxIl6 ziFT_~n5}1&D1Uk;&?+|7wD3XJrEr6*~qwNUPV`+Fb7RZumY!yBro zFvN{JDt7ngiuDgASD9^vr>{^p@ub&Mcg~Guct80#EZleK_53n2xsc4LR|WPh7aLni zPDXOw$`UZ`t}n|>YSfcussunhWF^a+TXDZTxJQXJMh2PS!+G%ytW zAD&%Q&X9%CDIeWR-i6`&3%kLUj#4O;$=Iu?+*G=lJu^Ugd`?MCtym6tU4WBFh)|PS zw_d4L3X1UFo6<27*4(_>zj`4uUo$gHExx{P=YdZ}P&KD;DY!Si)Wz{Wp1!q2r2saA zVxUiVQ+-T^1CeLHHAIiFGtMmkS#ZBbX2Yk1P105m@+?%|zj(gBbsDVQvSkZZjc(lm zWT5mOz^FV63JD3h-ATV~|D%|gO8my1bhOK6JblV7Yj^z?)XcnbDnH{`>-}lh!yCM} ze}#i`>64szB0KHIxjZ8MzLb^q!fzfTzlk~tW%F|!6zSwc$Ys(#eorldU-KgmF;VCI z`3z6WWLQ`jtHtA}C>5;d;&cmTQ)!YRxnwiyiHwt9_g#~H$QYCk2@Bf*x`$COj^>X& z831fX?-kLoQjVYg2CHNma^)N+BMU!VXaC+eS94FAr`&zQL?;jm)i^A7*ew$tEU(VN zEH^A^CZHAUJ3kZ@$S)=OAJZ>zomdzvt{XGXIE>szY(9+YCB8@>sua}Fc@fO%9~y@` zcEVH3%Kh@?^b?sIHgH>jTl-xk^d7-|CdZ9*=I-fs+U5)Q^7!#?FPFRD7+})d+fr0R zp~nA$>P&2>ObuGr#C#pI&tO9?(vf$w=Tm{()Ep1H#A&s-uui7^ zl(X~9Kus*=_ZJWn265K)wK&D4xq(=RNjc8LXOJ7p!3_0&FZ2+;`0Qidk$uajM!JiO z0nuwV-00V77J~%rgVv8j25QI4&p%a`<^S!a?t7b-(U}g1^ij-cr(1tzIF;>K5#vuA zFBz3n4I8mNmvWN;N*n1;oyqfnDj({qW5;DHf$#A{)639iVE2reh{$N+ z?DyWmJi`w+O^NkM0Eu2*NG^R>hc-BW{)G5Y({rA<#)`V~K64oe=sY-iZK$L3#_sRp zN$KvWsH*b#`GDU_LiPf7AlkCEy6v51%WH#3&58CLJC7e9*HO+J+4SFj-l7qX9|xe{ zxFxf(<8wvA1$}+CF7w(aGGyT*}^njwGB4m9LTYkuh1GBf~oCdZrJ9XsP*$x2I zd6s_W%o*}cSeuWT`lsI(h)fqhC*K5!e=Ty*iz`^8aR;Q=ZE=o-V&Hx(r~V#7h-M;? 
z5Edl9II#v*l*jDd+H#@GuUsJ$Au$gT8kq6jI2eaQnnfXA`}Aa>PjN;&L1;l8o7d; zxJdHb)Tj|kb1A64>;Mu_tPDP(z2pFM<_Q+4pX@+ZFfOQH|M}x;msBiIub~#i8m3W`|nFkOJ!w^S*r!-9=p_ zu86bA!rh-feS$R*j>l=DVt={0;F=%Ffq1EP!84=X#iUxxSd;ctNB|vJ7ygM@ruSjZG{zLi=G~uC_|+$ zG(WpwHvXf;b^y{O$L`(Qc%wq=#MuRT2V_ua)iH0KufN+R*y%0NBha79H$50g!O`O@M-V`Jl=paNJ*Kg(*D zx8B{e-`$5^CwR?2=q8y6B5^kdL8K7!mNMv%09U7#fR5|#H93xa<(vZ~mtns1#Ac~y zIS|S^JFor;dr!Lt zjD7NCn38UP;u%zRwIvgGT4oT+WdCW79h*7(1O)|wz3pSC`wj%+;OqC_=gSnAAQNoy za`2f(RwaGhOI^JE_W}yko++1$O!;pAYU;6M^>^Me@)TlUkN-zIBweg>L=1#MLExW%1g36MXJ%LUl($2E&^Wqz>7G)|QYU4`2<~wrah_JAFp5Pcy zO?oOpCX?oSk2RCe?g3cfTG8J1=S6t&i(uM zLwJ-YU#_>(Kt>vrW!=bEc@1g|!4L_hTidp8-_FETS6e&q8zew=A*;AwrAkZF?M^O_;kWQm{(J@0ZG$yWpc&+Wr^(3^wcCiNXcufL z^l)h|19;B{)Vw_nRJp>Ix|aS3h$YDT7#fF_R{!h*&o3Te;1y_KL@>j}dK}I{d&c`^ z;%dKq`NC>(vg6cm2%zzl^Xr&g#x9uivcywm@^P=C$L|_R*|vVHJGOU(yZwUZAwm&y zxx0aluWm{3A0m9G{dIrOt;2GM2&J???!h0ThX`+IfB5XVb#e29EMDX(E;w-q>!08r zQHFbH7@H+ai>G%>bICaNMHtvy1Cbuca^&aFpYt;#`F0(WUc8(Q&YIE3 z?rAjUT!PI>o}8?zsTpZ{zDKb|V<{2XOoy(#+rXy++zgA4H*)`~cp!wfSgi~+i*I4v zC9D89K&b(FFy!@9VakkEhq7WH6I7WYp`oYtK<;}G94wL@ziqcQJF<9Y`u2WpAiY)s zw%lX9WOJLuSD#v8Gc&V2yLSh3%9UCy0kMTsnAsm^Tl(b-8K~{ECR4DR+cQj0Nh%5o zKB&3gnp`mVGcYTxwl*dI5I=vT23s*idEind(qS_Xd**1wN)8+0i{^!nJPZmVyGst< zHk%pkQV(BVS{So-|Kz)61O2v}5ngGXX_cspYzR)MiIs}9cR$HJLfg`QPZ)m^$<1ZJ zCHnjOzq1x+U32xXsbb?@N@;ro@s=rmGn51;%Fr02Z10F;5?wN)T-+_ zds0Faq`o=vBZ%>$Ue9~BM=I>JP?7`@f)ni-rI@JTns#R%5U*j3^zC1JZ-42zSx+AA z%5OU-bn+e}b4=IH!Lw7MhIxBYsBRz^Akf-%eLaDV98QzS987bROG-)cUGh)NQFEdu##Jxb zrtVy3ydy&cf;&0$Fy{KfQ)M6~y$7O16NOC)#Tz&jW@lua_uA)|cPib`X`nhOaH8by z=Bj;eBW7M+=;=U_9&7iycm+U3*KOPvmv}VZG51kztG@>BBfWU?;!vDnrE;%ic*hnQ z85!UlLha|3@*|xybxJ5a^H%yRE9l}Q6_M?*BcHlrmneh7d~t0J;i4M#ZNyM5bcF@5 ztl{?B*Pe`S0|Xn&2hx~#U#SZ-X>@w1#j>$_*KByE#NPaiihygrjm%B-N+QkQQ03Hv(0ZCkAOTJ$BQkO%e(&qn#k#Tv$kH zD6oG)PSh+`+;T`zhr^JV{D6i;qIJSlIB!}$zpUFVoma2C2MO0%GU*KuvUE^YyOj6M zTV10~+@OSP$jteo-@n<$sCh^`&atfF9y%rpg1uSBP41BXRK8$f+P|!Zo(G5J!f#=c zaC}JdxBu!BmFriY{l59K#NN}~;SGoW91zmDXleWXDC;i<#j5jJfla^9%@cLcQ0`jG zp!tTC6yiK`-!LI^JF=2|uC!+JIAe`yX`R91;8=OIxaEzeXJP=40f+zu_eS975>OM3 z0Lp<`hc$2f;ii&)cXhpTBZ6BtzWXt+V(dyHkt7Nshr<$hOSxEz>{33+J7kbwRfb|& zR~L{8%T<)H?wjf~E#RQ@o$X=-L`2@`z5NMd@YzQH9h?C{L1E2O;^c-bWDZIW%j5B> zBw%Bz?xB5}S3dH^Vsi0#MM~eE7?pG9IOJV3vWlXUaA~|SNemWADL7YedI?&2XkpqC z+&b;n_75$Y{%a1Rs8mmi#v7ha?V*z>4Rs{<{1L2Od=bGqEp_drez&AK#Kgq3xl@Dd zbq!ZFYvEI?#|1$Rji=wv2Eui)ZCBp$HQ_n}o@@ki<^9$@pYA0kB|&UVJ*}-Q5DZ{o z`_`>hNFL2TymqT9EqLSfcVD4;DE<2N0{p6}5=o{Czpt}R5<@&Mqykdq(1!+Qmm4>P ziXVrs4UvAK#~-D~x+P9s(wgu^a;*9&G1;1y8z+FaDw}V8XQ?GuDE>Kdb)HHMKHNMw zWCqW6gj~Dr?%GIkh3cm`-?qt#Z(Dm_jKhX(v40(iUW7mem`-rvC^34Sr0J)PzsjM$ zp=^q|R%Z?wE>_B+$bDL9*>}SRti%yyla6ysNl8KWkj?a%wNE#aL~<@;RGgekT1ra& zo~E8c$JHrF3QO=_ivA?#y+j3y04)T68;h*n7 zJZJQCc^MuBgkrJF#nq)D){0CP=2ndhw;u3pM8J(ywD#tec_}lQC8a0)qMZ7{7a1D* z4nxxRLOox75M~*lD|q~!8H#aDN-L{OtTnnAr;~Q>Ip8fQphT{I_$cZmyC#soVI-Q3 zBl&8ZGoVz@gKEii4J~y2f`<;JoC#zh^OoQbCcC|^jaN|LFxv4RXMFSS08T~Dw_6!> zQrf=RKO9C5*;L+hGl)|z$8Bl=ig)uDmu}q>Q>^+7_W92BMi+ z={Cv6A?Nu!XnH0D#hIiR;4pc!lU1TFpf}W@B&^OaFD=%=x&kf>$P1w8^@}nGv%ruM zfN)iyRi*}Xafn--Q&ruv*inS_$SLxbQ&?5=0|Nqx4Ib0Kc#RKANzDQmvIOF`^?Y@- zc++K&4?0Mj=l9Fg!gAbWG@<1Lh~5r`)0btp%3+9)8nN1!-(LF)>{-U`9Q!UdR@REk zmkDKi_%&6gNCU8f<3>Y~=$T2`{Ra0KDWglJ&4v5=`@5Vw>;Q}r+al1(h^W8k;Pn*; zzxqo@G*BQhDkyZ3miP`}OEQMq5W=s@puqK(scFwY>oyb&ygG+XQBEp(mzinx5z66P z8whvbTzTimazUXRDn?+M-g>xo{WTwz?Pcaw5e@6KMyl%SP-stA5Rf5b%$$A5rmAR( zJgP?X!-mxs78XTQwQ^7?C+f(!@14dT0+G}a=;3UK_AKkpe;~iy=q^%Vj=pU5?Zr3C z1Z?T(Pa8R`66!9(K$>Ojnc;5zF>C3w^4BSp%U!~HSW^O&D?4688X)EV9_3Z8|P 
zytOp`-D3=(?8CaVHzAwG1ET4#iloFn-Lx84d$a;O=UiJk<7BisrY0xnx-J;~W{*t{ zAR$`798lpNzH{Y}l7z46B)|Z|1a*M){4~bEOdt?4Gc!|SC(#5?eW9+6W=L9DKL%+9 zmYdqSb16<0Po8iT3FM4-xpV`X9|LNP=?nX@9sbFxp?nb1W%325Lk8+Sk-F0a;oFtR zABe~+&i%Lx@fMOmHG@1r7Iho)X^XtqdT&ooB{+q#i(NYn@`(DOAs&rJj+i<;V<}Qk zZG4x}xUe6i&&I-X83KPLw}R+=WSX_+0ZrYc&J}nt+@y>s#`X2**S_6RynDYIlHsdA zSzym;XxM>;DURJLW?)?SPSVOjw#~}^pLa1c^G=7PP1FXptwN($IFZ?WiNhge(eKe> z2a%B<-MTO>CMPEcYDE0L#XuK}b1Exm_5xJWotiXw&&8q|DMR665y-_-|NRL@nuA<0 zUK=Ds%gt9>GjSpdXi-|N0rzI3JUm}udt&AAH=J*WrZU&9Ts7bM%bU6?`qf)(-nRJRWs`@`Pm#YaE4V1BDF&Xbu|{NJ~mKOBOr%tbqU$u;oEPLDwH#=Zu!L zW2<46aXeJ~!h8*CoNdp$E!>|Bi6~4ZCQ%w}L4N-J9VGmnwI9$YtJ~h-=FJQ?fcylt zEa7=jU3A zm%eHKjcbrd%f_b0N$Sjf7xz8dbPbN`Sap67yZy!Duiqm4fbGm*IA;LFnS7TqO-)TN za)WYe9VOX1=G2t6p!a5*H8>a)5*=j- ze!7ar@}5p9sl`uzZ-~i4cLf@oaIS$juVdokq{kq$R!v4v4c3i8F%15tr%#{8NZQF- z)h9xwS*e>iO)_d;G&;0)wNP+I%#L*EE-ApGRx%HoJQ4onsHqQ?BXso4Y&KD@eeLX; z&ST#fq2^bY@&?j%Zq60`Pzo3y3s6QfJ@Sb~5RLO*0QQFS->);#6sd@L474a z0N^jKPg@``5Hz-~!jxT@$#NbXhxE-8d8(A&K)v=2O!7x>GznN^^F$vW=*rr;x&oE> z!E5^1`w&=Swo|wxnxCH8nimN7uItr!a_u?B0f_?|pC~~C?au|Q2~mVFk1?kFb*)cv zikT?lL@%t@FBG1QVF=hJ`YtS)fs1F3J7Na%Lhk}@=XnIQB<>kq90I$eV)~=mE*ImI zC)a;3_WOrczr@{CTvj2HkHVhTGP`)u)04VKOFkqjO2C*^ahYL7UW;>GTB-)zSNf3M z1#B`owr!8Y-vV8E%5PgzsB!?0mpA=-8f8ZAou0*$xZOcDhg*D#b{_4FPMcV0Nkl*k z*m0Mc~>=!^&AoJwVlc~F!+2nj|$ z@s$wxzl)&%15x_9Z*Z{6!4caRvEB;NU;e7j0wgD3(GG{a!U6x5Ec0(+6cm%NuYf%+a9jKQ+z&yBiwhiLHF(Cv{LQeBTNJt2D zU;^e;;nMsN&7+)%7t~3Uj??tod`APBYI?bVUSWKJ@4*qX8C*B#)Nu1?!0a#iKQ^+pi#62#2tW5JFlF|=0$1M-!OOmxj`-_V6;LEwl= zeE?7gJ@I9aL`Y<$?%Qi;*dDz;u_vn^Xu(+!h)DxB_(I8RUle0TIL#iJK-+$!~$av7HC$8h`0!L}T0oMrI+2{DZYMq~I32_g$B- zUOik4+ATvI@d#M3V@MT4$@P2}7~3cVzzHFa>OD{}1mJLE-wPKYjX-{`96g31)`P^* z*b5i|Ddvjk$({1xlsP%8pYQzIGYy3as9d_3+A|l)T1nGOmx#}}#4UoBrA5xFkv5Wc)PU-u7FnHod79-wo;uW@&y0a*lHSKBgU9{M#%a26lY$&Th{ZH0 zxQ?5|gocNoxnq=L%X+)9?w|NrWsvu+w)Q(w1q1~4gTbwF;~mOB-H?s`$i00=b5+JmCv7s% za2VrGwzODVTT3Y6ru#y*2pbKRQMnU^wRMWi1>iEZjd{~eVsrZ%z&xCWUzb~x>^(I# z)pm*@njiSP3p~iKDEajKKnTzkH@>}KHaynId^Ui0684=sas0r0H_bN5Im;IaBl(b4&FxC}Ijz6f@DoyBB)r`9?kjMUH%`MM5&Ix* z7g-LQO*SsY=_8ng#;dYSKK(h6Gks(?tMot+@Ul(!kxJ^QBXuJ$dft5C z&k>^hyGeH|dWk1d$7$kQ(-W1H%~J?rxpKi)nGh9L^)G9G@AA^>1Cq&-^j3qr>IC2f;El@h6cR9(fNmf05sy)U>ae{B3FYjWGpa)!;e$m!jw_Q02R9N3 z+&nx05~Zv)FI;GNbnp~7iQ2lE)3@G;aRa&qD4;K2zcy4uv<6pCf89abN+)9_npEL) z#g%Icd|z6ssy!AyRUN-i`v^4!8!maO1Pm?Ixn5l6r5{YIyK&Xs)G1bXWRnc}!X9LI zm_%h|Nl<=(8kUCIM^+~%+=LZPfyR;S+ZEDd~F8rg24qh-}4cfBXCh0kMQ??yMhAU;SK?!^Q{5e(aJlj0<*C8vk6$)KFR#gDIu@crk zE*8G^${I8jG67!jFY|MN!NKFi`}cT5W~4x>|I-ntM(_x zq?PBKReS-HnOcw(i|8) z#JFbB(l^O`=U@d~y8Vso0OWz24SBLOYA50$**?~x`LM5AIK^+29fz#wK z^xEsow>o-#0+liaW{xUUqzNC=&jO0p2C{DZ+Sg2Rv9>@~C~}l|GBPrP^>gQasDo$= zH7g1bHIiEucb2>o2*I#Cr`(fVy|yqkWElg(DMR^~*aNTY0m+2YqRqMb6fb99`^qTm z-G@^=TG-Uo1nz;Q{)_8YHcqlR6^u#H4Kjd}&+vgk_w`+0V!rhI+b8mFySv@8-au!9 z`eziU^2)-pVg*917V$X9W!wk1S896tl;{n&wYd%gT5qb1L$54cSXaU`l78({po&hF zJy{R1@Adhl^@{HghXoyl<{P%-HxTL#%1G+5s>kt7;FE=iHF_~e*fZCYa%|p85?SOC zpkBZlBDH4=b5>(6!hUscczWi>4(qehpCF4`Z_4xP+ou=Z!q3~uyp)8qK*Vq>)=y~Y zvskK?8v`Ia0a_mCj1-`u4e&Su;xLuaLdow7YxFSkyO-ktb%9j%B z4dy}EZGp`(kP!=}I^jcye2Y|wLD@jdOAJC215SQY1@g4j_%5Ea8ElzK;D3ciD<#Vh zgsy&1yB96%;zm2`Z;*)~kBD1TC(5X*s={1{(|h(na`l`zWBl12t+Qn|B}L&tH*=A5 zB8r$yBWLvemeGU;O?(q5s~>C9PsI%2SjB-$@?Ek6hA&nTYGdH76Tq+ms}ZVXBnZTI zw~RoP3p+St@LhYafY^3+Nh-Mi?1L6JkG}eq(^5n7d74+ z*yr{G42#@8uDmzxT=edJ`&@=w#VFhqM%$RPi8ay~;?8^uxQoE;^Y*rtxaJe-gI!gj zEgI9u51bj#>97Y8hKpHE(r*fkyRJ~$x5WU+*6c>+Qf3)T&fc(i3ROdcf5)h^A;I46 zsYLCt*gDQZkQN;C%+!Q+i|>dSj(zCe5#tTMx4Lpdq1Aw)OV=6QvQF-^OFKImAf4nb z&UED7_MFIjUPOOrjd|{cf1aPW9e~ky=Q>%&Bc`)k*8TRp9%1)|O7d4F^a&23~p 
zH>(F5Kj^Uasb;Zz} zT9l19sEHUAM1_Uz-LA?i=+CuLm-|lP!o}&EG!C`~j&~2-srDZv_Fg-L&OvVvHjIUlTz%DpD zi%Nci3J8SC5l8^>BJBXbt)m=klHMX{+d7 zW@fiHJny}imws_6Vvlqqi}txObT?UBtk^EmAk(n-E*9+}208-~N;q%iKkb?3_z6AV ze#a|uEvBf=GxUx3eBN7=X+))G^YSkBrLE%)#on*$yO(C(r^mYoB-lt$m$J7Y#iKs` zk{7NXI=+Yja(sNHQ~na${foQJPSS$fo#JIH>$wFSAI)ONI=9f%e6JW~uZetl*{3ey zt$^p!3L&b$1zlVpFiyEgqnxb7=BPF{C4Dd92LFkT@WzU= zvN5|=%jCn~0+^!l9e;tJt)c}dls!P_8v39>9sm5F*a#Tb{}~&Bh;;?EJQ-OG4cLFM z5t=%Fs<{jh=ii?SHsOsEg53Zv?OcOPD=pd=7*@e^*iVdX`nl*&-g#<2ps;@s?6!!C z;R=ZdY55$}>TjqI)Wn>J^^(!z&H&Y@uRbR8t82AcXRqNCUq$Kd50me_ic(p9$m4RB z8Kog_uW~o6;JK3-Wg%}bul+?Y$N$sy?!Da)R%TM{nar*-!mGz?2bWYmkajnFuy*oo z+M4QLCqNRmhX#sbuf@4dLL4%VDWI%UuS!kv?_L=rlvese!B7xx+p-0u-kyEWded>5 zjlaLHzZJn7yf*~oX~;$|AufQS;v5p%#E%_AT$$|bv5y|zgk&P=^sDJPWWXROW*$6H z{ok33acHN62F;-0;Iw=#n6R2d2wZP~%oG7*YWPRffvKkS`jGm!sp%64HLlYDs1+Lb zE45H@1Dc0c9z)rWpNG{s;zld$_kDy`A8U8$iU$jq#gw2Huot)Ulxc=*Zc$M@SQBCU zSqRsEd9IU=v}OX`-B<%I9lJ00cX`pou&!O6wVjX`dqys(Ml8ey&)&jwjPrx(mB+%iFK>NQjcrT&>+M6I6T?9Z4 z7`_gWH1ez9j-lJg@ak0#F0Lg=^mA;F+TAMDI>n15aR)7X=eI2DC|oo#%xWd@)2;NN55p;oYZK4?<>*U3_)x6e~; zPL|0l3XgB2P;I7f>nN`Ou$PR8lML}@MvLN?{;&Urni!PkQfNnL#dvuwAv^(?l^e(a z>Njs^Yl2oU0QNJa#zAnB*v3I~2oyx{zJ(_Pjo8#%)vDO8CLEo}CYo`ougvq-tqFzM zPN)7#<`_*iH8vUo29^~L$s|EBt#(KQasxDyMudfBPzb)(&wsDI{}WTOJbVGXj-!Q1 zx$HF%qUN!)U4Z=oTUkKzsn{&W|Mz742&TX*5ER3T0Xpf-wQ|U66-+l9crQ^bDL1z9 zIs&PRC|bZ3I@KwZiAu8meeC~d!Xh?wQ2dDS>LLJ2sLl)xcIacrCgF80p#uPlwzD2P z1OgBC%X-7}jn!~v7ie=35ec@)0l^CzWW0Bns>Ro3z4|kQ@;FM*XaDq+l$+vqAE%-t z!dEvY&wf5dMPn}=uWBeSf;E|715Wnh6e};k-SXEhe6H_*M;)=Ch-pn(KgTp$G_`0f zGW^i4MA9P!MIZy=sdmfSWW7W@))-Ws7R+htwf#@X$zmLf}e3k@jX#0l^#~~4@LC$G z7-Oj*m_yK5*=5=Aw4&cZwAuMrlc48dgRMnf{K&F$*EDt97aZ7QHt*oL`Q4R4zxa>+ z%m%zNPj&G|wi0=fgI2@A&<^N=Qu!difm0tWiR(vwmDMEoN$vr>rc^|U4~8$vQvsgKSk zpLZtKmpDl{mw{pq^i1Z@dgOH*ir`KLnFlZoC}>tWF$GuR7k`#cWGn2p-#J5|M>4UO&4MhM@d{KVNmw3Viork|rz7W46b6v0#k3GwT&i9 zmpNTJOI{C5zx{Bl;0@29&561*!$n1s)r#bX-AEn!`t>VpA2fGWYl5@i@JgcIV}aZ3Gpw1v!38W53f_;SsY0KAyxtWoLM_=X8?AMHc!q%RoqxSwYkMf`9Mh6S!- zb|1kGColC+J}DbFxh{9XowJ){_L&NV5v>`b-4|Q%Ttaps*#zFM5qa|FYLD06O*xYw z`@sv#xOVRb>)jsNf|s!F-7~ri!7q_@68d=nCJaRLyWv{%du$~X$Be56&!2;5Xq4}4 zm)Gae^sE7@-8L#PA@Of+klH21#ixKiK)j{!0xo!M#UTAnslu5CLH3(qJ?WHm4IU#G zngTfxL2Lf9TUyYw2l2zUK5?DmzP-OkD?wcxg1UJO-qOIg16+FGlYn|4z<5I#ysZJe zFe{rQ%@p4)Km&#$}XaI>b9fO~-f*9x7K=RaajvbU*GL z<)Z{LB=XX$ea|KS#u?d5Dwpm&5 zrmZaF_=i%zhStEQ)gI(s0;hi-CJ2n>Pv9%JuUPH6KBTIxmgHGD=q`YYYD;%u(-r8* z_1&@uV0u+C*D;U}LGdAIxOG{fkA&L``L5Cv_Fy7W4Syo%BH|@-EODhtIV>vb0}zO? 
zr6xpAS?}=Yk(Jd~OS$--OeU|kcvY@lk3a8v^1H{Z_Kin;5cy2-ZQ(p(D-{*Rb&nNI zgL(z10h|u@Ff82t`}=8buk~wE%;8~dH2^%n_ycl*DO&=}tgNiSmuDD1^-{=W44b;r zr@gfUH3bF7b~cIo4<000f;+DnUJXF&1v9rT#*|DXT%YoO{}d3Ey7 znkzXP<1G|;>Dli^&a?mQl$QBh#y_ieBaijM^TU^u{-kx2Lw1VcUxof4g`PoORV^hE17~o zfmGUkeSMI1vY=gfcs?f0dIwNdRLZsw5V~>;e=d(5de;%;*u8<0u#w)(Lyo-5Ou$BJsc;871jRp z)Rg!EzmIji$5TCmG4^vHa7uv>PK}R`FAX$UKxC6n=koS-wMdRx{Rv+@#Ot8mf~1k* zxr0~B=xi>~=n1<8jiX1^dCCX%BL8}Lnt;rpW0R9)0bRo2#*=j(7FcuRss9!5>MNkj z-4RH9k_GTLh}RAzW(f4ELycJLjnL%s_r0dTi_9i*hx&>}h|ghmG5f%`ho}DgH;4H6 z1|p5ULhL7H$UA%aUTe*UcW3<-K{v+T9K=CkFc7Gx4}OrFOT@sn!inb1gByfV%rxm} z^(^3uo~_a+KAIn9@ltl}P0(wI4v>-n+Yg{I*X8zoYc*Z{gTXpQp#7SL@+Bb1Cmo-d z!g>8Uya4=p&^5My&mJC}8C)0U<9>I-gw|TowdTtMJ_632Bn^1V6ljjQMVo`auK?ye z++PHR&ztdCl%amp@4^l7>Am^0v`n_u)g>fF&rw(S4N_^*CefgfrtvQADTTBYQ4(4ztG#7J5tYhp7)9znURR6v=X2lp z_xF3;zsKYLkE`o?&F47J<2;VzztrAcfBq8OPgZ1ZsxWM%auZ1_dX<1Ym*{^iH@k~9m14STUDAg9?f z7dHDY4TDfA+ASltO7<|>i|K^ZvoJJiPf#~WPXc$Xh$OnbaDgv335gUC~)l?VHlRKS#< zVxa$z9$$hJQWbDKDU2X5JbXy?a+1%L#2N9)SRX4EWz_Y9a5@8Dt+%eO4kX3zqqaKy z35lfRUG@+A8j%U9apB{72pWdHi^*9@lx3U54Y2`lg|sin&y1_itWY9&$|SbVUEyt6 z_c!T<4E}IYn=LAl351F5<0fyIMU394#{R=MM2ec2`b3F;&+6{`Q*<7(%!=!2;suv` zQq2nMM=b~|%w^hb@g)8dg+LL+u=vre!hs^toBZF#ME`}8`(Mlp774z)OH?&#zOe-6 zFwOF(2^Hr08SmTkY7;st5`zZ5!T&@>B`p{JCatWj3?MIQx>Q7sj6|X#N?7ZTdoX?I z15X~LDy=B)`UNg7F(HM{d=!gYFBAom!J9X4Naz@u70Fj2l&GLHyJoALI(hOIQp9!Z zg6-(nl9A#;dmmv#C-=YRAvqOEO3IO+o*r~P&ypM3`C%(AI2EXne&Oe%-u!cueKlEy zEM*yKtn}URb<+V`g<$0p^>!A6b?`9$`48y5BwnsPA+N-@kCNW1B+`-PD^|D+xdPwl ztVQAcyxlS#?8%P=Ur~`*(7h(yrL9>{Ye6@iup~!XPzta^A-2w*1k7b~tklk28+(uB z4>64~>ljm)PWnqAUCSmJ9mj*jX->SR_U7i~m!TtoL5jB%wEQy9N&LUf4^5VaF^Lcn zvU+nMH?p)2MDb_Oo<%mvV zIzdvzCrGaMv)RA@T))n8XG&dIFtRfm`{9&N9m zUxxu$7 zbTHk$P}TTKl7!8BpMtgDVjF~-{oz5BmOVar$Vw&t_6U$==f|L2W@&V2*xIi7cK&ue zD2203-a^EqH&O%E#@;5mR7KQG1%7W)QVieoX-1XWmfDiFYk3@5@UktAOG4D>)@`A> zo}SJta(}>@x1mwF=9uM5KC1RYx`GTp0Xh+N<4n4cgX){KlVa%TB-GR#m*}qLzogzm z{;288ZoP9x-UqXcmWFUaI-4#b5j*;{e@I>*3;iv8D<|f?OKBRrP)#Eyk+j)yjttDk$F4PHa zZXJ6)VSVTGQUzqt1_TI~TgC^vjkj-?b829%Px_!e(aw4zAQZgzOS~wL-n@A;g`B>~ z0DRnRu00v=@#2odK0{wmFQFyFwZ5StL#`*KXtUE&E59O&tTh9_Z&?!tJjrPt6`yXu z+%zNdxSl4kYe|1VE(r1|sw!rg`FQKj z#Zcbu&Nayd*kM+dmE|}w@&+XP121oXLNS(+fx#jL(l^9tr~sI#Jb@R+_Fio>;xSMi zR#zAV_Di%201aOcaQNk%oT4ejXn=|?WJAoV6PqA}QZ|4>OH0nixzgw9Co#3HRm8O}F8E)GT3<}XV(Enctk;EEM19^Ai=m6LDq z>!+9U;^L9+It1zW^K0alK3oAndCl#S7oxu8HL0elY+lVsypdoVsxwn& zEeeq2*TwQV?Ai1Di*(zP4Nh!qY==KQ6~{g-1G9F*SnFmO+2210h7B=>#EMPUkt;_i zFu1gPWl=~;Ol?pIeAmQjwWg-Fb_Tr6aNb_Y`Ab#_^|+WAoto%VNord>%U)`#B#U zGLB>l6Z4zm^*BmsGOAb$IJY?$FI=kfN8u&ARL6b=iO!$)c93%Ry?D;ZxqId41bBG} zgks(ZaOddB~H7Ua`yHeVoYsqjT8}DJ}>pY?8pq| zP_4^k2w>W((Hbro^}L6-Rzvi;{d3Pkn!^(CalYkEaC80SgSgUE121@d9~O8x@GS8c z_x!p@FrdG{RjmIwnYAafXkez^uOB$m&RsQ=gsUMzlpI+__8DyYkI2hi(mSun-f5sCJF7 zaa|A48XN6Y;ZM5xABVuP4u$Q28cOAaq@+rapQq~n_$&1Iz2~+2P+;fUI61Uw?ggpe zgF0ete0*|vsZS9iaTMIfLO(Fh-wMM}P0Ym)YN}A1ry5@9n12tfP3Jb8x%gq#^Sj4< z!Ec?gR;JsPzS0uq9Lu8^9f*;x0v`e4+hqddlxO1@jedjw8d7UV4l=z#{4{SRtbtykTVj8P);sB%* z{@`o6w4@1(iG6%s6$lHIMxOqD*bQche7Ad56roduop6faP!<1Xm$>yv+P*rdcFnQ; zl9IC;u#S@NicV04iAsYis%41dh#+N$N3D}p%J63mB{Ef-<-}sG9_*ru^>GH4U2nV( zOpGEFO?U+^K8gVXgXgY-*9nQ(9w)?o+$=2X6kdNUpQANsQU=z$;^jkvpdH0+^4XNl zc>Sha%l9tfhn6AuMiedLNnH-$GiH{~Dajw2rydHhKvxpnen zSeT>U$q%oBJPv%R7&mHojeZYSMcdEFfKd7wB@nN2j=lRm4>o^(aqG;Xt>*)4SNrN* zQMvlq>wZs|A58!gX?G2!Xbnh6`=VNjtF+G1S1mey|NWPz+2z}SSyM}kvw&0Q z3)9C{fioy0r`??)W>hIxf@GB>Cp)`55s*zr-e$e=@p{)y9)TZ%Z^ zo3TFz2{?f7v>Tk~xAy<7VDEnxAo^>0dfxDgPaW^JO{c<3nvGE4bbzu`dIW&anV z2PhDul2U=-Rz!BuVPUZ=g 
z`4b*PLxBD7YLO)?rLUqtvGC1DEW4^NboACoXw3&EulRI32q5h2J*JF4^NDhIx%YVjGB|^lew0Fo?|uFmy`m!uo}dIEl6fY~SbyLM0F;G`cDkO+OQ4GJlQL z;&C(zdWritz^RH*bJf&61VfiO zcD5_FcYop|%gVHvbtjL*;v(TJs4gG1Wirzg;I*DtG;N)en1UpW(DhglGF#UAjy37= zd*AMxxbZY>lr3m$C=s!(B;?SdFku)XC- z;CbkL!=Ppt`ch!IzNZBtWM7=wxjQ|Fce%Oc@sIHlV#k&BjR_LM!d(#XLM*vLL9wZD z;es$ca&@}QL6G>|k3WF^TZ=|yMqD*2!{9&s3(Bn5XlKMDW!nGfG&DDQsJzdox_FS^6~N$l^>G>WeB?w9!cv(wzz zfWMdNwxtc;tOzM{bouKk*t3VYv~a1K^@)?6rXeM*RIO?90Xa3h$d)c4C53bs6b?)o zM9#mD@?}ciuO?Df9U$K%H!+tW4}w^N>Oc~@K7hyG4#tdV2K8F440RQ8B`!;9IC+7<4-nL#OlFzBXrEN1XU}9w4b<;UA z?J-zK$VZCSWdjo^3!DkiYui78CB`zC(AjEfH4J6o;gP~k{R^u5xj*G);- zMzpY~X#9X5RMwi>+O$(W8NT@a!2ItkFN62vexG>m9G}_les@@Q{nk}hQgCo=s3q28 zrQ3Z3J+lA$n?2)mWrvt<@uQ!#wMVYa{Qw`HCq7&?>QFfMS#5O|Rt{0o4~GN`CMgNK zfI&O(SOZS~w$J=YG$~L#6wc$jpJOzWukPuK9pL~l9MWOtD~p;n0{&^pcjN4CkoM6U zIGsqMEl}}vm&--u#F14@Yov#2)2;UH6eiM`;0Iuhqbu70(I!zO^vVejFKi5D@L-%i zT2hJXip6UzLc5)DV`Bxy2xuta)3U0{~Uk zeZeOYl6)g<_6s6KBVIQ9F;-u?hhwZ&yYs|C;YBb&g}umsrv^-RcYDk)9=|VO$iG$A z{uP7Xb>`+Zc3BM^i#tM<3;fv#j{jxh5fdJUrDT{ycVH_gbyAlTUJe|-RNX85xG(NKwCp5G);Yi7OF z)XmVk zLVl}4`(XV{h}TqsKcSKWc(w~^o=@Mh?Uf*fB0>b<=qL~f!vM&ZpRRghdZ$JN5w?_) zl9IG^tQQUeCmPMWFiv>|tpM7TSvK&q7*jp6t!uGD<%d4xctq}i&0-UP%(I@k&F}<7aVMv+ zun?L=VAuI)J1qAGdVVVT5Y6twItopY`iphW#COnz0|i0@s%Kjsl9Y9P}8*N3nRl zUwSZA;&-ll%+(J?$9-3iGffS#6U*gKd8;Wub9L0`p`jS?r2ztq(%XFoK|w(RSlE6_ z2}^vz#zv?YggLPILTfIM_jc<-f=Whap|9+)5a7R^cXAHPf7=VK-^Vlm3|W9jn3%RL zawy%P8=WaGANqiuH;)oTtsTBWFMWo!okFc`l0WHtD8A!FC4k7kzaBUdFcp}1=?M@0 zJ|IZ^A#my4hp3jH+I#{40BScFG*~G%7=FRj+2Qd~QLOf%|^WE~a7zYLGatZR0u8op^(ZK@aBkNr%j?%C+u!?3c#4!G&N!p! z4^rn86}5^Jxb1jy1Cogpf38+(n>_ab!y~>i0HtXO1pa|}tbeR`GdZ9D1B}0zUhO-N zR35q#vAFGiJqDIKkVuJua}+!W_%o)&dR(6r&APw% zM;Z#^Mqi!$sz7M=*hN~anjZbJT#{4tJSDv9E*4db=tRM4dk?$;0(Yk!oJ{NKelkWj zmuCe(fOq8`=jK%bZ$qvSU5v96GijSaoEhlvuS?Y{M-H4QzhN%FbWpniWDI&eLSjF~ zM6+WnHj8WBCsqAy&~p?X_Li|lJ+1+dzs(EW3rASwgkIW$kzuQLJ3cuKkZPiS! 
z_QVE@iUZ*d^)|MO=DLoNkw<$5wHg0Su3uMrps^U&+%SikJJ@?+uOW~mZ<%M?(iK`= zx|VOe0x)NEYTd!5R?cWyT8w@gthz3GUI@z=mn>;+ZXP(ZcRt~^XQw#Uf_SiPFCfP# zAa_1F{MuelM8a*v6|8KYMBp}bAGcJuPJ%Sou0W_Y!d8^^;bNujbhyY>&7aOzrHF6f z2-334i<25#?2PF4A^+s z$eAkp#Q-8$Qu@bE%PH!`c}C|S5mwdkR`QQ|)$3p|wSz72-r`K4cBre`|GK2x)d~N;B+g>@rpCrptZG=Ln_h)f0EHDYR7lu%Y?2}PuiGIYRc1xYl7nx+ z7KO7g>bH7xCo_HGLO`wl$0l4u$gF*Gy;)>sT{o|wU* zJZh(?nLU600+o~xSm2Z7iFSd>UXBj|mekgY9w^R}xV84c&kcV*hlg94l@U9s9Fc{g zc=8@62x9B8j@wFX!-0=HE|6Y~39ES_khngjdmjnPtA7$NedF5^+h}GpkYo4flhFiD z_73IrWceejuekPU;6RC~0g-3ld^&08|~jOX}QmIDtPx*=rn3;6WM-#RY_W*ORs%OlCD9^_HBpP!8uCmLri z9_f$Owz}<}C+FbfPCW78k#hQ08_^j|nYKFw<$k3ZhP0T^d9IlcYiBDau9;=^ zZ!KIU+0{RuF`31mPpW$F;tUo%lW>5HEQm~3Lw!3RnfV`&oV;+=A9Fb9$sf~~EdP%U zj>7)WH~&98?f*|s^>ItQeTeFj^BfD91i82EW)9IgBEkQs|53>-DX}Q6Z$9E7X}o~- zazaM}U}G%DtHq4irO9dMkzJRB<0&t>fCGV{o(9W1E3;l@go)4_8` zh3h|XY%uge%>o84pxKcor~u|1+_T=jdpD*TOfip^OXlU~ra#$(br2Lex0ZPB`b4-w z_$wGAdYr}6qd7P1Wo`bB>e5EfExz4gg1uu5UM%T1VLLv{NpGc3xcqK&KCu9Aw<7Uu zoO%v=*BAk=|1PIDAHnw)it%i~wf$r=S>~@ooTy8=wp>Ko{hHi*E-2v} zQARF0@dJJ_@Ypd-k?d~9%wJ-*Yck)~OkRMj)1%RW z;0+4}7Oo4LtJSbroVHbvQ4)${ivZsB9{LxoD&Pi$YAmp8f;IgFwNF{a7TXilJkTS7 zh}B0lyuLwCGY`|7+iOHI<)Ub5CrCj`2>pun=(G{zd%-A4 z(yk?q?p$@WGb!-NPtVV9>is#2s1KCeoNs+tP+>H;dinDEa_vx?qf49pE>()zklFu@N{Dasu-zV~s9VJCTmG^G)uG6+B zCy{VmO3e8C>GEeOAIonMf8Zqjp;l-my#j^k>@Q9Wk3k+8!&kVR0DbHkbz-452<8q> zoQ<=BzF3p7@sWZ;iYYoVErLJtoO*)AB4Re0Lqx|x`XM`hEKNGRM0Irap4reXh$r|F z={Ohf9cSSj)b=;z%-u`+Q)^vAgK5C|YaP)DC2T$N?MN{R{P0^4tMnSiw8lYsyN_KPhF8C2r&gx#XbjJAVVK^%$CntyAf|3kD zU8F0pGc_?$zpDA2@>;)k9ABvnsIQW=^T%!iL#i$U_8;zVPkb%`S)qxYBAtOmgHm~i zQPvbebxi=mn@B;~>Q$uc|56H0ND2=eluqL_nc2_ko0=R0K4Rm%&_vcq`gP@LBM9n!vTmzm3W-_O{U~lF*%!w zc91W>fkuPWx)2&m~dys?KmJMR{%XaVSZ zkdVVQ1&GvK?UVMEEm4_YQGC(K8MAfT`DC?y7&aa3H36{xbAPfsRjlBmIeA;H8XvzzN-b1snJQ{-@xMu7n#H?AIJ$T$ZzrFg_m(W z+mXClNK`a)E8zWe`;VSF^|_#6g!=Yr=Hov|qmEIo9*sQHoqp$@pa|mURmjF8ULE%W ztu^&b9TZmm7E)NEdFB=*r2PE+5ggJXz-qjPJd&S=Wr>?>dBkKB{w~=zKTT{jdv?_C zpan@OkK^SB&Q)@&6zM-`2a*L)+48y>JA&$Y`0(xgmCN-=4Au3#jp*X?9cZ$cu&{|< zcnEy?&U6&(tw!tejJqG&j(@$xbwCe3k$D&hVunAee2$fS4d~q-Ha*->Y;{KBYHs=a zB1)f}{MHM>+`kzoSp1^-P5CVyP`Lq3u()I&&|k=w(I=BOuP6-Z1KNX51BDY778(SOlLWiUxZPs2F*or|G*a&&Z{=COM=dIVc8ZQ^ zT^MT~dv>&s`kiVZhrQnWDf@?_v1|&(<%NH}9}T#r_*~||E)x@$s@6654Z{?jftt*Y zSaO>Fp(=cjn_sA7WlSch3KWlH((4-3qNbNl(KTs=@E#yjpdlOKlj5WgKfz6jh5BqX zYd7&vnhBQe^p2-@!bjrJYxUS>7fb5%oBBC8s-xFRwXf_VE791=N>6b0%F{)Q$Pb2H z7g|2c0^R9I*}L4vgrBqrgj}zqsTL^9(3#x_oE-l*X{O}&Xr|~WRyaBSuNA)3mQkpN zRG&EW5o}A4et(0b2wDNyN1mHhgSP`hS}N~4cw$UcLj3(F`n zwgv^jj64cnjs^w>D1f7S`rpf2Wa_6B!<9%I-p3HdA~VzyWn@YW$duBfc$rE2Sb2KH zN3g`(j!LPhOarPlVQQTZnlo)uz$pj14QrI`pF*Cb_JH{%zb)fsrKP2FZyp1^cX-FDO)wH#=;&_GC zz2L>=h8==LDkWQj0hCTKuFrz%=r>A23kA504l|_3L{SiCSK4wT#wWQjpI$Di~D8+*BIx)QT ztC^LRUN~$@c~u`)Z^YkdeHai88{TQ$Dfc?`FEmOE;gs)A{(y|LZvm3T~+DBkh z7#Rz_I09^GojetswzkLNmjkFbvX+^rDsFc{RN zV3Jw_)#q_p{Aru&@2{98<9tiq@gn7)zr!#^^#VK#-=sNHIsAZ?`ZwweUlF@qnrKR|`IueYg(A z1Zn^n6hzf$oAS2=TeIBuR}|NV<&wUxBK?depjZ%3;{==PB9&A4|BlLec;bJB%E=%# zPvtxd7tz({8BUxyQAj%qhB0=&I7VV?LA;T8>?Js%DC!Vih+$^u-kVyP`kc}~C@Az% z7P;m&JPG#1!zaSu^Zh4`ygFY58jizg5wOkckXZ8H*F-E+OW%4Q80!0r*!h-#um2`?s?GaV zf!JB<{BL5XP6#2rLs9ZeOjP0|p~wJbM>C8lSiy?nO82e~q3k?;F&k|8=GECer7Kv! zr%=wlwzg<~y0Q|OBM*)xwq$&Tx&C?uClb@M(;Frqrqd92bjD>{B^&`&nrL4U(xOgIS;c{ zo`*W$mNVP(VY8L$8Z>B0skcHPuZ@_;{JM2sceYr%K}lv-7C+>5N=j+_+1S}(?L}1g zP^a8Od#G!dlhtvt`f_18WIyc^F#!E_-5d%3(KX? 
zFiD0Bnij&zew$9(GV0GyuCl-MdA94zlu+rHh0!HWVt-b9IKZYh3VkUtY05fJ_gj}| zXJy4~&LtRJ{jC?{rGdkl>G$f2*IZ?krp?EC?NuW6Ac!CVo3~qZG2s z$yn-*l-Ly1R3C){imgunq)NbE*Rnm>mni5(dYVLvV@5U@i3-?f@;} z2q&3t#_mh^V14ZcQQO^I@cauP@!-+3*GJmSdTx@iY;}4UiF}u#Kgq-c)kN7e5oW|N)T9(%hnP=+TJI6=2_fbNF^e|!tPalC6~na zd!^6{Z)D_Pjt*NFk&I|)Jj8B1nL*}?_Z3V@%|Y!0S(k9!a{k(wm>9yUhXBuJ1B-B| z&vWY58?8YN9{wqn%gA2fh-#w^q6Jd2G8fR=7LRZIBv~5~E{2bdd6ivq_A=1Z%NXEC zj$IJUqn?gNd}{A%T`nAq#2pXhl+;+zCPgAM&Qm9hcZSk5y+PK5mxO!&j;%-uyzRp*fsN8SzLrR2a5gJ~7?&KWwgtzy1> z<1CdXjl>9Fu9s4y$rbku!^x${d$Lfr(fazbMOU8gE`VSlb zMDYlos}9ti0a+qgqcY|*Ttz)Jo`Ue^xw`)9VYC2 zcRIEl0)^W>IR?*kBl+Qz%!W&bt_To`j9=ZVES<;?+V#TskYtNn6s}#jZa17)6B)~% zr=pqootMQ%{0E9;8lDQ-0K$;?)Dit&Bq0}~j>#VYNd;z#?ygBs9K+hdpU<;DOyY6< z+<~pvhOr8N2>YBM+$=-*dsH0pc9QlzHasmaC+CAE*zS+B=d!}xDUSU>eO1tU2@JOU zApN!4shN#FGG%*7Y1lJRDTO}_LwFC03_`0*0z{duYT_*;aXc(Y5W4lB7L;(hY;g6~ zP*J#y?1l~BU`{V7A>o|L9h3RH-AR$t<`!u!*u@_mE=SQr(F%mA^%g^2yJXwq9|ieu zu1ojInBK^01@N6H@YM6+2}f8e{#cadj&ERi^yNYlmdcM_jie zmNGT@tW+2~9dX`@JeBj*@r%m>DXMw?xMpH;TiZJ-v1aZ>ht+pFh6dG^`?c-RpUq6M zYErEE@>YHJ7VOy*VwFL(0s?cx2lxS^013NFgJE=50fqwFVkJfa&?PY;A$uoeT*xRu zveBO0zSDztLbA+2F!O5C?E2eKhk$K=cgZOBU!n6CE?hWwu3-!)AiOJ%Cbcv)@M@Z7 zR8&+TxI*wJW@kiJrM37x4hR3Y%=KdGPE3pkOI|2knnIinnx}xEAR4>M3kqs?(T&(G zK6QKclDcU`^SXf$_4BJFZT;{NbI`ifOVct!J-i!UWJJ65gakPtbpi4tTmSqevQhYA z!#NiO2ZhVHW57O+TlMUquR}bVkp6N6(f&;e#3K-dCNxscXs%|qT6mm?96>|X)X*S$ z0eH2*gAX}3=ZY1Np%eP4FW;G9X<_jkIU+dx4VdjOu^Ke54|<&uMJqlxFaFGCm+|NJ zsWZ-v^Z%01s-IXKRUZk&fjruGG=2UJUXUsa6Hi>_3Azw zrP&kLJ*JviI=v@oZO#1Ph}C{JK|ywf?u&EA>>-rOYS_t)HT~pQ93lBTQREBZ5cuf+Pc3A(c%|d^>5(jw&K;WI$$(lsjJJ6V z`|mGewG*U|J+93bDVd#_nMt&q^dcj>x_GHGZ}To+$cX&}v~qa!=#wnb2nY!fG%d!B;xoCo_wr?ZnLe^irC7F%CPHbA)%!Eqmj^JnOW-&~Xr-KUqPx@cGRb(J`4U6l$@{ z`(0N~U)PzxLH*x!3ae@hRtl>}J$?IZ@#`)4y5&S3uRvWp`$Pfnp}93vY>cnpRYGMa z|Dm-&VSZKR>c?b$88Op*KexhJzR|9`Au>`*n2i8P8Lyi8RYlm(R5k#eFBsKc_~MRI zo?CUEz*WE|B`V+B@u&?8a3@JKVeWBjutFw3=X_Eex^SMA#CgIHSC&=#Umw3hI9dWPTDR^E*mfO}b?J=VFz`MX|JC#ef4#oy~y*^F(E0W=F!#*aJ`;GAj>4FA)KwkHw;#?yeCEG7}a zXCvzF=Sx16c<|VOEuOXql$WirlMbe&wOIP{8)PGYFW(IBuDq^+Q?0%4n4#zb6Q8g)!_>HAgSQ%Im@>ef$ zZ+0gD%wq27#n->*b5yQ;lJ%)2`}{E}I*kyf<&6gZ{$Q7oLwSRm{MizPd*8HI8x^~| z)OIYF3doutmUl$H^3{(LwSaGVO^C-lNU~e{=34;{s^Y`Ow_?XeMxCf z4c=-g7#xS-&n{q#4u6LX9(SU*79pMKTCZQ1y8Q+O!ErZ_K_4k1CYF%)DS}JB#m0v7 zo9$Cky@$26@gdm_D}q{Fk-Wu?%kdohC|$ZbZmFT4m0_d88smV{f@?%;r^(7I#~Hr z#vz)>q1Up_*+KS5zL6VNE)YNtTMw}jNTKh}omzL3k7~vTzaaecMKjaL-aqCic)$nc zp3Lm+A z^>NFEzIxYf;X5D4GIAf(46xiiBXH<>VNG6Pzk3A`+WaiVIA!9V<1PeUBF{R*Ysn>E zmL}k!j)3e+TlCWX;aR^vz!K_U%IB%v2)6;7vuSzl(Q&f*!vem9!f1wY`Y5B!D0J}s z*&re}=|ggVi3}g6P;gzbP@%Fe#(WRxyY3a*ADTGv3!O`&RS)C z@qxPCZ~SLWa_bNlJj*o+jbw_2XFlv=bPqXotz0)~y1VRFg~_k=h+Tbn^m<};CO5Ek zvTLs1C=orRDW-eZ)4`Q9i77FC9eglsk5Af-<++nrM zarwAb+l%v;o3)ix*|7le?fg9o5@^GfJu?YJ13a$e?K{mYc@cEWnY*vk3I(&u z0NNj&`>I#uoUG=de%$1FnrTr78DBcm^7+?M9(ZO$Jt+^s?;!Ve9S7wzZhU33R&<6Dahv>u&3*_?I$K>jwZHNv@$YzEvd?;T{reM* zdpaTy@+vS;)|~e88S+VrphmWF^FDE_a;v|mr}#mv55B$ek;^JWwx^B{<_-%v*s!Z4 zXR^x^D1)WH!Tu6>G1)9R#Q|Kh1{@vJt^xDj!RWBT}PSfEa^uZo*Vx0qw&lKd3C^$CJ&{m_cgjq zuKI3hJvB0^>SKz?d#dZjN-mmH`mQ>k*cN8C|FHJ4jD7yMbK1^*qS zn5l+jGqk~PHoU*|IE0uJp*%-A`%;l>qS=yDh&|=se@f^(h5+Vh7hE)F01~rms!4_X zef`%*sqazZc~o#I5|e~Rb!=#r-7=-MQWZ7jmu?axbc8SZ|KOKwX97eZm8_GJUUG*^G^_2{6JES~g>*++6aX4RUoSlYC0xG6&iJ}kNm_bafFHm!&6U9HA zO_mw_eTehG{vlCHYo^**NmSrq7DPf1*Pd0gKO3?z2uBrJzY;V&c$ucC_35XANrg>- zL~akUrwHE7Fuy(a)>fglsU}>ahAYErlHaMpoq!~qTRWj<&!N3=?Gx+>@3@6`4Q`cbaggxgDBygglif2C9!X63KTUIo$0&@qJ$dCjhY zp*#o1P=YFQYl8SRC#rv2Tfj-av7C0F9$UN7&)T!+9$vG7Dg*_d2zIfqOqEbDUOMCS)`7~} 
zEd62mP{-Vpa?aryW@Ea_w7C$LQ-q;Ia;@k=L-Nz?i-(a@g>_W#Z{c53z}g{=o#q{; zdd6C!abWR^nxj#sDL?pzLN%sZ8?w6Q^P7{abQJGxpq6OovY8(=O`{H|1VG~SAewhX z81Xk4A`F3fa-sVel+s1Z=3hrjUo8~Aw1X~hKN>*gLAVm?XLDMc^^)5xUKF<~;Z`>_ zL2l=b1@~y|pk$)+LBn8LAI;?X(d<1n@nT=WXdkVAGhn1*?ZQ?6af}xD?ZB7h2JEh0 zdXz`DF0n=dJy$v8iJz~bTg?AkrFJ==QcT3yKd;vDG>6#q;X;A1!)y&#?oGWh5+GE^!$!wD{xId)%5{Oc|44zY-0hqjXVse& z!@n=QD;m{T4<7`E4JlGHf2u3lXRL4YY3`t-Pj=eDu*U{LKg_FzB_(H2w7z)sy|C|K z_UcdKZB_;r9S{7Ds>BT5J=*^KT+JS>baLqrLN78G#$W3cVLEVn&Cy)o)No*dc~Ba6 zEs^oQyd!x{8&Dkz!Mz=>fQFeF8LvV`gBcI?wP4RCs>B13YPq@!UE60-Zx{G`16p+4 z(z?}o?M66C7);E}m$PH~r`!?VX6Ekn>H7}3=|$yZNiuG2Zrpnn;B*(aP5ZFKv#9dv zx?HaQ_eFcWe#NCm9nm^Qb#>I=FbwX@6_f8$t#is(_&%Ei*@@^cCgaAUcQkRA{V~8U zzmaU3EPd%%NC9#(E15eJbPt&lYb$aZDD(LAjt2{Q1MWbN!zAXfM05%Hq0KM7eg{u_ zkMt9RM2FL_UtV9##-`*(b3^VH;SFBjTu#46sobP0%G|0G@Z{_APMkTVD= zCJLJI%Qhc+FNePCNi)@y%u3in69S=(K--YqNG(9`r{4A=*Fj}j@V9QuR%2C=w~Wor zwE~geHt2Pjpsp#sHddAHx;R~V$krMeABR3~U(8oSf&)rj(vkYRGDZ9qcpp|HgPia-tRT-zRPI@nA0atf*C z&?Do2i!6r^nU=ATqt{&4R}3GN?ZtasKOwu$A3-7d3RIr_$BGQC=(ZQw+I+5t9~rpC z7Si9`ce`cJ3N7hp<(=gV1B58OJ2VcKDR}yxzs%_K;o6-zAJ!#PhkhcZijIs__Uiv< z)`IBGqYa5BS_zG!L@(^$M9b@k2pqdmJE%YYORw|zVFly+-pOhDj}Wto+mt1k=VOBA zh!p4Yi|c*9oY+X#7qvywkEqXYHI)9fPyN`!`ZpelMN~@xZ}i0>*d>)T?c|E|!-Wjp ze|Fdlo<#arT%B(f7uS4?%uzmnpogpNcmoQ-^((!Ph+SWu?394HeD}oqTS zW_-Tc7`*(#W@*&G%j9MzYE@-qWq}S&0{WUchLo77t?O>MFjoAQ=3$SCD|tIA|L;|M z?!*oxmRn~iFRa0nHPPLSt8F+web7D^-}wSbBM1X(J!T(tqc(c{o&Z-PZ&%;Xnl)jI zGE=QPvT{V8n((S{cPJsJ?B)?@peZerc9Z*3Q@fBtiTEV@lAkVtVNu#t}t(bu?*z}(`hN@qmH~;mTL@qzs1Jo^#Sv7uiHNos9$=H~RjzN>YBPCWp5l{0PX7=uryM3BuU+`H(4(=0CxM=tI+7M(*)fWiK zP1v!3Ys8a)$qEo1Rf84f#l=2|sq)$Oy@8p+{^y04elniur+B}w)_Aa7sf>6gM@rO+ z1Hf-{e|G(RT3^v|f%huq+(K_k^;<4~FBWlc&p6r7%6n~xuodu==eEg@!#`9tX11je z6?{30ue|E}`mtQr5A_oq)^Vl%VuYR$ua3>3_Ng)AMBJK&?FdcyVw@)xkAt@_by;XHb4!q-v{y zZiT>*?L?Z-`WdSCURn$$5EDW3PdGF1Z0R+7<<}R9h;%$zPqrkdnzC#$j8q87&&~Bi zSM$krRF!?$cYH#d>;#n&82S!HF5KF?iC7DC&jEX5BA7~L&*3`K) zh@c)%O#q(Tc0nnHN2iw&t`F>=zMMZs_KY=mly_mNNO`BtD=*Lf#bPoI%8aFz62K2{N9FI4>#GHgClF!W0&%=bil%x4HB-T25K!8si-_d0$bptLg`)@ji$nLVD&|;xhdbVC zqfY1#E($n`iodKJj0q_rfa?Pox=;p+1}A zZt{JjG)le5b=4Pg-62R@>lU=U-N;H0b7{IYT=IX>xRdNDuWkoS>&D8q9l`7rmhykO zL{V`GwW{KD5MGpPzEl6}{*wWWhgAt@0(C(I&fQgkf$;AIA7)rGTln;+3>p6o_A4Dv zpak*ajjt2Yn$X3PaS6qell`Z|J6#o*M&62;O08%+aPLDc!YcTp{}_Jkf^1_-g}OMo z!B|(M^$X>W2uCi37gVY{va}w1y5{cyPin=A%xU=}4J^~dZ`I#x$Mqlvh3oHeFzy2` zGKg#y$Wvl<6Ik9q#1Och^s|Buq`m?=16B2AWyl2XGds}#X3Gl5`nFbhTlp6#jW1hG zEj8Xih+w9SVqX|u);M+z^=|NzP`dtpVC?H1hy;Kw%>FPUpc^jgJu-F`ekLipE@<7_ zhCr&F(F9p87VbMRmvJH&*>-EhHj5gEq(k-U@MGb2+YjKB1Dyc8){7F-)>}GJuxA(o ztei=5|IOxVH9ny8~ zyas0x<0EDD``(`D&mDMRv89UoJP#q0`+lcN*!y`ZyM(TR0l%gQqnXWK^%P12jS%i{ zy^?daN`mv&RF^&wH7F>{bm?_SOEl<)M-c!HG=mXef;_!~{eW6IAtA14&nfE+sh~H| z7!#_6BSsIJyZcZ+fu(`fi^2Z>5dl`?Hzcx>eEL>{iz?@{!Oql6E*T3rUROKToiHd- zs-eHK%9vEgyS9>xtFiw%HnTVJoTo^)jzrEC{*zEQl|AyBn^=y9!^&TxrUkJuFzmN;caqhya*x3b>(>CsZ0S1FbA~rUZ(5o{Lnuv zEG!tsJ3cEf2$K}b$hY4{!Sg%v1~XMG6;a#@8QI|x%`5Ys`7qtR_1x^-`WAVf$>Sc& zeu@uG>6n10Q`MMSv2;hV=iKFT>xOb?H#DSTfCE^yDBE^X<>K-0ocb=#&T;ok?jtL4 z41-b1au9Ek#2DyBgz<8mhS$VLla(Qw57$ax)&bgm>;!JjISnTO|(b2GU?E{YKb8L&R|DHmEcSWhi8(2+5F)Wu}lMB@rrQO39SDk|`oXgE69# zP!b{)BFbE(B2%a&^US&S*VyxWpZC1)Ip=f!IG^+U^LxVHd+oK?y4O8i*LA0#q;PU#xBP)P?v&f$Z|Smx zvr|*UDIH$H!Q!XhyxT71CCIwd@KOb3nWp9)3H8Q%0VywRf+^Qmzf!hQ&t^Jc#$%@X zn}WN?VZ$4i%B}vlzpTmM{-&;G=+(}map~_RkH6RNjDGs)$c5ymL#{GY9cd zIm9JuMjSsBIj;6#)6MwFv*kHG zJ{&~RGl_n`)kBy0A@Q$oQ%GYw+G%YvI<=A0OZ`}9FAPNI!y|6Vwl@PL0H(ek9$t&B z4kVt_czH_ulfcoDrwqy3NgpxAU=+F9ggj_Z30&-@+_5q~vC_D(Crxt~#%keyul_SB!esl!b*#%_O1w^TIraU(G60x5m9OcC*=8ZfVoSzz_RxU?&_ETR 
zC?gt7z637dHb10<_2PYvY(Hr0_%wGU-n~KiX~!#H$hpl!9D&A8w&EJw?O;xMZ-RQ;$$%JzG zs{1>;d>Y--uUy$1FD(uR*52{$HrVFq0D2sTzVlu99~r z6C#oFgj^z_ye5`mUK=oJf7Q*+Qf3{snd0{5g)bLWS9ErKVl!PIOVAs#t$#f=)}^4) z%~og~_1l&DjHPN^Z^)mnw4^T_Q_YI8dpJd(z5!thmH3M>%s?nNqZ(aUMu$d znCFi56*qB3Z+GI@zVM(10R}u zp(x6jx>%p{~=J8SZ z=P|1R*$$NK_0DiJ75saW*j)<7grx6JFYgX^ch&@2edQPrWK_(~pZcpAd97 zx`Z^PJNJf}Z`pwxtWm!K1@Q3jM3?)_3ZFZ5yGj?WX$X6X)-z;(-UqjB^4-VzhWz4s zXr_u-eK~xg?5QR;s+x*QthqqfkLXvQsfmxc=tZSx{K?K-JpgpgowK~yw0s0#4_^X4 z3h)KW@+|rR;QorlRw{G!-C3&+aA8siX z*>F?UpDKY199q^186RquxQ_cT(AWb+-)-Hk(f2WLF0^ z-v|i-2bJ3~?~f6WpyG{N*8<*~O`*Yq6U}dUuu$c5KAtE1HL(TMdY=B zio*2`&#Voe$ovpFf7AjURTnA3eJ?nR?!IxKX6FCNdZ5mIxrxH(DYqK)9~q=c_Xl>Q z+b9bts*+!4r&sLjOUUBJYbG?@q?#G6J8Q$;*voW--S5bWCH?wR^RZ2rnFnrV^JU-* zEYir?8<(ml7jz$6a=RhC{i_FtHB+Xi$#ZF0l+<%{P6O@oBn|lBI+fsj zd8|T9RpS@RK_Ct2j5zLmvvB#R(lAj~ECHev>H?A6@!O7NA8cqfLh#~*_;Lz9-^6%d zdGC zFTJNV3OY!<=Jd!EpZe>L9sx$EXP7%Sc^(7&K^$VI7kz{mZW~WRm=g`CgTlAf`BCpW z^^TsMyME!`mpucJwzch9Zh=E*UL#jUC_s)#wEnf($sYkz3Rg>C|NggYNY9|DuF@oz z_FqFGtHj?3?by*OvZ`z=A73Qkf8#C>-YtA3@;m4f-NGGc)raGM`Q!&1?4lVPE62?0 zjYvNi4ObmKJmXMuB;vJ^qj&$BKr*VxlDA3gBL)FCA#)A?>x+=z0ZytksoDkZA#swv zJ(^#O{7i+CM;F5=`JN4#fg2K#g?f9`QYTbecam~jbOm?6=5g#6S<1^w%;=@krbc(u ziG1<*%Hi7;#fMvFuZ*uK!m|7d;60JW(n>6YpODTE7zQ^4l#vqy9P1V&1yae$v{&nxYI^s$Lzm~p#4|<3&6&3b~(^p-3Ayz7_xvk^BF)MUK%1aUm@9fU0Lrzh>+RO|&- zqo*8)_TDOLoTya3u56s!J~W(V>=7GCGQgh27R>9=`0CTiaHPf6oJUaI->-SQsC}Zc?{}Q*!=drX--nNP#1S_A63dRyO5$4+&YBsZ%j@JV z^G7{dU!R+{KCRV{i_$;ZgoQUaY~~yEDSj;D{Le$Wf?C~4q7YG=Y#)`Lpj;G)Sm5T6MU_h& zYyaILgTV z&|sgKXmoL31WbFg&?*1AEUW;cc->Yio zDmnh{UB~k8U#+rrK#Bx3u>atfk2uSvXu3C5UiCf0EaMsxa1_MPz4kGL;M-p#={Z-l zM2>kGVJpd~J`?#|rU}hd7(+SM%qA}eNLs1*de%AO+(ThH35(_I*&Te{z>*${`sIl% zZxsc!7C9LMVwPCO~REAz%a1ET2pj|Y&B^CV-S-qDGX?}uR!XRp~ z#d=x(+(&83%R|W^pYY^gBeXl3ZrTHx#^MX^f+(9S&b(95Wf#3PBLzJ@#z-pE7zZ#z zcdn~CuG_xcEm}68v|>gPHW7`G2*U#;Q@DTuzgvM`GKigW&V4cG%u>Z`4 zF`K%-4d_)UZ-c(syJ^`K^c}k~y2b4}kI-L67%Bzlp{?J$W7(AxCr@I($^OH99)pqiaVl@?5oeDvYPLE{HUHW3am+EE`$Zvc)@#{A>9as3nfX*O&C@dsTV zE>2zn7=72s=l#GL9f1T)m@=QBuqY?zcNz_{JtLmmlPay_kAY^ev;AV-~R=mEx zGmAzljoJCnE3_kNYyX-DLdo#3Ly&UN4fI%tOb_Cw(&x8K=0>o1jo{NivSq=Xdpj2U z#wlm-w(&=r2E?1{bB;QFHBsWz1D*aeO(P>VrtOgIzOi|0VH&a|D9DA`AGkAE^J#iG z$Gz`grt?K>MwP&zvd2HQJ8Dy+Up1~Ux-WHpdX|>Z8=Y>td?z@VTHIoT4XPu{n3NAq zL~h*Eb^pl%r(OPhSG{(>4ceiFAt1f?_tjf^_}8lJ-TS2VAD7uelA?5qT@0-^0g zClkf}7k*eEW`Se1y`bjx>SM4X{mRMz{DoPV%z@cOI{l~rmwxhxuP(ZzS$E++STaoe zOXH*s-@+BS*m?C};^WLP!uy&Hh!(jn8_+tUSGv$MSd`~EdVkxalM8*D-7K%h4${^V zKKxvy#B%K$8h)D-F7VvOpj7@N9a{Kw;eI7MPS*L4M<`zV$De&$*7A=()8GD&Kl{@) zZGCP4`3k4_Gmv>4Vh{3Rk~G_2FB4@;Gh(`jlsg z_8qX&=etUgI4jnC_z()nqw6g!`HdvauYpT>Czq}`6kncis__q z*r|#aYs~(hmEC)4+&E(Ujg_n+_7%^HzdAH^Qu!|{{mm0R!j(mxe!;6 zCD$z%NHK8Qclpv0SzCaI3&v`f&&9JZmo?-~iJ>7>w8%CGGFu13rMxvQN)H z;>!UTF(hO|_F&bc{Wm!^BJWq3?Ylaio(M%db7p3Nk6DKu;Kv{c0?aAg{&LBK$(oxy zk#A~{za+9J=*t&7z(sMnV@dKK6v<{2B=;WvSa>(GV02Vbl0rfv;j$8M&g^l&#Nfg3 zg1hZzA?oFvN{PM$JOKCyQe1A0d>PIoeLg`oQBZ{BVo@rqsH|if^QLKl=A`(h9m9i5 z4)!d3c}2Lvg>1}7xAr`vi^(~?a1`4Mj;`h4<9k2+dG&t%r-@|;JB1TSzSCj{P!-bu z?90tv9@&HdcX(=+LL^oG+(xjL;2Qa3h{+=XY`2THtB69=8PPrs6t z;g8U@h8kT1$7gQ;x`vnGbyWS&SJQKxdHKGM=q0JAwHK$q@0rzFDHh}xPN%Gx9^^Or zz0!BdUWd+oJ5S?nBO%n}I`gv@kk+cD%Az;_-Nf$yh(55Ssbp>jUa4qL{e2BZQgdR+ z?#vx?>*~y58kxVJlmB3ND<|cn`I(8(3Dwx@apvUQ1b)kT;HY2CK*+2RB za~{Qv|9ulZLl8UqOZdT`N^w_ScrP?|H$fZ+MhZPTA{4a*rFeOtn19xGDx%t^xe)@& z*OVz2j4D1b&RyQ~`21hecZ;zF#R$&7npd9KDrJrvbL(1+(1c#Q<}T*7e*Px6?-gqPQRiUV4pCBtal>@fNzp-wEx_>2{70hD_K9Pm&H|&u z=H=n;JN^B(1%lfaJjW0OfAppezims?w}A)(YMf$)f~>52oO9@? 
zSGIMqU*dl%?^L9TsF;g_JMPY%O~R(`@=Hd^S>IW(EiKRtq~IoE$?-|{m_W*Uf9|T| zj3Ov3xJ%0x3k54bu_4={#LOy5rq0n!ei{+>`blaZG0z?SRIjhtvr#GCTZRAUD~)dR z2dPHMn-n?D;!K_XhoLvMN{U4R)dO;uOMgya=X|?!{ zMuNTqVG~JvW>NIMTwfw<`HeO~|I;Grn@%*Oz=_zoi=B52@Q`9JV!JLKKJi1lfbi{$ zl-<1dIh|}N5Aj%nG0Cf|2TYcK`eE>i6n>Fy$0)X=JxN$Z#6!*l7fvE+g2n_DPs8n& zZC{Q722C4L>aF^)p>sxb+4e>FUD}Da)J}W+76eK_l!x1sewhk;xkWlARR7b4BIJb- zz#VB=`H~(ZD1ooqDWYcbY1%{vu(d@l^54VE`Vjh06^3!%gdA(fZfdlcCuUO()=+`y z+2L+~DuxFWy%VX|;IJ^SnSaW`?)8_vkr3LifKSSbXsF?)2ahZ>>l&i!rT+<1y}dMx zPT?C`sqWp2z4wWzM?nQ~*qeX)BYDRSkN<>T4F~muC3vY=Vynq&;{vXn7H@f3n%F=} zit$3t-E4c-VsQu%+lqPvX-yPpZogfO_IZ$(bKWk;fh@wM|7agA!5tpFd!tEytr&2% zyvczSj2Vmn-#i9srKOJ-)>pv@WJsZ0pd6)rZ; z^@DCYl~M(+aY6@_r=ZtnIdZu`&{vms@pzDd^CW3CP!MS&`9p2PX_=W0&``$|5WO@Z z=Y@{RaHmZL!Q({xRr;d97@wI9OovUE~YWF zd+L0M(LUPkgRaG!o5}5FR{Ln_l117@#0{S9W`1>QdbeO?Z~p5)B5jf%?34R8k}(i( z#fWN7+|;>@QRynX>_cPJEcB#ZcLN_AEou0IbeYkq3CesO^?RQ$AzMsb5)m@HQ$}w@ zMlvxlNT30Uqt2ov(l=2dyo_xNa|+X2C3O?&abeS&WmMVb7o=8UDzGO!N%KR&ugX}0G7loRml-rI!Gj%J^Qllx_Cwb0bhh4Wstr`U%_)> zxD9Kurz}Y?vI;A(MhU;4orwtRZxH4ivsiUlLJ3`}@r z37ciD?m47VL1>z66L9;KVR^fb?GYr560xH@&%C>^4ZWsCnBs`gtX#6szavgGHuDcO z)ntV20Qwu%tVsc?JTX3==!p!=H)+Su!*i$Q3rZ2=XwD09;gD0bV`ykOgnC-s1?3A6 zte4Ba4wRua)T3Pe$b8o55U=O`O^{NF5VJvd5=-3YKF~dU>lqZx??oXZjF5DEoS~o> zqf?ACRyjNxvbw0R{^S~C(l%?zag6e1${AD6Zv-aNj68P7={P7eEe@QD9N60bNy$l! z$U|pJLMK@@K6Bp-)OAo*1b|WZbMt!YjbX&6qiO=82bKTlIUN(|!QBo>@phy(VL0&V zoOpEQxKl&2@N-|6{*8C!%D~#m&H?uxIPRaPvX7%SFp2><6w>H;n=kdJt{(lo>kRW& zO9FAycBf{Qk%>4bT1Os1uZhB&OJceAk5h?;U&|{xzM1$OY!$sAH>3x(C`pa}){>T_ zNxnY^u$Zi>I*?X)oVjq<1$XJwX4QSQO3@N4ej6JavT6?iQ#{H_w4avZcT5uKuc=($ zVe`I+ebjU%Y>;vMA9`g%b=J!2(H54Q$Qyu5^*gKX7Ex#eP2Fv@IvVQ^ucL#`EfmKR z;ttxdZHry1k(Lj<+M2;iEgRQlH3{rV%;VCbAZUVJ=87HbzVEQ^G`trh2`PP>^&VM# z0c{;|3ZyA4$ma}p0sy`nl33zP)zsA?30J#LX8hazFSn@J6~6$Qdo;tM(qzk@F(I&I zRe`zw^qBLiNQ07FcX{#H#*P-O!Ocj0!X2uPjGubZU$xihd60q7SwqIsN+mziM)U(z zjaful-J+DGj{&H}5~%I2=%TU);v`3^WaEd#@ItpksQ#7VyC|`n*Q-jhQBt6^P z+B&&c>U6}-yie7wDJbx(jg-EBfA^0MpKS!O2hd4OG4J+npD%a?$Sr4MoletjEyH{1<`HToUO85dZ#Pseol*IayAfX`9gV zSVGKv=H;PVZT3u>^E) zn7W(({jT|*buQ_o&o_PEo@bo6mDpvQJjkhBp|qFe1_(kz2{|%dwx=RfpbS;mkplgV zv$yJe^!&G~vNEO526I=``LIDrG5#{9{z_`n@Tve&U;?i>-AqfBBiYf{7zIq|<@ELS zwH@2DV&>@)wvg&WIRHi7_=JS2TZyI6zwi8#^(2S+px4aGptyiGC8kg90ejKEj2R|@ z8Sbr_9e)OJpqh#{<2NEA{OwO2p8cdbm@+?UyPqF%^wN9G+^XyXK$i;4pVQ4Q+Mn{@ z&CEwy+U>~7o4#*}+<0;J-Dz-YJD9v-nR%C?Q9$W1m!()a#DA(^e+5Zi!`v08V=qn9 zFkv$#;+;bzpmg!cUu86HzJ(**i*`lzSv_dQ=_! 
z(8h{A*@rH?Ofs%|`UfcOQ2IcVHCdCDy+7SA&ioY{YRHbiv)Um~$0xu}ZVvqz(Xop< zGY@+fBPI=!0ji3jI_u=!k@kf)w34gZ@Rga%SJ~=HovztZAQ+sBBN|Mn$nlatOQ$EX zk_I~zfGmE;!rhlP!av0c(MoqkU}i4M;&u@I+B(3^w4@pP68$K>!~VYXm&xj-$B+7@yY{ z{0?hD*wJVAiMa4f<2*f{GvnXhCQYb_1FYM7Na$VJ?lFV&AGB=^2~oXc_L0s{X}HuQ z4#R&(>GILhXX;-reUdn$uxh8t$P*9gdM{X*j#TUvN0;HpMuooEvlZg;cInXX0YP9D z?-3)r<;j72&URF=X3G$B;~Ga?F4`EO(I0$m!Prj42g04qLgn;(S>#dd)kB8{2^~%} z_INa^Dk>rOO*$+rtmL*>6{R?3-wT;j-6v>1IiHh`4J5gP;_sB1i8>~->vHv54)vnS zhS-3CiAin64WluXTLEO(gNCcPCecRusoQm}!-t2#9;{FlC%>C>TFBcRgZqj?T0Pr~ znsGo-q@4O=E`EeT1vo*jUz1@AKN=kIii(C&Sl>;96ZITPbf6j(>`&FD8s71m^a9P{ z1N21nEo_e>_a15``>UlpwsYi*3C4jk`2;krf8KXk+WLmUS}()mCik~L`xxq>Gk&82PyFDW?-K+CxDzXb^63OEA(h%MUD6YZ{E+lWp zI!3Y7f4~a{rD@O`PuXwqXG$KekoRS^y*mX35dpd7hz)T9X!Zl?5a4+(KgKv4T;j*J zpD4Ztljd0K`(_aNy*GdZeuxv_2xvXP@)><8lWXzi#2~D zhJUe3T`T2({tF_{cS?nQZ}E3+{Kk60EpdDgvn5d_wxtKGU@%r>t95SX4+=)H-Ql$l zkDP;^wG&T36tD>|!2V1t2e2IN4<%`2>Yu|3xQ%vleN<#Kj>AC4|yvj4J2$iAb(V>GF;x>)X`M%%hkQwAJVart%9PCTxY z_mWRvE!XC5SduVxsQzhlcWPByu$?Kp_eM~YEZ^y(WsTTmHBfxHjcZNH(EywM-LofGC04}Lc0$!NQmk#+W>%0|Q)8N>zw zV?-di2^l}_q(eTMMqY$lkZ{|ot6fansC^Q7zRzwE; z`d)CYuLcIzuIr(9DDH?$A`Vd|*o!D%IeHsir9_MLKc-(tkDgB#gDq4={SGOC^hTM) z6JJ187M8>5#_ZTiC8&Da@D^S^5CT6$3 z1|_X4W*m6e7F;1b?B!23bD-vSr2j;!8EtlMZQ5x1zV!BMDi^s#xWQ0mr5}+}a+~!u z_)@=L%|CaNh;-Qhi~jZ!^9C_Vof*IP_bp;F;=n)f70b;7Utv4ji}rJo|SRnv6s`69t2}%#rPfkh>4vne_q3_m+;$ z?bJTg9t(YxEPnuN(I@#k)4_rlYeD5^3NGrIXMJG_ush2WG_!OKm)WF8v-`+Tf2lRh z-f>#LG&rW;#`wl=#>kWB*ea(T^ZY&TVgwni7nzB+JCH3XxiG9`obvp)ID+SN6Ex{- zO?Dmr@@d%eNXL9m25kVYqmFLiQV9kwoG8LAuXhB)v-bntr<=;tlT>t#js?fv`J&;& z(Qo^T+RNTpm-0-fV&348Yvs*>Zb3p!j61(GXu~*V!%{w`@y)(NEZ-SMDc9^L&~!## z0%|fo{E*|%03^+5EW`3M@kLiEtEs`q%@I8$x8dhee1?=0Oq*R?>ZZAD+6`&MhL1+&{_|2 z%zl(hX*T^1kp}BF2`gK;K^q?g#V2Q)WKV~Q%E6I=IpN`F7CYnK#m;#O*&5p>=!I2_ zB1Rj)k+(h^{TbDK)}%i1O1P_2kNnOSyAB*r$LjLjhycoY_>W=s`0LdIYZyfLZEEYu zF7G$siL;YiC;7*{y}9+^&g^j@WHrYM6eliL@uR&m=w1#yR+?@ygj(HbbP3$EWXql+ zuek-4ierI*QtC&BcIv!n#mBz0{vFObMP~@YI<$rAz!g0$LH3)yFlky5*VYl6*oggZ z1{Q#BfEMrY=Ocj$AG-HLV>!(QOl5S#iMc?tetjPV&2%=k{FL6iljD2rTcXE$+EOMF zPz!Y|`T@x#3O!H9R1%wPS9G{>H}Cz3!}sS#b~ml7nw!a;aAFrxD*Ama?ujdxMr(3 zKEZ!eUCQIoXyWG5|NHrU=_;Z9Nr(7sZYpqh6Ikh@mpyT3zlW^wBqg8H(|r1Ci=cS$ z1NfQJ^76#|J4|$L@^z5Njc%{Sxt((4$#+CK9lapk47Ara9}H3FetMMv=ZaTkXTDSgOxkJtY`k26Y%s%K> zg2!%bH}tfVfy!55#I_cjZ7H&IgKAx--WpIMy5JQRiRqFD42s1W`A`B^ubEN!s9QQ<~(vsxr! 
zUSO%xT}}>ebwd7bn|LNfV92&@ z{Sh=1x#D5_&91eWY9u>m1i?q=eql65r7(s<_%?&CE3%BGN!NC@dBhL}fi=vAH;b>u z&=&RboKSaU=^#?^`KzeSWZJ~SbXd=oEZq}`n2g@)nz97yEDOiQ%2Bmh+kfHXn^yX) z$#_tdmb2=n*39}!y)3^xkn^v}j?iQSJUzjST(_^6>lt-k;8vNh-?y5~KKvCQ+nM*g zGACxnNA}G7rAW&7wb1GHL-mDiNo`KA+@b0FE1mHp*Xt`;cVuSdulV}SClRu4hNNdE zn%O$T-zB7UY_sCyPk_2eFvnpl(4ZTbXpTW>*C77U1j*A>;_8R*HoMy1ZCytuHJ$Zw zeUGk7lTGW~#L&CwSlPRGU|Lv!MiY2r&>`A@8pGWkXtJv!BiL zKgx9R*Ejb#z^?z>OxNEdNdJzN=B71y_T0Ix&PJ=5xC#0kauAi>T>IJZF~^aKfIr-Y zope3Lj(6eUhi-Kvje$%KBxvI;;hL5g7sqU>YQri-hAWi6!fY6T&q4}proG_B4BGGN zzG880X32RTMY=2^A_Dz02o_O&7#N-~wOcqj6`VbnMDDMCgA@`vw(yIHgo>gSrxR)r z;R$DU{_8EEuLDUq6^4ADAvsay1)t~ztbM===uQsfZ0xXGxwB+;!6=>CXJeB#x!3Id z-dZot`8N{dg3R~SPq9%E4H9?g%-x9D6--uyTqA&)Wa2kJk-meA9SP>w&`huTCOA1Md6R_V={w{h2TQfBfx_OZfVDM7&?+Fi$D#1KED zD|u^sd+Bp8_7<0dG`sDjY?~oEH?ai`@YrGvfrs3yZdl^LT*^LqSYo>Vl?nxB?;Wd% z3M$&KP?`h;*=3k!1h!E* z&)#U6&3Ae;6Tek1{&h3&=amT94cy9o@HN;wmS6*}t=qSki$Sna ziN%xoj~Ow4|D_OUah2TGdl(aQ9I=CK-SgwGJTVb(8`N=#WAZr&Hiq!`~I&Rw)IKm)$E zw$&L9&pgMEHQgJ46`8R;6s9f*7ztj8_G(dr*#mnxoTXE%DTJ*Ap z8!+;@C3^nEX-z)%oL1(XZ6Gq**r&yf95Nw7vlf%p8Y~PHgG0i%ipAf%rwn~ps=38j zRAGnIE}{lG{kU&iezZW%snt4EV8QPRe$2HosiM^I=;%DOEg3o5<~RT3bTxE|)sOs^ zPNdzDF9^!hez9fF(8+xURHN-fr!=sh**_j6wI(2{p+~E#ja1j*2s}>4}8;jN%765E7D$M#B`c!k?I;qKt>Vn z<9#g?WR!I25M*ADiqeOvBa%=NyQ)npPrbXLiQpL8_~_1h3Z7V$-_AW_a2|GwixXSg zs2rs{g8yJ7x~Wa-HoX`sd`JNkIk_+_rZt--Z~P{DL86xq!j8Zy#Z`-QmzWyd}Tsr&L~1In(MjE^Ag2o>0Z9 zj6ekZ*}oo_;?8r4p!6>6Im*wdW#{@O5Xdj_s9wE&??Bw?=p|B^xr4gpg`d!E!?ip z@o1H#^iXBr&e$PsqlbA5rue-D^W7FD@y9~0^>#)PVh;>1yR006nhbw?e94$w@+ zJMuz*M^A;^x=QJqS8x=a!cU@1-){s>4a{NPOOK_weevB?UwwzoxW20D>fRc%Xy{6~ zAc9!3=Zben26peS`wq3FKVz(7Hs@h!pK z@v+Y@v{>Ab_t84`22eQt$UoiDnnZs?ghxJI1Y#rdLsb@*xj8{w!GAz6#i{1lxVRHo znf?y0aVSK`kA1^xnqpeLXm&NxW(JmgN92-fDCfvakNrHoIiI8kf<6PqHuhtL%L^P& zl;ih}1Px~D#rA{K>*2CnG*aI*sMOdh60Bhyz253PZBj4k$6b8)6P1uPmQYF3-?PUf zXZI^jLO~BKqqe^-Q-<5rB!S&ty@62#2{Vjcd-Bl8ow-wUjU)KIy-Sw0hlGWx!%R(b zjl?eaCDCCG#pbq?t0->bBLivPY8}ixZ~3kv#j>l*KIhdDaOwJxt>tmMGCuG9cwI!$ z*_?PSfH<%w0!zMepIbS9@%BB5=TVRi1{Lyh^DrW<4Rmy+jWn-?6jy^*If*uxsFLuQwWDKa`*p!>&6UW+|bq4nVi7G(lW2dfq%oYV#) z;gaai)}FPBhvbeDe*kHdii<>%KX(=EXGvLEnEk3PXQPs0rcplfWw!*jZQF2?1E?u@ zDQ_BYQZ2Y%K6wP>EI(-!ct6)j2+7Jz>AWH`>Ts~{iO5(tNLCiX?%e{n#rs}6kHd8N zI4I+LOG`^9>uneOIK7=mdG5!B&xKDB59rETMLj*h_+-Z({;QwtPhC>J@W}HLsa8zo zVXi;I$~)^*E08^MY`gN16s#cqaTH zxB5?R^v{cpAC7Jht61nE9v(2<7Q1)XY&b(6koE)8FyV7YHC4okw^!0G@h@;V-v&b! 
z=h)n$RXTI{gN#3??SeqXPpv6#7K~Ylv%=y1bDo)}3yKbV6W?K1Rat)lNf2F*>v0yLdnDrLE~Jw_rJG%M zT~Qycc)6{dEp*N=J5B(0pns$^C8Q)SI-W8$ngZvQED|-xbo7ov9PZ|?5@%{LoEw)s zXST?0SJ*T&PYo+ggb{ceK|xJsCDyH5i3<1~`5*QPX~Gv>_tGyIof?-$)Hse(bHYIj zmu}fovF9XwfPU`!Y3|Cp1uyv)8bwdgrLe0M4iG_!fq^XtJ}E=y>fMd$0{Iu`zk&t8 zmkCqKvf@5reut0fF0*volr*(Na^CfO1%iw42>O}GbnV@BMgP{piowLR(ZBCuG&r0F zV3(L{ztU{v#8p+!3}^%gNH##OpfEDIWoBp9$vaIsp``)Nv4pt2;<_NT%>S-nY={{x zkEFBw=)M9IGdxkWXeJv&WzpQ%SS|@!2_QYH!qW(*RDJrZyy%ttW}hi7JvA`^M`!sO z`U)s*`Who*1ot?=&F%exnHW5kX3{k<2)^wLg$6g&C z-*|C$7>sdKf600%&c2ts@%(j*0yO8=)e|etz5jzjnbl+5xuU@on^RGZ~(5 ze<6DQOBDty4-sp|DBfwX$iFRP6yXd8NA+WlEnmkhSjTvdV`3gP%`uq)6Ocmg=|eT+ zyUruMo6{jT4bN-^gB$8)Fj@OK4m=z|y!d8Dk%Pu|ww$5@JOI!ycj6SLF((kjw6(QG zpWz(dj#siKf;u$2H+_m$ix2cKqZimo6O&SjJtt>e zhi|DU8)+&AL$K^$z$UXtAj3ki3C`Q74r3&ad&>N&W^b_yeCqV?_{pBRp8!^k+6CoI z?IQCv&P&txP&F#iai4`?e9Ix=Zezcm;QbiDA1C+=%jbSMOJ0D`kiYzc{u3QVU8z-MG znyEbqR;E%4{M{Z1qw^f#6a~)0q6@4+(-7%jabe2cVn4VyT)7@JI4N?NR8Gs-al3et zO-)?>Tjce{mNee;ctFbT%z&+f`npYCP8Z?agXC{rWp_V9O1vn3BcRkv2}efkjRD9B zsVfM`TtsUmkk=L@H|a{}daxHFNe+-?P%$u~EHmKi%q5(Uu>=9vCV{W5Jo2&@i4v(> z&!aJc$Aw7V(K0~D{~JFtj+c?=8lZAZSmSw>aHIm@&s9$CI_kn~M)#OAqn!$jYOybKD)Ek znt`B&9_gy%BBV*!CM~V+yqg!lByfQ!8>t{ByfyaQzr$OH5@mZ-Zr?rrnrYK= zFA!;*kQDM4=0SIi)>$!S=K zh$a*5xcd!0-q%vH&wXw>WH$}6?)^a1VW=J?1@_;A?Xf1%_S20p7M$W zn$VbI6{V6!@u1+M3F&_UO!Ic>Y@7jhQ*on0r(ak(WCP$DaOkMR!eOAA`gHKzq zt|QgmjxgK{QVc>=;GG1pnM<|WP6!S7F)+>5)zaCY!-Y7HKB=;b`KYb9nN|6vUr5UP zT*XJ=JV}+wu%nR9NnQ(9u`7~8=>jf5Z4&^0!N;jK{lncr{u-jzfCLOnhTm#Hd?uj5 zUJ!;zM~h7$U@UUz9<*eDBz+&Ud9?R3ojyG>?}^QaIzkoS-zi0ux;b)*-vmr6{FZ~3 z4>M!hzbbVf?U#Nt4hR5Jq0n$OpHPgxR=k~9m;T$JSyT^H^cVLMz?-3uUA64AIgyn) z|8o$+Xvun6Ra!mga$)tqp4B|P%rfSilrQp74iJb3CKbIi5De$y@sCgmv*|)t_k$2b z)llesl+&wP-_?9V+F_`2jP|aQZF9lOYy!G*8ZuQ`=Msne%pbA2nZJ@GKG6vj*^L94 z*WtbZmzrvql$WFH2wnJADce>NNcdnk$n+Mo*9g@20TCU(e2net9X>RsJ6Hwq@iiW< z!AkCZZUql2lmct{R9(<4^PYB6*xq|DaYKZv5^hHxU{mO*Q! 
zC|{xXsEFkZ1Ezx1h&w+4gznuVb*B_*8Q8%w7hgZ;05;g4c%d&yb;vt+J_(E`1e?lJ za;<~PI#RNK@FYJ$(G?X6I&SiU$T_p@FK@wyj&ElnagO|ku+^7g0wT|@s!?cm3Q0&) zcBWARb#LI@v&`}fLJR;Swlv7ZPE~D977i`fjPhkZgkCn$zkP7Q04v^L*f<*eP z2-Oq7lrw#)r22x=eCy9IuNJ_jP1QLMtc|2I?+#s!#Qk;q2RjyEsH-j?V!O!4(L8%E za^==;SyZWJ5aMSRQ*V@=8^WbVg|lpOyz{?RIl;A~b^hAW{cKDlH;>Y zMuZq^hB@8qnXCwq%jw^-c}BnAww}ax16T{V%R79V0~2x5^6{TO0GyBSgooki0Kn^j1aJ$rkzO=-Cv)Y3P5xGNc$sNFLCpfXSk>dr@mr zKEFjdo-b1<5AKz@<>ezIe|?B`C-$jmH5A~aRZ zy2;!q0C4i~@SsliSlK*k@-uMbfLxH&<>vorqI+sm;6o}j9UwQbI(fuvEyDp)oY%b~ zbart0$meg?CDM`dxvPNvyg)j3=YT&HpP|{mCGJ{C$fbqQYRD5%6(P|63tD{}ieMcr z$m|2xp?o#KGFp<}gy4+p{D7MX&yn%WCCXE3XjolP$-nehzmVLNZ5n;|L+Fcogl zFT(0_oWWEZ`o2i+fBzMSKRVSDp*O(Ur725i)=iwBGlFofgne6ic!F!!UrNEc$1zjg z6knd__ZuKb0(%vA^n{5?^~&%ZCzc%*tgS%*#LEmrp=kP-ycJXhD6h6EEDdUfX-1oM z&sVc33~_7N50U`eYWdBo983zEc2z3QV512Jj;TuV>Sb&nkm-b+qV|Y(^Rj7oxYBSo z7plF4$^vDSgDNAJ56qyQ;_?A9YaLh|lnfvn%4)r<@6JXj+@J|F%Q%u*1FS(lp42k4 zoYV<{`&Fw}wH7X>vL=EcI)N|)e(~Ag-l^W=cyf`0B!OOs+%#LB-fYMeDeTyL!to@1 zD)Q1e|K1*be{Xb6SjF$Z!7>j`392M0Sza|z0UrfSeQ zdjh8f6QFtcL-2$0<0~)P=YO+W<+=0RhuA<-V(*FjbP?!HE9gyOJ^%fr;clluMkk+rX za{y|QXourw|7J`v*UVnd%`L||Tz9v?>svH|5^j{z#8c`R1gY-oE)dOr8fILo1ILNRzXh95YdgkI$d>h!O zF^Ovb-LCW{W@G4X0oQKN6x^xgI_WdtULpZQOGCpX?VJfT5y>Qv9Tao;!@~ECAbP!H zq20yTQczF;;7L#Om%PuYyPpg{Hi$F5^5mu);o(YjgpOG0?cbq^&G!AK6K()1O#b#C zy6V(dcD?-b2X2+-Zq~eMh%`Sx|L>2#RB5=xt>*W>-KZ*kkC{fWv&MNJ}kPb?qt$BDv>s@Ex%pf>AtcTFvwC@u@ z-8U;b?SQHVH5e_np1Mt;lJz|P>u-3CyHn0(Yi8%5g3Nc?bF$1-F1|&nB-(DgW8#u& ze7g|B{pCcQ%AMwd_LwRt;WxNVx%5m9pgPEz>y~Wb0Vh0>%{zJ>w42WXENDbDKX!qo zsd7Rib{e;n2W~gP1Ig-4xD7B#VbmsZGeQ`%^Ibn`&;)D@%~R}XzLG_z+x@6f0pPsM zq4D0%Zt2mSuc-c1`0bYn{Ma{iji~fu;a6P-cBJ|%B|~m^u-)@{thHKoY?{Lc>2CkY z9-`d*d(nhU3v|m-gnK$Hu}?ahGJ1Z8aby5+oxjmWUop`-o!o5gn4U+J@5SeUj2!4T zEuSsFg#-eY5Pd2}E)S~Y^8Qt)Vh+?KAGL>kXY*bU3cvac%e`;0H5F03o*GDTGt;S@ zhz&0w1ma%w+neNA*W9*ep7{Ek=-#PSsUpnO_rv7R$Tesi8+WA!QE;FXCkB>gV9N9O zJc0`+Zkfpb9PE>xEzS)aCn!sdfZ#U0!T^g1^AlgJDq*VOU49=reU^l$VH+GIo&6rc zty$2RWYV(Lvu1Sb2Bb`}TPMC9wGrNz^E3?Ckv0RtZ>`uwegP7N;%f)Nk5To5m7M^gf1$M34dVi&=#BYmi~L28sFlpMb!f!Nxh{b|I8L zp-&eWt)Lh)K0XdSIk-S73)oHkh+yJW(Y9@M9^J-4oP#(pgbZI_0uKW%d(>UGl3kkN z5h;R3g{;R-@$)}IlnJMN!;6mFW1tlP*tvVMfnkPjMgCt)Z~l8$wuyhTaeNF>Nb@5NTPb4 zv;MLed?w!ish1eJ>l{F_37}b$yhQNy5NHwAfQ4Qn(M~F90(h__9gk*k?D@LM=3Pjl z#ILD{g2>A5cd4_9SV!a**aC2@6-o#x&SqZwK;oBY2HyA3Laa6=VuLSm<|2Bj2 zXV3I>2w#p=>dZ%@=aSr94y=E|m4^hfG5tFbkenmF6+vig-MDOoVLjuFpVQAN_Vk4kgTCC~V?4pCv0)UOzHcU^jN+?_Ib+v;AYSQbau_u6mXiahUoF8but%}*0A zroKIuIQRb7sj#2<`$SL3T~qEC+(KlCtlAV9nI2}Z_mN({42hG<&>Zi$7HxC3%Ay#; zt;!Chv$6I0v6o|%egk2|!+Y2Q9vx5vhWsLtvh3i}uNFh9;SZuTs|#`m4g^cSmZ={M zkobKqd{ma5LD-ZbUm z<`H^OY3|uxwZZ}V=3g}%KbrK8_v|`6ktVCscBHe&qVYv8@ZnYjP8|4~&^H^&q=Gt( z_Y;1@*H@*5l|N0qy3rC7NFY#aY%d@W1y7E)6wTTiAtHUN4t1iN2qZF+UAafIv3#zo zhd^j_JG>CfCcwqu_t@0SAqht0k6YB1n-|E9|F+MITidx|pM?l~Iz;-ui?PMCkk&;Z zfXDJs5A&AWBIaD1t(=y%a2d73q;+FGSXex?JH_piE7R4#JX&eDQvocGpHM$?Fy;)8 zbf5B|$oKrG#+kXvXW9|~@y%=PdM8)#8W^ktgnOGC&cSTk&c}9Lx~Ibq)d74C`?L3p zGz@0Bc*mHs$1t4SU&+Pk8L4ooqMdtvGIJr@q8k79_}Z?p$pguU(`45W9%$zqpu@3j zhd?;?vdV)`1lUtp1}xM;fJ-{>`#!hLZ^zJUwq@tyDF2Z{FW{2f>=o5RPS33AcIFZR z9PTeK2MMfy^+c@q&I9auOD01F8;2^m(rSHrZ$LrBKkag8IeLk}=9mBc3GEnsj9>Ln{j4;AX>oIj3=LJ-3Bu_;zI`ex zH@6!uQ7Aa7*@WELjWr(`A7+x#E;a1~7G3pdXh?{BcoN`GO&W35F+WYa-n#V^(t)3r zlF4V;{3K96zn0gkW8;{qv%I(nL9%^X2KV15<+a{!n{7Jg(Ixc9iQMwBHylALc-^Lg zDwcgi33|85VU(@n7NZ-`uZG;`HyVU95#@O-OHs1N z4p3~51>vp!HIm$t6aCfAYU@U?KawN1kTHQHCsb0ZRcuag-35t4gm&oKYnlY$lJjV1 zqES}5PoA`@LCWEKCcom!l^C%larAIEL<^3nu&Bi>9O~C1jv%X@AUi<+LQVhlgH^XL z?W1Wv{OF@}sNhQ?rJj9Ym=o+N_p+J}+g}7@T3qHqs>&8^C@`Gv(%QI`kiCgJdf9+Y 
z&g+riK;vZ=RKCJi9MJ-BeEx&+d&!Gm4_)|^-}uSZ!i7J1bVXkL%3q(kX_1euXG}qw zUb4%~_E@t%q|$QSImsvQ|oxy zYM!D!Gt1`Nh8umJB{ut~2hA?F?0ZhkS;o0bW25^J7jJ}V%{+GSpe;7I-V5ifoMy{= z$gQUXMuw0U4mJU&xh+03l7+;#8?m`0Ud023wN zz3bCPsFr8i#AOnT(I=h(={GP70G>0-vdh1Fus(6e^24Me)GD*GK!6+JYGnbnv2Wiy z!cE|nzLbc7xXW7k;qFpklo}98B67*?dBXr9Al3OfV0}LVj&pQK9_lh$!v)mh z8V>tC!NLF(hLQQd0NIn?(>j`Fv+MTNR{wL-oSb)6u5-abs}2JY3aaXn zrGZrs8wF{02*}{rh`kII;Igu^Ni#bB6Ot5)Ghdi2aeLVaTzNDuQeK@O)gwkf zV{E7*WM$g1ZPeN?Lcy9KeOoEZ@1GyKDDvu~&#NNBKPpnpStu%t>!G1{r)(G}A`;uu z4?SnloS*2_t3@qlpB~?ZuT}5FaeH8W{)E2YyeKv-c1x$frREK?YS)*a(#3vZqN)i4vj( zblB(^81`O|{w`Ta^eKi<+Z&?{lhTI!kfOTOBf7eq-Vfp+Owqgy;w?ZqB^TMSk(>1- zI!L2=(%fuxtDP+Xr6A!c_VLq|2F#^4kC-e-qcDKm+0mkOW+ zOXNC!TrLG!c4r9d^opvZB3Bid56vBLr9x~6es?B2XedVFE$h(Dtw(LdH7^G2m* zN8IaeJ8y`e=XpPZm7}fSB-wm#a-`htqx1w<<~!02z7fSTuiHoupMH-94MYcolq7V- z*rVS{=mlUG+>J>=^nj4LxAgep&q@*ID^?S=C1=zmw_<@MBuR$GidQQ9Mi>gumM?F! zcHv+UKZWUt*w^%2fUWLAfB+xoZ8e>&5i&aH1N372!NHZEi_SisHdS61kw%-w zh#y(aDu11GOKlF%tc@}ji}@ejeR({U{o4JLCTSvsC}SB)5|t@RLJ5_r6e?v%NO>~T zpfYPWNeEGtq(PJ+4WbY>l`Il1@V_w*gEYh7!t zEB|1)yux$fs-)v*meQo{fWVGZ=|*hObnDVK&5%ei@sX3tOl1d zpjZEugx>g9t6H%rLN%MBr19MFT`qu>*HV{Oj*mckmEoNoU(zCv&T>O-2<4UYW)cU+ zFDfQI6VB@$->0NweRcv9>(ZY`pIi?{0FZ`_rvAp47KF!*F-xh~%oYh#$M<*9E5K{r zV7lOlVLx}5lwhM`*9;FwQeclsM-v5Y&j)Lxq@M3rS>UCp|H*J+eJecM$S+7;oQ>Hp{Gek*T$UjrOlm9ft2|V-aFJKU$Xt)NA&wmx|F=v5nBi1-*X*OwVYint-zwCOuvPY{tIIAIsc1+0DrZTIn zh+XEaavEu(K(AtIMIH1DZ1j_X+Bp0Ii_Op9zw|P(j9F%u>H6rtl`CqTF`eRVf!U~4 zpSOdHLMCvJYF;XfxtwFqv9V2Lneeq16`!Z`4<=n=;ifD&RA3xOmaW65B!ebsGmiI8S5yxTG1S(CO5TUp{HS+f=e>poa(9rT5npe!f0#v_i4P3NgVx(6J4G@dV5 zvFESTpTPlig%(zvWgmDDXR!{Wl-)XC(zv*bz|-{+L-{kMGS*E-&0fJgXO=mmi9%*5 znkbl|i2|C508!Hp83aoKJMjE`ID*Ni-M(Q{0ZKA>zUc9zg7jQi_+{^u*mU$zc;EUH z<DA6Di|2l&u zr5`a8o}5&ce%{2A<}`pb6XazH-L3cDqY$&)?CG*UKah^-0!u;t43-|a3K3U=`Z96W ziG`~LSAP)0u|sxh*nSHOIEm?d(wL&h!=SWktSxAJC7`Q!?HmkzZnIWWnNzgL_XdZ-JAzknT=qPQNkyjlY_<$i-f&Y07|NP8M(GLm{E~5x%1vgEX4|>&DrP{P*Q-?Kmv=`}gmMHNl2!Z&{THJrobC z(JTh*(VL|8pCLNoM-Rk#;^nD`?jVYR{NeS6rBT7_DSEyx;p7t$YlcOTTXA>@mptdf zj|d`n_6Z(-17`xT!J$Ij;2T>fP|FQ2cu0uo+{ELf@Bv%6(D{ZzJi{>PJ;TE*(GEso z4o5CfBdo%Cb#^{q%{kS`#(&t4IkJ<8t{0ww8fEqEbI5)|L&fK&3BCw6xJT7I#f97p z+bU9ZFVc!H{f%Oi$5R<$wch(I^RZEoDA*kot&RNfw;0Tn5&pbmDuFN9!kLbG0%5-% z44Ap&S7E?$b507kILuN2g7@r&(=Rw#OyNtQuniJthtxQS>;KhP_k>Bf|JW?@dvIvbTX{({YJ7B_w7vcF$x_<78)J#Z z6ve6i|I7JA(Ed z$m)A*pA)n&AutX$ZMj!*EMI$EyAt6S0Bc1C*Qg{j7( z7K?U%r;ZsFMz2T8WzBA@WSfxn$7$J`1_>kk2;0(@;^@hR{6pIq`Kz~2*N{g0P9a&IUJCB{6v>qEA;Tiu(aeLoL5@^1k(w*$v&1Mlc7kCcXb6jXA z7{G^M?9TzW9pk%%us4V2i12S?V(ZJcxohIaAJfUT0g);jX14dqOkfGZAJs?tP9wJS zN$4GV+&ic`@wlGtoOm05Sx%&_lfpNSztky*=l|dR!eKx4!>8P>W{v0DC*+gn{I=dU zAHH*v;=ck0`4uGYTR<*8yS@?C;O+%W>2b+YF!qK>M__sCtaT;P*;@-w5TU#y3JZ z6OQ8Qd*YC!thO!@k(Yl0gju#hJP6Sj>X8=#?2Mhpg6x2Ad;Zt~Pv#YmLhmO@bxt%- zvoS9Xv)tM8%xT7@e=mdhj-HF+ZH7z?)7YKbk8OJ|6lEmFeqsm~gR%|afeSG%>oYUO z1CL>5d`(W((^L6vA4yP`!m);N?J7xSiguNc(MbjdMKrtogy)b3jy}*W+4t|=J7=vW!ZVu%8k2lw$G)rjHi1<`uvpDi z=^Dt@n!S6bF|LAjyJ@`~i=S^nQ4w(xNE)&I`UUDi>N328);yDAS>hYZo>NdzfP@K( z_f>H8QzeYJX~e7;i^i@Sax6J&B{S;4)FCiBT0jy=8H<}wxZLHK;Pu4X)O+hsV;@-7 zyEMNOcX;vf!2vCWy|G&WM2nRC_FE@vogpuO;xfq?ZNP*duUF1yl_Q~nw<<6*G#s_6 zAEc$gb7l}FV6Zdf)9!p&DGj0pNCWESeZo_y*hDp(*l=g2wW-|Q{p10ncp+MCUFJDW zW~yg{YTQ9z*_>@h`@5_i6nHo}Z+HZ)K290Md+SH}%KFAEiqKKUPD1PK3}&Tb8s=5q zY7#ly*yTsOrj1UZ)mu01k8+qY`h(y1UIYJIm5^zeQz#d1)l=mEA6vO(o`d%}$~gLM zm#-S1;KfnQh4vcUK7kGfQ^f5G%`BfS&BC7AJo;7>hb;E zEp}Mck3L^A|0rH~H09N=!P-{xIf93Jq~>a!u9*-@m-9_`M&@)YxHL{T#K@a(A*=f= z9dB~K>dS9G^Pa&DquNA|p5#m@Zv~_tOkT*Uu*_6sB@;QN@Wryd4vrimD^;m~(YPGG 
z3eafejdMGoj<-N$<#+Nfg#Ec3W=)yLQ%&A8TeiR4v^l=B%<3WrBnzffjF-HQlS39}d++}Cv%?}yD@s35DXJRFiWSPv0B zqPHIgAG5<#B9DXGcq#vOW?S5w zbbV7Y`@V%WIsT2PvRu;x4tIXl@D2hkOGa8fiyuts;N(PBE-nqPv#(S;Js3s1tZZiA zS)$|t9#)t6WN9cuIOHHDN=PNfb7uDI&u$A$u)m-2dqIvR@NpFYDTGA66|ijJcyyf? zF`y#eW3n{p;R=7>2D+Z?A|DlD7yHH43eO4uyX>Cd_8MhFW$D8x-eH}R9!%MNZReUdh}yveDMv-s%Al-xX- zdfgE9cP(q{Tb@VLPTrJxgx6|4v)e`+OrvdGjR`GyivVLR3$EOE>9iR7#uRZEHkCi& z#=!2R-LomA55j0uFfvjSRg&2iV!Mhv?x#$K$1@o)1>#H2-%Y}ljXk~n1$u3KPHfzR zY`ZbzPRUjk4>D&E*6txT zK)ME|2S3M?b^0=e>qc%CBKL`U6TgC^W@QzYRv1Ki^w5CU7hh6ry=(_MuxD=2!hL;{ z=PK>VczSa$G&+M)p6xRih#SuM=F!#f#qk#W-#OL2Kc0hY<$x`rc}*lRVw0U(tGZH) zkl2DgGo&dT_lg)@ll6?7O@H1KaM-~%D>*G{!iKTrL~Psponhccd_ObhZtyCpPh5SI zQ5uVgjf-npqpPJg0I*qyXP(omJ)O$^ZXC1UX8=$JST7p|dM`+scmw@zyj)yyCffd| zhkM=(?>RS_qLoup0uBk^DUsonY4=L3Zst?g3ne_ePqD$t%BwzKWTk*k5b@CFA6 zfab@yh**46pB7OS+F2MpepFSEwq!Tk_|dPbLLNu7h}U;MwH^dYoV#{SIiJTWCEx%1 zy6M@R28Ds-@RavG?A65@{^XKAbM${QH8$>l7&-JEDV(Dl zCMT+j`&=-j;TtSwXL;bjHle07xplWNpLXsH-Ep^y*$uq;(Y?qzHL?KC~p9z zqqlilwgN&1ukFKzNv7Ig*tHs!?!#OM2?TvEyBG~ssP|yR5ehq8+FvKU!+TKOU3%&4l$1jtE@ zm}yemnqoH~KQMbyZ%UEcaZi9zZ`!&7sz7SE5aqMl1)`gR>(n3RCXQKS=XeOiVHP11 ztI|X0PGWR!`U$YPs26(g6IOZaYiq1aVz-?ZjdX`mqTZq@U~|o<;tUIDXRtI11)(r! z2aEjb&}|^}P}eok4Gnfzds)`-D*P25)uVq!hya$hpM@K`DE}-BP#Amjs_yPeZ;xfj zu(OW+Wt1Q^1%8|4`7|10;U28x9})9R%>zLaqXUSpjjpkxI^mj~+SmL) z?}5B3x^rLqPV?umt|yo67`RyLaaYzLF}f|<$KK^-GQ!_`N>ZE?AM0$m}S z;?}#TPh;GMBLHKfzo>Va_QMg>MN&=k3``iiODjx_H?Lox6XkjQ2?LDxPNYBVCCN)y zC7rSa1mp#yhp|k2ZJPQ+Ln>i#v(XI3eb6n6u1OY2uEIib z8{Ci#Syl-@H_^1%-t=DD8-EPHY0Kg7qZv?3tBURekor(Xs*+_qxAN%_$>C+XgSMm#>QTh9t z?kDmCQG$Ew<;Cp$l@*BTJ)N%K51W*Mk-o?g+I&A>1+U>j#5(F48l^LC?$F_S0UIy- z;;2a`ABp4^pfr0v=4F$lkVY^};868SehpJ&D)?P89x$BaEFCI;5f zcj+@72YVf?<=^fwRz`*hbxHq2a18eKLD39qup?G##5A#RswU)t$rA3c!hg4_ouB8C zysnAWhdX5KdzjaZEQwgU7Qq#0S!jTw0J0`FZc**4B00sSFwWc$8#R5Qp>H`NE<-8? 
zv)2}*cqPKJu_*j%%POU%V7rkuf&zDk*=-5>+qFC15hKe+fR#LRPNrRV1p z)7|a%&6&7zK#NkVR83vYH%F2RSE=t7NPjXp<8)v)0s2}L_d_gBA3yG}f0fGA)O8RW zP1?dix7)g5s}oT$f@tSMQpsG_RO`82vOR4zQa?Q5oDPqheY4gE0#&!(WnD1P*mkR# zo;90v-Gq^w_WjcN3uZn-mN(KHJQ5Z6)u1A={Y~TCB?edQN)X2Mh zY`8Ir3!&66eGf96QH6x|IpdO(R{pIp$G?*T^*`YzdzG&ASCs#yVATDqyLtQ$FDa9b zIo8Ed&xy$e=-&vz1VR6fEJ4lgE0ku9c^B;v+F4<$@9UF-rzGhwFNdaH2pA+%=DGD& z)C?a(y%~ewIN+UTtCqI*jR?dT^!JXKJg>q84SvLwjp)5EK0v&!P~rrbxB?buC}p@V zZTn)B)o*}J85P#9UH8eT@tR)xF&CI_i97vn&Cq)Og%qLDO2R_~_3n8gp^gSxwlJ^` z%@WrpHe(#!LFzfVxi%_ZI+@P3zM@uhyIab7Y}-} zoFxgQLOD1d*jDPBrrH^9-Fk1|E0(@XcftB4kxopE$y0ZpxO02I!G{&>px@W#Dsb$w z-JY{9I@!x+!q|r66w%@bOZ-yZm51ZdK^{e({3Hj1=+SN1a634oAGXAZnI= zOxyFs_#VN8J;DVkZeW)0%*q33V~^nJ^2FxzgLsI;15beVW22sRQ^}2vmho+E6^dw$ z%R0b1?uz z(E&ciV}~HJMLG9XaFF5$d>95$n?|nC^S`z`e+^lo$=D<9{>H_j(narb6W(2XD1_@t zX(bB=LqCYy^&yh+jE|v4)Oz9t+e0d5(mC;IkraLB(VMrBqc_yN6OU?!lLV5tFr)B{5>MEby(6GLJ%-lNbT(cerV5 zUr#t1`ss6N;we37qLtm3%z(u_Q61*G%`?vXhlRt^G^|5YbsFwc<6=q z4B4Wl@NHe~5EuZ4yZBqohG>>2Ld8!G^-GduI$Ns4z=3Ll_`M7iTzW3rWI{e456=xM zLL7SV@F99MTBWG;em;dyqnQl#X}Pv$2DBPspVity(;GaE?z* zg#n1njBoClmy4aQ8X$qK{f-Rh_EE58xnUJzn2u?A$wQCXJX&WjkUbZ&!0N2#kpFx2 zl6*nW*EGN=fAXVm%yv9^4g7Ix0+)tn(`^>4zhJOes#w|Zgl7e!a4rsx;y~Fj60xI@gXK2HpkLb?Ov*%LQ~L_AsCS1Ejd<;# z?DFM7!^%FVB?h|WEwFAO&ITA3Qvp`X0ww2r+!?SDLWHy3%xdtWxZa?x-2->jR~)C6 z0S3kDcF^PE)8+TKu2{6x!5o9dQQ4V@dfo^u^h!hjyrrJ*CcK}ryu2K~5yLRsvp{Xo z=IY&#evWMD9;xU)!ap#k?68NnkynnqE+U<7j5MDL!XmwC<;IFQ*Z{JBP&WyAc}F#r z${3~?2o51tab+V{2m=;bH`dx4;nrq*a08ZauP(Opx2&~4>Xm=%IH4Oh<-o;tS-;g_ zTK4Sc&UYH84V@l=^rf)}%3LhfLv#&(daKG%lkW9XIoh&BQG9EFH0F}(65m0b)Tvmu z3^Wt~B?3i>c>xdK$(^uX{Q(3g8>=vue(l%br+~OjT7B`zSb#UPGwaFcc4suL>a9Fl zZUcFFCcn&F-`4k4 z&;uch4NSRS;do{j`nX7+wMfsll<16O)rW1O&7aPv7@7^&cdGgB zFXI2CyxkQ*_(n^w{=MoPQxzs(oj_n(LxI3>bhqDbE!|IyyMV1}mhMuVEsMSD+wkSm zC(-u_nfOLcw)34Xv+ng(Yos4lC~KUDU=c6J+UYIH!KkpZQ=^mVYSL7SZ%xnGEo9KI zi9R-H@GhX^AFqwyEw)#M1MAVBIZo}Q{{mPMMp>kxd{dtB=6UJ5Nh8u(166J60o*j` z4I6fs*D3#eHo2txQP$fb$+(sl05xgryV>MrJWXm-_9z)Pd&HK$N8DsE({ano1-dmSR?Yo7hq?H{HOZQ=^UtHCe@w;qzdb(29hCaS>ZHE~&!5!jV4 zPTcYOM`_I!c;O!Zv3*Hn4+!XZh07&21b>7kijZ$M__Q7AudLkP$Dlr2-ULV?WSNts z6~GaYyE!ws+RvV?5>mF>f%ASUf*f0azoehaPyzh_U9ULMB06T_Fi8Z3Pc& zF3-wXRC%HN_)(MPwRdkc_XQe_4ul{(Tg_Of;^2UT;`jg}9NG$4=z8@$jD(fCuN|37{aGv`0%mv6aO}*3#?IhW5|N^H>6!VE<;p864#b%}4sXBG7&I} zaKR-SO;9pGEXVx1CF5luW*Iu90)Oy`xl0x=CRENeLAKn9ORs>?v#D+KLla}((1Mv` zBjlboc#y-n4qXo;E|(g&+GVet7#*uY566~*Zf>8RB8~z_$#|f&So46VjxFPq_w`BF3yN%MKs1H(tz8Kov>Jg4sGB4FpJ^}t(u&kdb}c-;2-f~!`oBJdH% ztpJZ~9w&-1{M-TtfAbEAV7IV`rRm{_c5W>Q`GI>s;*pZZ41WQ`laRz+E1P2(k7u4cs2714=7- z0K|qnaUa${i}Y+LEA+F#nF-)md8**K)_W6w{*%$==FAMgI%s~r z`p%3#0(~wjXj9kH#%G4P9Q<*2r8(YGIv|)VMuQ@g`N!!Lm+=+N6*Q5_+FC9TPA(*_ zMzF-~<9skNGaWM_A3jX@cw2tsFpV6d^~Y(JLqbciiw>Q75jJ)6Aj^Th}mrk>G9E8)e6CVh^6SqR$xNUT|;ty zJ1fcrc@Iln5WW80l9-yO^|A~}1D!!^4Rhfa-vUnMxsWsAmz1XsX)*WD@lYM3ZhoQX z!obyqK6{eKj3oo~-iqkFl;&|xEGJbt043)ROt%p6kv8!kPGg1aq1;;);7fus#?zwf zq$~y7q+3`d0Ushms>^I=m3@lIk01=4y7zrWfCW%Yv`2jx56HQ@A0OTeA3C^bNy@|Q z0=1T=o#1tje6oyO>-2+9GImOF%`*i~Xd{!QG;HoB^LrI(m{&$8Cvp>iYf%eeC8G>% zWOTtw9k04~O*c`|^G}DCdmAwHltEnPzCLu52;5kT z$dflh*Tr7s8~FH+ZU|H{b-SxSRscuH{*3nUA0Mx|FZf=s^2X6*00akh_kdIgX2-rO zoB1CN^|~v&<|sF(uKH2)C*N4ughTI1a=N0^j|}v}^;o`UeqaWe?e9v{IAH?1Iw|sI z{C90vI{-a5vaFpd*M*D#n~IYyXfWS2&@QiDR5K?qAv!xA1!ap1O~AMvBMS=bN=%(w3_HaO_=Hk-#28A{FrXsdjNS7M(7 zuMJyq_7zc&;Kps)PBKl~(wy4tD#Pwqb=?722|>NM{VVfCNOyL%-(_1H&+N2m(~OAJ z<-1Y>u06xU57b?k*77j8f8pke{(sQUKPnGL<~4o~KDi2Qaao?j=c5~{eLars&j3C# zAp!B06E#_r3D(~{#eonmQ|A-YWNphGk*gClctk_5YFyR^tB=cA zoBVVZo$3}h8DCOkTHw1=k7yH1URXE?LxF3m&zZ&Ep4euI?86C;Nrc2)aBVHAn0wNG 
zTN7tZCuwkR(}8Wiuw%TDH7I;L#G^D%UAUsh?fY?W#{Kl zTD*NJb&1dhw(4$RlqtX5di=N-U}U(7;LF9NRLQrA?>z`g9B{DAUCWK(4noLAp^cMJ zquHW`5hNVQ$j(R=b0*YVH#X2w)kmC2^~V6#ab@|}8#K9nqh%xnwyQ82S$z-bM??QW z;s^mgX{tg>@SdyCm-ww@fiFUGQ6%4Q;LqK5XCKV0ayOjKJkJ_Fdk#}4mYN_#WD z=S~R|_x!MV|HLSNZsGTj-wdt&$4|s{{P`0Dx7`2y3AT?##=cv|r%Rm=4b+XVLSiT{C+9aMy7EaY^-Mm#l zzDR}lM((Ru$PSc{Ex>>yEilho$(A#;#$R?k>=9JEYnG*qC3)x{>S!V_nd9$ef<(C`ro@kN>fT)4v4Jy?SmGD;ZLmS zLalaKf%#tL5y?7w?0^@nTPzN=|2c zF+L)^S(_j`9u}TKg!T9!GD8L#`H!FbgN%$AV2q3`IPvb72^kqW7eYoxue3t`x8vCi z4h;ln^}dq&DrC4CWhfAmf4cx$byhX1^#~;E#j1ZoXs<-9LCwO(*HTuYk|LG!sALeg z>mv)6){+(;$nS>s3=YQ77+JEQTEK|cH&C=<+g@kr0QFB2>DT#kvA{9idRkzmhMA4A-cT^rij-92WcQeiEh z`2NR9-LWGO{`#Wx>%3cUdytHXiii}0vV2l%?64l#{A83E_-z|n$ zzryJuMy+JFpSixiK9E9+Ah`#&%hcWz_Q;B^D)M04X+XO0iunWUM+$a5;01hvB8WojGx7(8hg-zBUa6i9mrI+Iwvu!2dJS5&~KF9#isgvGv8F&>+o;*Vg`C|jY``llo+mpD4$4Y@%Z3LN0t?n-KEws zR^GkNQhkDxOAxF;wb41@qNX|D zszt#X;X2?ktg$WU@bx}Uc%6c=8IzWjhy=OTI<7479A=|vVyf{jm)LJ{YiP#~jquzX zMPq(Sh^#D@W18Ye2uD3XuM#dYEBW=4wi%xDeC1TF$j?33$$_Ue>q>HE_d=CvKEmFF zmCE*Abe;=Z1A^8}xm5?P+j;0+JXre&M@7a-K=<#%!VBz?;6`(r^}yPAfEzg$VEIW{RYto5Eo zJ*JSWS0(yM!5lTVF?KRS;6RwxD+BBM0uC|{qIhAsQHIkxF^WpX2DY-Z<+Raf`_u$E zBWNN!pSt(8a9S8X#J1KfJS3jPEYbc5{ohIFVL7KC;e)F-cvEDv??}U;bQX*#mNZFA z2D^7)e{*xn-j~x{wms@Yp9-)yHO=b4TBuLr7osjx;!S>j7<^5J#JLu4Ax2%G@?-*! zcNY+#=*?e5Q7>8)A(J;_h7)qct8xxPL*o2%n4QS5=uJhQZHc=+q9nAz+FE>Z2hy63 z^(XF;x905?onJtZv_f@}=+0bJy58hYeX)Q%n>;v)4gWl=^7^@t5?EHxb>lQR$fLLj zCt$)>IU*@kgpv)&d-XAszYtuWy9s>#l|Yu|Icd!h(n>C0emR)UTb!gpu>;6y;WoV7 zNoe~R$!Yvpn2co*pYLn+>1xXAtT)N%qS*n-IU>ks0R<)K14(*ug%G^e|83d^<}*@U zL8(W_=lHk>2yCHv+a?I6cKhUst?P0+-l30#8IdL3Y=(=sPO*)_jvbncm4v}mdnQUn zXd!DK_xdZO8l|pqN!jl&7o}^U!pOs6Xi+r#)NidUd+}1=oXM_goQ>vR$)r

[GIT binary patch payload omitted: PNG image data]

literal 0
HcmV?d00001

diff --git a/docs/puml/client_side_filtering_rules.puml b/docs/puml/client_side_filtering_rules.puml
new file mode 100644
index 000000000..2e3ed1a66
--- /dev/null
+++ b/docs/puml/client_side_filtering_rules.puml
@@ -0,0 +1,71 @@
+@startuml
+start
+:Start;
+partition "checkPathAgainstClientSideFiltering" {
+    :Get localFilePath;
+
+    if (Does path exist?) then (no)
+        :Return false;
+        stop
+    endif
+
+    if (Check .nosync?) then (yes)
+        :Check for .nosync file;
+        if (.nosync found) then (yes)
+            :Log and return true;
+            stop
+        endif
+    endif
+
+    if (Skip dotfiles?) then (yes)
+        :Check if dotfile;
+        if (Is dotfile) then (yes)
+            :Log and return true;
+            stop
+        endif
+    endif
+
+    if (Skip symlinks?) then (yes)
+        :Check if symlink;
+        if (Is symlink) then (yes)
+            if (Config says skip?) then (yes)
+                :Log and return true;
+                stop
+            elseif (Unexisting symlink?) then (yes)
+                :Check if relative link works;
+                if (Relative link ok) then (no)
+                    :Log and return true;
+                    stop
+                endif
+            endif
+        endif
+    endif
+
+    if (Skip dir or file?) then (yes)
+        :Check dir or file exclusion;
+        if (Excluded by config?) then (yes)
+            :Log and return true;
+            stop
+        endif
+    endif
+
+    if (Use sync_list?) then (yes)
+        :Check sync_list exclusions;
+        if (Excluded by sync_list?) then (yes)
+            :Log and return true;
+            stop
+        endif
+    endif
+
+    if (Check file size?) then (yes)
+        :Check for file size limit;
+        if (File size exceeds limit?) then (yes)
+            :Log and return true;
+            stop
+        endif
+    endif
+
+    :Return false;
+}
+stop
+@enduml
diff --git a/docs/puml/code_functional_component_relationships.png b/docs/puml/code_functional_component_relationships.png
new file mode 100644
index 0000000000000000000000000000000000000000..1a2a28542c2a3c1b0497a62991222ab70af16592
GIT binary patch
literal 121607
[GIT binary patch payload omitted: PNG image data]
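For readers tracing the flowchart added above, the short D sketch below restates the same decision order in plain code. It is an illustrative sketch only: the names used here (FilterConfig, isExcludedFromSync, the delegate fields) are hypothetical and do not mirror the actual checkPathAgainstClientSideFiltering implementation in src/clientSideFiltering.d, and the skip_dir/skip_file, sync_list, and symlink edge cases are reduced to simple flags and callbacks.

```d
// Illustrative sketch only: names and structure are hypothetical and do not
// reproduce src/clientSideFiltering.d. skip_dir/skip_file and sync_list
// matching are abstracted behind delegates supplied by the caller.
import std.algorithm.searching : startsWith;
import std.file : exists, getSize, isFile, isSymlink;
import std.path : baseName, buildPath, dirName;

struct FilterConfig {
    bool checkNosync;                          // honour .nosync marker files
    bool skipDotfiles;                         // skip_dotfiles
    bool skipSymlinks;                         // skip_symlinks
    ulong fileSizeLimitBytes;                  // file size limit, 0 = no limit
    bool delegate(string) excludedByConfig;    // stands in for skip_dir / skip_file rules
    bool delegate(string) excludedBySyncList;  // stands in for sync_list rules
}

// Returns true when the path should be excluded from sync, false otherwise,
// following the same order of checks as the flowchart.
bool isExcludedFromSync(string localFilePath, FilterConfig cfg) {
    if (!exists(localFilePath)) return false;                   // nothing to evaluate

    if (cfg.checkNosync && exists(buildPath(dirName(localFilePath), ".nosync")))
        return true;                                             // parent directory opted out

    if (cfg.skipDotfiles && baseName(localFilePath).startsWith("."))
        return true;                                             // dotfile

    if (cfg.skipSymlinks && isSymlink(localFilePath))
        return true;                                             // symlink skipped by config

    if (cfg.excludedByConfig !is null && cfg.excludedByConfig(localFilePath))
        return true;                                             // skip_dir / skip_file match

    if (cfg.excludedBySyncList !is null && cfg.excludedBySyncList(localFilePath))
        return true;                                             // excluded by sync_list

    if (cfg.fileSizeLimitBytes > 0 && isFile(localFilePath)
        && getSize(localFilePath) > cfg.fileSizeLimitBytes)
        return true;                                             // over the size limit

    return false;                                                // keep the item
}

unittest {
    FilterConfig cfg;
    cfg.skipDotfiles = true;
    // A path that does not exist is never reported as excluded.
    assert(!isExcludedFromSync("/path/that/does/not/exist", cfg));
}
```

Keeping the checks in this fixed order means the cheapest tests (existence, .nosync, dotfile, symlink) run before the comparatively expensive pattern and size checks, which matches the sequence shown in the diagram.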
z_yn&@jvLYjlGU%vYg(Q+79$XX2ucca7rcz8K6^{*t{s&5>pCO0lIl7m`Ea(HF1I3r zC3&|JcP!-v=J<1G$@6rwc6Taps_)dwvx#Xxd{2>_S@quGY;A=OcKIJQv(J|7HXmvn z=^9!dGH#QRENfdU=}G-${Bt2-wp99OG6QPz-+vCx(+&KeKeHd;lKFTmzRT}Fe@^jlCH=Ief8WI}&LZMy;p#bYhA-y-{rCUANLJ$3e?Q~D z2b(_d?@1nU?ywBY`H+5jv*y2lRi_z>pUB91n@j%Gy_=yK|K39F-x~ythj}vvi_r(u z1=9cfaQoqZp9zb)pU5%Uv-{sC38u14IRCP4Z)<7g8X}l(WOCr=$twQu$DZbscVfOA z^!2}o_<#J|Ev}@c3wr+V3)#iZj6)xr==}Rpgb*@ID*SI9J2v^>Qog4nE$f?*|K5f4 zKwV8+D7O9k7dAWNNnfULUb~r5P5b}njffQgJ@={q{@wpRHTJ(}`}ddWDf0h4|G&Qs z@}U3kyX8oKN$3~*|NCHyFt@ydf`Z>dt4Q=gDKqjjLI3t!koLci+Q#uK$jQlBS^XGz zajh?i@xNE6``_2}XVG#f|38=BCaSo&IM;o6WMpJ`SUpve>A-;ljEv7qKMactSiL`W z`t)gj{>h8EzAH1V5+3%GTH6P?_n4ZR`blrzVhhl`cu_-5Eix)fPggfPaN?Sa3x7{n zW8W= zAA48*y~w;Vsk^AC=-W^nuR&S+T8YCz*;nz#DAw?Nd5s&sGA7kN^))?M751~FwF+7; zeDdT;fE!iHS*bLeJC7@()T%ONA0u81|nE(*9{&?W2|RIrBWj$*zMUjt6d@ z;N{I5yR(O9-1x(X4~~wGQBhGXiF~;Ywp}@qY%<2@&)?$TR8JAF4%kRi`@-^e5B>P{ zM~@zHUF>OZ|GMy}zrVkuqhqj^Xi+#;XjFxNcrP_o-BU|b^OTTKL2byYB~@k5O`3g< zLob!Wv(Iinm_J&c$x*(dGnWvXc|N+c%IAC2jzZNE?**&Ed&jRif;wiqg{i2Lc5J1^Z4KDh5uJ z6%{#e{@qA63Keyq+`TgUcVlhhOH-5L>)J1Yn;W?;l3sIP(>B+wXPUw_S{QRR(tcD< z54?z;`8)YBe|5z{*oS#>%OAg*)YR0xJnrg9EP6mdz_)MT7H2H2trPFvD@yy6*wI-Y z{p&|-^59zA9#;ATqOb08j5~~7Wm>$n_&SnFAaw1(==bmCXDFi^NA$~&H!MzcRs1b1 zEOhhK=PBQsDifHxS3T|Miqqnm-dxhDDfoBJ80|iO{20$99k70XXFYaVr&Dpe@MsKr zmf`xm=+@>r?vLM<3BdwexVW=p?%mttKJeU>{PPiZ_Mn-7nL#oAZoGzV5uZto1>5Wj zm&1qm?;~#C-cC!K6_J#1F1lWdafNYVtj(mviq5kCnEFF18X6jE>PO!bn7J8dJ2G$G zxzqf6eq?%j+TY)Q!kJCdOLqIIZz3*-CF@~RiqtA@XR{hAr9*u}!4GFx`*)~(FUk(b^J<0a!4 zJ@u7mhpM;H?tkSkc=F^)X=&+ zv#tN7Di7X^_wGx27b{h}T94lt(K|e2vdUF)yy4N=@W&-_al1YzCTP}%IFB^Y6^3XT z8-I}Aj%EEVMQTQ}-CUQmL#96!7&x=#Ug)jym&o=gD=Ultq4rSpuFB=LHdnAyjuF%V|!gpOl*o%Xe;lx=6gKoQP{+I;pqLR&uD4g?fh|_ zd|OgdQm5;};$qe<6-CALtd!NRFD2G(+9QJ%uI}#cRUXq%va^L;f4s-6(t3gRmKY!Z zJi6m4_wk?mc2I1($n^JYCay0%3ltI%2)}bjSzUd3X=#t!8j*HBJG*r7V(w$Vl^G*F zy}kZd7f*Wo`aUzRPLkl0mzTdW?UZEE4vR8(OR5m||GiHVIT6`PLp z@KksT9XWC{mG`19hlIye`p0VlGbyKNF=Vze(CXz;oE7gN*_= z);Bh`o}Z_pf4yJWet$KWeq#9u-}mp|$G^zkpwzNi|2=Q0F>~pljEsyvm8TCrarqRM zysr^0kI|PjtX}P__Fak6PWk@bLfQAsnKQw`!DRstOYbI|Uf=13%1}g&GVdF3@jtutG+{>6)kV&eH{cctZ4n-$KV@A4d*`&Ku2dbfv%$Ktn6AG=@9;u^0n zE8^e>U;Fa?U0lWR*$qy%s2rCQr=nmY~OsCB#g!ByzT&?AuN`zf*#p`U*2sdL(kJO}p7$5I8-ku?3 z_oXNOpK?UJ7m@qH0JR2!If|Ni*wY^_blq2}>Q&soPS zSDw3!Dr3<*JN19w_x^^ay&T0VF|M;V-^k|f=EkL!Sj?|bH+}>VQ%X|OZ|p+~sxC>$ zE}^ETCOCMDXYu=x?YjatmM-f1MjsG!S(^NKOe2-}Y1?sn7gyJb(XM8;73aS+GjED8 zG#0ae(4Rkq_OR%lBa&6u`157Eg#7d%_o;9hW0iY)UixS3?N9JdV>^i7kE)#jN1W3o z6^hCUDSy}1o?(u!`CDTpt z#EJJE9nQb!z5yp^)$-^)rJ|-*R#91-^iR%ZvZQ*~sb?9+m%&$$BIGlwROk~b7&BBK zdgi!uo35@dCuinXn!V+fm7TZL{Qy88S->0TjJ?B?ea;v&TqV}Gp`L3)#HGFb5o4%o-ql-kfEKd)3PW2Wo z()E`)u=sldRvrDTE4IT6XSgf6=QZ8-?ca{h_UemlwhTuLw9o_93Cq>4{|dj7N>gb6 zT8*s4V_M%aTwx{h_U&)SQ;F}Bl9-oYV@f?C;rjLl?13%2y6qfLnT?w|(+TV7r^_<2(1ozb!2wXNDo0rkAx+)8R{bkx)?K(sDnJ9eHpzM81%g^pS9nYCTZvbmw>)&A#x*-q>Z*D>IAZ}vAxdNi$)!I=du_XA!`e%37om0r`K?A ze|cT1l$2D(AJ$`PNrGl?sb+R>pE>M+`6eJBKwVK;=`wa6&BnDS!=54U10KFDRqE%= z431Y}US40duP3ky3k!?#nUjX)?M*42mOCI$128y(urlX-zQCAPxSQWx+CdJ0? 
ze64f{IFbIZm+1vZvk$j-9doe`&M8j8#JNoY3J8o|74g(?{O($2~(#opX)8-*wqw_Oh zTPcwUL~h-hG&(X87aMy>ukm&|Er+G?OE18pNY3+7?JsPnga~OC$R^d8U9ftQ!J zy1JUruw3)frKX{bj&Zf9$VekJ=XX(nBmD7)6buuVqjqC z7v0aqH18#;FJn8JCKD*_z@-kzmyn_wreqdEm zGU7dP!VNIf{b$42f^BjMX}o!IYXlq@PONpfWs{$plA>nd<(PC3XV7uW2Q+P}sOMi# zdNz;Dk1v-ykL0L6I8<&%Xb&rLGMMy|UyM!2840eTqRlK{Q z!3rCM+AZMctOEkBd-KNTK|DF_LCN0w`Xl1vm7{y;c7W?oW;$F@RgI{vt-W#M2B=qB zTkdQW@b=A{H8qg2`ClbggolSqxKCbi`n*TJoF?T%l2CY+A(dzO z0g>6Z$6A?3)gLNq9XoccpNW9kma`E6^T0)-V=)eA*`HdOACk zM6L~C0PNhk6LY3_sMQ@q3w4L_DDA)c{-Kz_sZ*Md8fUakLeu>s{9D_1XJ==lfo7<> z50p6oi-GS=cV@R>eYxwiSA@~2czM;%@Kp^A?cTdr+WS|?vL@QoXb#tpCwsbb)W8G} zOs9Sy9hGn!XXoUsQ{#BcY9J@UKeJvF7DnYbNv(Qf3!#(5B^f=@&{R}aC0&20%E}V5 zod%yyh>MG3lLW=bq9Rgc0&7Z2N?yDW@g!>j0)BBTUnR^K^>{?lcX_I)xVW>sJFBDQ z!2_nq`{w56;YluATb}{nW@@bWm;~aOW>DAz{9rD|Ms=%HwlMM;zr^CL;P4%p;o9R2 zB=5TU*I!xr^XJbvzqjaFm6K}#oF617S5;P~iaHOYHXi3I7ruNcIkPA`vy;Q%g`>N( zbB5Gs5^^+O!BDD8Is7;~+tSR8-*d*W@VWY$!i*rJzLe8tMncR9043Q09xg6L6_s$z z(UT{x+1W{e6@06^g(k!7yM!U#{a6d*UBAXpEb>i7-y3}6t-Lo1na`icNPIUPEO$nm z>HhdJd&gXc{4JZWEglX<*vl|= zy11+?!GEKj`AL4h06#yM-RClgnCy_IuzWgzbwuEjbfi* z_O*nxG`4A;t-05!tokloxbR(hPeMWh@mRJranGpo9^xCxz`qZTxCpW=BpprRI%TV1 z$erlu-1(Tq#GWU*`8MP4H$HUb=^SGg({hpU{$;^@q2oFCsZ(>{nR$76zrAw{H0fRE zzP<(%bG{{szIadKyPRUg%vjY+yPao?>7claG`aF2>z<%Z=?@*0@ zODo;ohYwXvoXE*<*!2_^T_dBC)7AY5$OOr#)VAxWvYKsIf0gGodHFi@_Jg8MLBhF~ zmX=fgvXhd&%Mm=f2c&%BtBy83tNknPyKFyWcveiTf}T~Ji;K%C?ojIkfm=MfQ(tN~ zvAb^1fSGqI&6e#Wyrl*bXumqy!|VY>b@k(MSJ#CdoaY`te(W~ceLJ+?yfLb@xLE&% zBP(6BO?#cuX2eYWNvi%WbjFyp4yL9VSy@?yg^msmto!$KMaUndKqrP?aEpOWz0Qo8 zCr!e$P)fBPoDUd!vrqYL_&&bgqw~uXyQJ}bcn=Cb;S(n!J{?g}QE85vtlq{>z3ANj z>ArB8O2Ubbo}SrH1<6rS^q%(y@kEJyCdHmX%N!yiA{d8IE)E|KDf=_1wFi>p*MR-w z{iQ$oYS+(*oQpbOYh`sVBa%gA_0JzI>zOHrr(%Tm;}8IYj%I2pDl0cnbme+Z_d_5O z#9`C0qr-}4f4c^K6v&PKDx=1|d-qU@gHRWt8(CUdP|`75SX-aB&3Z0yPG4GzYxJz2 zblNAj=J@`Bd(-17EA#Vx>%KI5Po!&@)`ilk`nI@_w_kU-O1F4nVtZSfOl^P)2;_oU zOG^t#&Fb8^zxnm@Zyo{-jZz*48EwRhhb<& zUU1!ovMlc&Dne7ZIB+vSc<%PYhX;UPfZEY%i%#2cqDh6_xzk@{zRh2d=rG%r%P{&N z|Kc;~ZK62|L6lDCr8@&;#Kn`UwuGTMMn=*V;J=9zxkuDD_Dg!5DsBI23VMx$GdRB+ z5YlgLnYT)TU=xU~wV7>hZid>RapN|KOUETsAS)oQ@AF=mml@Z&kK_;f(V;GDuoRJM z19heg+M&f8XnI=3r@eoD#{umct1_*lTz!id>nl7nx4QH*Jv|-Rp{2RGv(gQa;mMQZ zWjbp=Mn^x^1_m;xlw8+4^pc+fAAX1NVPs^)qI2PqLsPokjU4rq)$tBaVM>RB$B(&$ zg+C1p_=4UL&onnRsRI9aP-B3XUpXwOYiv9O*~KQ3nx1}nZtha))sGPAR8>`NK2G+P z9P_-#!^z2sBWc-^(6N66Ld95HnufBn4{E|(_$YyWg7o5i(BKS7zyvg$`WKx3kO`8{>(F0yUgw^bl%t+vn?$%A>N_k#feD%E$XT>gUl(Dcw ze~9jxht>XT!bgvW?rcJlc8+B~d4dKsh1Y6mY#h1zwEc`AJE@2b4F>_+!O`Rm$Y-`> zudc2VbDR^`+dDeGsjptD4XA$hjL%bO=IPTD9If%RghQeH6hXUv{;qqY`Y}{Q90wIE zu0yE=8Mx?pNzmU4VF5bBoF=>TU=ff?ba!`e zZ!hTL=g%9xMdo;2M(0D+j~1bw-@kufH(w8j9zdEof9uw*QotqC$1I+i)DA)Ega`x$ zwF(piIHE)zkW28*3{s%F0f1 zaa}QIZ-6JFt)s)elDX%;hsg2c$03$x1VZ7zOA$=9d(R#zOjIcIuV25e-B>o*PzeHn zx@>HmDy)!`lY@>9O0$5+w7svuV?83GMquAUdQ-~&3Cy)M06&xv(K`Ec0mKl9r=i#g zyH6*qt*x605ZCwx1kT2M3=&P3>jjRp$@+*Hw`*4%g!J6(?4p#A=vWQqqqbCBoSf#C zmhKasM;?A!)5V(~l=k}#LHMleZN`px4$=5o{7k$4-5Qa?+>+qX`;><9Y#TJN7G3A1fuT@9MLy6$hoc66Va z-Yw|EgqURBcil;Ks?%UtI9PhwH=%%$RMS-rmg|H~p=ycYA?5XL)tW&V!rK zZicra;_yCs|6aA972UJz)2D>0gbYA2R2YOfmCP4tsDN$c=TE$FMGgF!e(|LtlBFkC zgN^yrpFe+u{`OdzmU}PcfS(pTR!KZr?f!EwODG@49*d|`I*1Aj?Io zU#Q==qP+b5{nPRW25X5X0U5P!$@*9a?mKtLotj%((C!=@9N;{m3&1bIu79ZwY+K%y z(b=i3?`x}i?lut^vT$SQ0=hw0clYA_yeavaqpSQBw*i6Yz0hkc$?4FYCs6h0BbnWS zz=bJeBX+*T(WYY&j@Ww>okjm6!=gC4@Dvho{^#&&{! 
zVA-`Vj`sG0P=mY|e%uTR342HFimheV>TtRWJr+IOen0aV4j)PxQ5Nn<~nii&j6Z~)%*J#XfPHQ-)Cl`7GBC;g*1k49wyOZ4mAW= z^nsvlEL01yXtY`htM~hPcz8raUXX92fBW{Wg_RWwrZ}7sV~kA^JdbgN<$w9|C6{@3 zSJw#ryUgp|khyHFyu&co-_@KwgjBL8sy8=NEjEijj|3;VvzA9aA4jEn@QGweO42&`@~678Zv)YxnxheI;`^iL}5wN`=P`EsTe8 zl6SWJ*khI|?XR3aj3Wbu=tG07vQ)r&Z%FFElcW0!e&mW!^I%)iJ z|Ni~sKYmzQTApQ*?GJ#|4ruBN5Qeh=Z3E;V2Xj~qT6_O9a>`(>Q`wufTo4i0MMcFe}xC@8`P#OPt-X30e(+!DEF>sJ?= zF@d6fl#{b}=gv2j3i+*gd;tNSvu8&^5bV=;;78l`wvNo(d~a=#eFA~=W@zY#WRYm$ zWb|H;?>E`IGW;L4vQKLkLeGNcua+##Ygit`qp>*NA%Fh7HMB>bi-n6zOOat=SX+84 zeX`q-fnIG^UA5BJ?+3hrxl*B$Ldq!cpJrf)hMn$wpPljW#mkrb&;<9wwXYioz=F7_ zUDoDt)ea4r_=|*HMJajw?B2DuPNSJ=YBI7giaB%1#{h(&(WXiHCeR7eM;73usG1y! zcs`H;l^7i;sY?BMUfu+bkybR}x28yNYjg7*sG=P=^YeegTtL$*fY$sa9!uMy=`r13 zy0$W#OOI^Hle|3BS%!8G&CsCbFiJtt8Zt66@C6oWKQAml7UQaeLp3NzdAVqWbH*s- z_PV+{XcSv-Sk1qz6ulS}6BlOyi=2Me{wxRsI?S(y1#@x-In`#y?8RVcP01s5 zjE&Xv?GQzZjf?BsPh=p{E1Wy$vJ{4$P8D~jUN}$iyF*!tZixC^SXuFOaY;ID9tt{1 z=P4v8cvLM3lx}Qv)cx0YrT(KMR3R!tFNkH#>J?!Rjf~r7~DO6&1sggpwC1+w%GRcPD zkerls)y8HK;?1(<-o1MvZ8K0)-w6-ToMraa2-*wLHtW2%w^*cO^~;x5tx5Iy@*=Q! zpmNoV9izCN5EJv4JV@SAfeq5&v*IqWkhP`78VRD zTt*??;E?h0@JL|PqrrN5t^j_1sqwGM$_lAI_UafK-aC126C0cHV#^k27Ts7Zm=+G7 zE0?fV`yGOwO9lQN92hXr&_Dvq0cnJZi3xj#c^w^{T?Y<;1d0d#U7wn=7p5(+eiwTL zdS`xNAy!*WvW4+GIBq;o*<9>4AZko=NO5o%>}mUppl@N^VeC8hJ}282M^2>yO9JPo z!d2YHN!oEx6o*AR?b_9=l|Xf9y>uggKkRP2|`&S_4Xr53{(sr4Lp|K{lq7LhmpvGNQP&a8HBQ9 zKQOI$pR$uSqq+Gink^(4YxDLR;LP`ZePAvD9goh&a~GKs80ncsJGi8f1{3~;O0BG{ z6bybu_6|D(`Nt1mOC$>RyOns}Sf;R~Vl=^vXlZGoPahDp(ZqPf$(tD)OT2sc^)fjl zv+@X{QxJvaB|6ZYwt8M<8V z=sx0?3fE(4ZH;?&ebtHIfs6?@x2t)1?wh2#=jP=p)vdvP zKOpYLB_?+H{juKQPf*pvQVBm`R0$u_gu-J&Np0H_^ z0kCEv{GQ7o61!Vk!ap&xuq2aMCzeu-+2qL4qZmJ=35RheIP+)&oh>gj(_B_o7V(Q- zPv7Aq3Uw9vHt2&x6|T3YIfky< z*>RJN%zMEnc&$PG2)z5;wcpV9dV@rFERYB;TmCItrs1)%M#`k9P8I$58#l<+`);&7 znh!lR&UyMY;+zTiAc_RdJAQY&`ls&h_?qW}ZBxP1WJN}2&x&|97w7;@fO&`eL>ru)9Wbu-h*b*zrMj#gbDhk$}>9hx)Tn> zW5DE1TfSkF(BGOXRv3vX7^!rEtfLQz1`mYn_Nq&U@_BEpmcmynF8+lnYY=dTkwAr1 zpID^R8F_hWI*5TmOQ@VESI@_e4j@8n^5OG` zni<)o59)UwKhJ@B>Fe!{I4H&3L%c=Veq01_mzZB|4l?*5MItdSu5O6z?M^gMaiY=OuV0Tgq()WXuSG*Hlq+f1+qr8OIvVJ_ z02pL%@1=LGL!5^Wfk`9O(X-chi7_ce?>=Nnn0*m+VMmlOJaL+k#rc#JX$cn<$Lao7 z0!gkueK30L+qdhdw+Dsz0QjR?U;v_#aCabg2_Xf+5v)WYdaRPtcHeD`OQgd^2q1R=- zCmffvq(W!ii+cFfuSP*h+36J@nqf~!PX*YA=Z(aJ^24#Dtw9aGb6@2Y6yAbnW57W! 
z+=TUVSzjNlS^#I{{JY!R<2hu*3QcMuo%7z_CqXf>0KpKN2vm!H4x^V|cj;7;ApVoO z>X#Z;6P}Lne|g!~wl?gRa&a>whQs2K&o8|NSZoU6$}f1mL|=MN7W8GLEFT4KPaqsN zM6>eqdzqH@1sT1?#YGf^gn0ukcnib`Ns@*{cL>=~Vhzc4S@CO!a=DEmho$@zJuw5R z88Ar5&{_wJp^9>gnlsk+mloix^Kf(D{*!%rf2?lk?>Xn8NRqLX-g@JRvDy2K(6D@;kyJ`U=Kv9>nQuJt5kyCnQmVPRp6eel7%Nq^cV?1Zij zNuo99d7q7K!nuH6;;_dz_7cv#h;w>ssxgnTTim$^g5_B2f6szNMbiRT_)_T}9!VxE z;e?bdbR%F4_{;4%uaFll+Ya;@H!fEHmJX|O2vcTZX({`93UZ|WVn#}G7{j-hn*GKrLiJb;7A75KBcI0 zh&utPH_LPl4POGpqen)u$)H&-bY7t6;pH_dGP?;FW@vbTg?tAFJ~gQ7^g)&widchC zE!?u8xOwsmU}S&#+{Vp)(ls9*PK_Y*{Cztq-3Mnd62S+rTZa^=d` z@$vo4%*qn`%6J5+EU9exnZuqLR%nB1*Vm5%RoU;C4Y6@Btnw%b4t|4&$$oYq;r@M5 zSRD{?Y7qmByiT=e@7`a3*3oi=**OlspqD7C1I*tU~uiW1M z2?)o*-rmN+&BG%h_@~J!%r2NdFdR>18YgGAnW1N@tE=0~x+uCa4aio4D1_~M)3dXee$Shua_6+%-C?S}*x1+@YfTn$`uYlrFY-CaGS@tQ z95zjjvBg>K_ouU?eV=qkMd`!^8O8t57n9?BZhn!-o}Ke87OrUb1T{nxi++$$e=9t^1rf8XuWxudwL;l}Oh~t63=B@3JQ*d-kH8Vi(iO2z zE?0&rcJy zxPc2Q-SdHkA|utXy(|eQNorw#KX}qkDYEPFN{Wh_+S)$z!&_NmWVLld)1z-l$K$<) z+s!aXu(z=R%XDF?&d&4TA()Os>cv|LjAZ@KA0kYjtYQXXcxGk>zD3Ni%duw0xy}5i zPZyV$^)xl-=H_gc$W6$YCs0Osv#cejiO%e4Q~h5tF^mVJ&jG_)oJ!eGKLVJ_R#${vPnDq`Qu&c|14-&eeRid6}-rdk{Dcb$^Q5!y;1GNom@ zW=c9t!SF~;(Rc0b?BdX^XqZM{%-tQ_#i>4AP`EH_jVg=9{gK+B<_A&js4SN0PWj`H$4Ko-T} zgSZ7on)sGOu$gfhF8^Ivw$q{{9ixE|^6dF@n0s3Pdi2>jkr;E|7ykH^Y=E_c2`N2Kf*qdzDqiaQl_1V5&1GNQV)*Bt&#pwhvzO+QDz z3z7&D0GJYO%ZwMfQecI6xR!5@KTL`NgJrKR^t|7QmhQ2w@mJ z_5fKE61GCWa=JP|yMoc#P`VB!W@ z$@hpcLp`?7r!b>rQ)N1{e(PwYmVp7<{TyWOqU(CG_wP52%xC?*`ntUm;~7|;q-^2V z(qqj+PYiDwns7)FXe4*0>$f3DsUAP)w`}tfXB3wZFz3vVQZW``lhEFm5cLEHo1S~a zh&(_F;yw{f0&^dC8K9ANcXq}`+3i_dT?L<}`CEW!l*b+FzxQm=)AmlD-wP;=csB$I zU^zY#?9|pqcy2EXa`gp3ISBT-y1GKGxpr+9Vdt{*$!D0(h)?$xapJCn?c&0M&V>uK z3=G!&WFe8EmY6PJmrE$SX2W@vEhhvjoxW{hiBZKdAVt=dL(*xT z;qPH{Y{hq7ZI0(YQ|WcAv7zCdqN{1}ZnW@+SYqVk@gO!N3JEhm^^|Gwg9m+(J^wK= z2VBDAhG~ltJiBUlNluQezynn}J$=f{hQ#O(h&Vosiu!;t+jnWr2{Gj(N3v1;MP!V} zFbIMFtPPGlfL;w1vs{_tVkoi-ll`Un1qHkVy0Kb0xln^&lJCV9!%7XF3{oy$i;$PCREc4h{bvG~|X%DMwYh7Gnbm6#P z*cs~KD>zJi{`~9TrTKX+Jv{_EH~jes9L;TQCdC%pv%g?H%v=U9CR0&S0Sv2YXjqf8 zTLF)-TQ*WTA^*1LXS@10SZFwTrv(JeC6sEgLGAn09w#&R7hScr#Ym5$BM5RKqoel$ z)U^ot)YF=VhN?F{qK0u-ZwgG>{!$al`1skgXGyWRVbo40Sr-5n5YGpSvBdZ-T4sHI4eAo`XLMg>2OL+Dmz9$B z1_B>yxUH=0__l4g5W9R{KrnF~Z|4C1AW2*}*utP3(onpR@qkJ|b8Xm~vYAo4erU*8 zUA+)Tl1Q&bO*E^gzFVL~Ab#q&Vu!1u(QGnS=F`Y|w?a{)!@U~YL$cgx1W4gDmnq-G zJVi#{{c#tN4MJ_jQdwsK6`@gw_zatRdsmc|Edy)z5nlBmiv?QvSNrl$3W6=r($|TJ zyxiQekr5p@M4<*O*d)%=r?E8&p~=|oslkd8=r$mfFi;+(rbd}jQ0Xcw-$5EDvcq{* zNgfm#{_Zx_d$+2i)b1kY z4LC@ETy;D*MJ4=Ld<&S;;&hJmx&=kLxS=3+%r6-dtS zot)b4aPrV0GdQsrvbaN6hfVL*3p#hH+DEA15lL4>9Dg06z(Z;O{@z}%(dK(_T+QnW zNCzBDkRF(>aJ;=DDk-TS6ahIw-=Cj&Cl30bNrv+BYh?wO1d*x8)3nQueS|wqCMI8A zzrHR!XixjL6QdDd02jWMGOGmvT$|{xGczA3vs7>;2wLh*e<_;&2lPz*hY?J}1X6Qw zD7bg;BknHsPiJQmWOw4{+Ty{HC@7HbKhJcAlniTWYHIgF`lACh;ju)}b688X$lW$I zh3Nr+pX~6|R8g@*Z?Gr4-9N2^0ozihoDva1+b>649hakZ!1qFL#ad)WrpCs0l2URg z5txwJ?90cWwU84!r*Xj&H$Q>l?08b$25XubVea<y~IR28ynIkD)f;vh#Dp&d;;49T@C65 zw9I0~a%Ykh;7hwopYgxNRhQn^^nSsys>_i#Joto!+yRPFdvL(a%%?B;_*CL? 
z3t|48gqCy~QsA#CYpn1uruOa%$@JE_l`7_H1xJVYq#JrNT?zq;eK=^a{9a=NF;bq7DE9(hI_{LcAU6yz~9Ov&nO`wfuvxW zF4aL?Bek{gy}!0A)dVBD}lqB&R`WtJHXvx?n za~|PL=HPrm+wZ+pl?eYFejlfzFd>$mgG0{1pb8;eIzc!kYMrrCS|u%v^Yil{WA^z@ zPmQZ9nFJyTRA^eRh%%6rNi3cV1hc3EV!wbs?bGGBuC|=pvB6HWs@yz{W7!eyox%+2Cyg*9N})T}azJVOakB(W7CkefO=^&Fm=1t5-kIT)4N;Q7UW<@>u`ax`N=X<)jf|9NvhIuKcXSg+^o5mfBjjMX(;CCiunMu{l{n(2u-=0wY{AgG7{{pWoLzn?b7jMP;o} zn>HKsBHtB9%+1w-R~31xOmCpZh5p8@05F7}`F(X9l%7STFe~e@jEu?U%ifTymS&qy z+^SIHIe26f?C)!(n5Zb?2~ID5vdAFBP-LDbFdDb$1V+F%8E}5}+buS1d$~03y#g20 zEpB$I*nG7K1Au2{M+yz+m@EF|$xF|J92}_QIg+xD_&!RA=V$_ShsO4yjCz}cATESC z9O)&<>bz}ec=Gu1cU=8JH7S*t)!#uzu&AK+7JmrQ_7779Xb~Hnx?RWA4=<)>@@o!b~*l8kkB;FH%Oay#^?pn^0hZ^$e3J*lC9sK;buyCe3uM6|G z+~Yc;BG+m+^iI*u!Uqoc`^S1_8Ckl$y!wk7g-<#`xDb}5Tn(H!;+ID|59f-R`J95n zscH7hHlN|(E32wnkgMT(5ND~MM)w>3x9H@c^6$&2BWr6nFuSf2{Y8l-CM3Xi{Z+fUF`L_V95<|1H%B-p*GHmdT*gw*o;&w< zeWA@Qk!IgX&9LO4Ia0;Nk00?Xqn!tg#IuHmgmhn{P;!HV|JszlHT3^}5d{yAG7J{- z^IoNexUeIg_NCkI9+_V=+COe{J8UJy2*&S5NeDV|ohIS#wTZkV*54n|ax76(Q-c5_ zMZh7teK@~HKlDmeC>1^ZkEl>ZRn>LkrgaM=kQWJ(5SI}Pqha2P7)0KtHN&L>>lz6eKmClP9gn-AU|H%_kd0#DlQ{tO!uO=RigK zeiL5#b_XM)&%n%B(i+5y+r~&%4KtfefGR6RSCfcgC|X{|*fQz&cpxIvekl#{1&}nL z4(aZXR1lyjIN)UB*B8ia{%Kpv{k>|x$yZJBBM$~FJ+t6=SH*F;odA`Ht=Eu9gy4?< zjhL1eMrc)ZZ1nw=HdVM-biVB&kiibRLFm}1KN2v=at1Z^@USxYNt$NmZ0XF;{`zZ$ z#^@oD1HdL&emf$6v^e?GEB8n+;rWUjH+emX{3qq^hclJ^sN1B^5B;r#%}B8!TSigL9RoZqc!n z!EjquA%9njhMBFBSX`iXi(et`xo^_5yudFBi5yTS;ZU`fQSXu(MBEispA#MyHV#v^ zsc?A`2A1Skse3% zvBK`-Y0SO7s7(l>QYgw{na0;pYe^TJL?a%lLAXX_y#w9`UC>)xuZcy_W1t_`fBt<# zd@b%Vz6{t%qND1kSM1k29aXLo+xDai)V8TuFo$4!JGh}cn5`x^!pBw ztA!>deAMdi+Luj_@%C@f742n1xUM|_l?1AvBaauv=(M+>n?Y%UkPccLO1lXFMSMvT z@3m{-BVIAM$$j{+1rnF`JhCP2WiSePg@qFUPZf7F-$n?FKN1*?#*!-UZhSm$s?;DL zr@(M3hCI407al}STJk-w~DWOdJ-E)zRi7YSXf^E^zkF_8-ldjT>>P$j zBKY14Z)Kwb)JXzCn8>}dB(djFc2-vV%ERKL55qdVa03mihwG!#EQIZeF)``+`7LF_ z>hK*OJ~Tw!TfFR?#`Bk&Shn@?UHl2zh#&={w)ASbjg z0TJux!6GK9UWY;D9y#ys3gcHWIL^h@^!`2WdpLA7-u*&y>eLTe;|>g6)z3A1hhXNu zfbl=uwU%j7zVluv5k;MO;A8)=Y?L?&E1UMon_i*4@+M+XGMB(nNE*Fu-_~E1Q$6H7q zZse~0`F0cwzdUypTm#V~ z!J0IHO3p|kL-YW~!mqq&Zur{5rHhn!gM~}yow7=u992^TmSd+*#R$LDcgM<+a*kGQ zbl)Z>`amLPYt{-;V6aQNi*ZR7@P5@FahUNngYK_?hq3N(FeW-Ow>iw&nyAx!>t_+Y z_2_yw2EnDvmp8^RD<*Q&Ko?i0`W9Ln#l1GR3O}bP#>Ke)(;W*LE6qbgHSTt71-hZ> zqhSHLdIkn6v%PW0Gys)o+VyO}LWu69!Zu^D4Eqt!0Z)0~o;_#H`IxzX{qbuyc$Ar0 z5s2SEQJ#vWmf2iifL?}QvQh~3&i?dIRLo@XX92#-exu8&}tQFvy}ABt=%9~eACVyzoh*S<(s?(?Lt z&&~Y5c>3;ms^9+ql59Fw)-jHfQK6-SB94-g6-gx{l$8}4wqqA6A}O0ggR&d;C=G=a zDyvc=(z5-Y?>^uAcR%jO{m*Y&zyujjfR&B{7%b-Md5v%(jvNGl*P5ClEJ z5r?p#XhA63-rJkT9a0A2V+6_uvLPVTo}NAp-Uk|g7!p*ve{I%*L^`v{Bm=a3b+xMh zHI*@ReecGV#+9%tgQo|h@cg{Te|!1Q`0Tu{=368=d&(IdaG6if1OH-M3RrvROs*ea z`OroM&wed{ZlRtJI;o$Ki-TEbe1bU5<`CO^1J5}F$LN*~D$s+G z+&7|srMI=|w;hEHLr&A~*yJ`SIP>ku{;50n${>eqHmu~f&Ch}Q75CTV^|%e>d!Otme;MC{+0b>VV^JEus~&MS&p_y+Q>R{G{Q4H9 zVJ%weqi1X^w1M7NO?2a5`MwO+%mj`Pw@L=*UV(*-^Gc64u~RnY!qV2K5P_B1=U`q^$v-;T6A_@+>*^vhRLVM3L24rn6=!DF z!Qjx)kS(J5NBiKxgHRd3_Sds)FE3~sJF*NGH*?muja2GeXnC=23()kwnI3(;zE^T? 
zW(HcKj|CY)ZBL#I_STlwP7|Lz7pAoFLSOZ}2mZz{e`1c}9!sq$qr=d^;Ubw1B%kfS zU%^4k5pI4JA_ zw9=5!F<|r!<$Gii1^z2AXVjwt`1im3%v)%-G;W(7s~Ipb7=AI$$;L+dl`H$RN4zuT z$}X#y2bD9{ z9VRcOV=DanEdAVskMSe_^ew~DqWaowVIfk|VFyWq8>011d6_w-Vi}pz9H6bEW9^%H zu4u7xDy$Z`Yg^A09jh-SaPjE;?baQQ304C8{VrX12wY5!vRdDiIJWTJ|2F=oSgrO@ zN#pT@Ri2cGHz4@LlC8I4Rl4>2v-o-25^i8jd5IGj2E&y%Z*DNv7T%2IAh`a(fEz@u<|v+&oFnILr)(+9Z2@)32K<+s6ccIE(1GvvtSD!~mXe2ZiGm z7M)pCgfY$=ts7O#+`z}Dbe9w&W<-pXwce=!A&ji9o(WzY32qQRdE&&|v;JI^y}wZo z+;iiGo54RJ#%#9JSaSi;20a{@ph;h=6bO22O}yo+c$av-Z!l1!@6A%K2zdQUFxYy> zinmo>Yf_#re7@#UGxuE`{@e;(xv!hvMy67|va{0?ke`TilB&$WVR4q2j z7|osSM=xnKrkAt?a>7SoJ8%?!SVUtLLGQDKrlM5C<%IaPn{h;mrHN)s4P*k3%&711MsUiL~NeTIJc zs1)g4O`w=88->1)!L8*Ut(2p}d~QeJY#DevC>fgArSAd8$2^RxI?Qak8~yg>_ktoB zsjjp{CHoTYyI8821BVz7>!$Y~JQ$rrSC*BRf6~wp_B~CD&2Ebe%|vYIDTBM7uVHou zb&jf~bHLnuEs0Ih`aYdsXZOZ~B~of?qPkb{j;rlTxOpWdCnhEma;89Xd&5+V$6^W` zgwVyBYO~pK|NqZp!513&aT@wtd}Q~_m&!=?QA^PYgpB3u=qL#BVP+o&H)T6l1qH|zn!f#}w%pA_0l@@}*h8-ni$BYVB+8AMGD zcv2yi$Li0q6$>BVp`x;yctZ+oV6=YnMqX$HzF+nUm-mqf7Rphf%Q3jKNcdamI#3ph8H_qr z>3HDOfq@G^oYJKf#^FK*iQH_Ok6)t!?LN#U7#fjClB-$p6(BL@rWRAxsb9b7n#{WF z7el{%`GV>@jM5~RmR3Q@yhAvdcS(gT4L#v;j#_>AN-f+?nwquXNijJLl6iHJH3(80 z42NeJLrwL_#UC+7g_qY*k4o-5+tu70(KrZE2w?F8*rM&UtsEY-U5-_-P>^MGSO%m{ z*x4ajf_UoLCN6IxLkbOHzl@JJf~pQ&9fD2Kn#Ag)vG+JG_$wu|CRLiyIzwcGZPim1v~VA&SJeY{3jJ}CXgMV_8?lQR+c4qZ zPI;1-pWood((eB@>)}Ik)JH{G+2c?bBGIQdSSf}J&$jBp1LL%;>Z4N2R`|lc#+7{fR}y?8>^yq8w{CGoH8pju*J=tp zbowS{qTa#Q{IbRCG{z z0IbghECQMBRpMBA`5Ifqp>h|C%9HO0YUPNWGf18)v}Pclh%D?tYYlzF%eg%cSv55u zpqhrX0m5#oZWE*m&p&x1)iaD=hr5m>x_WhF&ZRK#zv%NCyoLPQ!PZth<*)m2ADZJG zXZ5w-ehzt)z;DmN&Hei2%f!!ArsCyCpFrE|sjW6yhL|MLZ7u>2wbQ?`B^hI{o%|VU z+py|H1D$=+1$fIDJjOdwFCnX&e&c0cgF*mbF7ZO-Hg1eqVQ*x_gUuct#t0H)%3p&% z-~ZGpbQle)esc2igFk1evtK(Aumbt3r}=l{_$l2bk-&O+0$|SYRy!kX2k^>h_lWu* zA6*CyXkEy1>l8(u2#B+Y7K5sgk2V&erZ1m7(VKRBz%08I$2n?-G!J+P{)qdq?{d^Y znh;rVKl=Pfk9H2HYy)cd{JOfOrKQk~+HY4v4-SNs13m#bYoy?Y@MQ_QRv@792+{xeI!sCm^~xRK4R3Twyv90BgBEfIty@2;b2Xkoaq0-beuQVV>fKNe*HS6o7fPZ@zEXt#c?`P^}8A|>u1)Cmn;gPOg2N) zLbtgM(=PG*r>yd1wr#VpvC)L3r10LmSFbk7%d_h?GkdJznp0jJX6DE70=EM|M1UMu zcis|My&Awjm&j%vV`C+72fttLH#39is!K&W*fSc~uX;S6^cLj@MEb>;D{K~IRYz&y)md~YmhKd!ye z>Rv8>AXm`NqlXT`%kbne-xf?}M-;bfYD&rqdn5Ao@ZrOF@hnU82jI1_sXTeTy7~;h zJ&b)3vUy8WPn9?{AAavCZSC)uLo0=KQm+VHId z5ZMVpgv6pl`vD%*GfCXG`^(T!_RX6}>4=1&a&RlJE__Rd(WpXKXF1HDqa`Gg#-8h3 zCvus8J?JEIDKs=Bf>0nq*sno6-wW;A= z$JjX^qdf03G+bwV19j|zk739~J5W7fSkXQqu~0%nVw!i#7rL5=Vq>kYmw+l^zYfcx zlcfC>UPo5>ji| zO2rhyY>=MV3(JS_L#eyi5jQ*m98l}!gjpcd!tesguz|k5XBxYwZA4)h7*h15AHRG# zjn!gR{P&f}^olb(B(Ttqf$UQLtbA{li$dQ^+uchtxk=mj26P`t<^l3Iu-K?b17D7V zzfx9hF})I6-G##sFAerRZ0wNETpwl1jV;G{3?oETz0yJuk-hOy8Jlj5nrhH|Ra_ir z{9-nH!tzohq_OVk4)a- z(j_PJ{-o6jv?Ms*^M2}gk&}|ZamyVvGrNaj4)sQE9`263y6@p6u1idz%XkiNRtN+Q z2)ZXa$1Hp_5N*chM_Y4V1IQc9>3Z`92E>x4n-~1>Z*7CjQg9Kp@$$Y%_`4vBFMFd6!R;#JqK9X_723Fef>tt)k{ zxbnL&UO-&~`3}8$jUe3`VZ`+9MfdLU$wupBo(@sa_PhCoH*Xm&5k~fJGxJc;VH=iJ zR*ra5vBxwZ+h`E#M*5<4*$GoDHONb1e4YV-Pip>&A`Lfe+ib`A3NTN{;8OvKF*ceE zp~}hNlvv~*fK}}VO!DCo$|>{-*{sSZQ7}*Kx639e3*%gV%;45%qhyBAJqtMlJMROd_p@RZt_Lg9&Crb{QQt;bm@IdDPkU>cSIwl zi0%c3r5IwDE9v({0*waPhl&cmtVLDE!Om`Z=^lK-vs?&$&*$h9uL|D<$I8(PH<4>^ zdXu)KE<*dI%ad~O<|>8=ATX~ahKR6(tv$DT3Ussy39zkmoHKe1@m$Y+-}M5Mc%9VN zGISC=DLb7w<|)V#-R$CXibbOh%B#~WQZ`sg2l5|>V#Q`h3EY_L)~>BvGe}x3EDW2q z^*mc4@f*?A;4u6+gsB#cbD0`DMaMMh7PqOTWo5t&*vevkn{yn@VT$^vQcb|u4lvB* zH*7e9r08cKu}BH$LHdC$;8{Yrg?W~!T5DJ4grfW3d}FJQ@bWc=Zf;|Ul|bqx6!d@= z8g6_KPb|^2KYm;`<8uyll#qJGZ!SPVTD$@Y+W>tGnjV}Fk}d_{;Preosv$o9;$f<# zRgc~lNb~dO&jDGBJd9{Xt38;E>3xb1uZ>JTPds*^wysFhjTewiu=De#?{`AOLr9ul 
z_2c_KsjpwIn`{D2x8VRYQz)`6VF&X(v=abQ!qZP5s(n~xqjTy0%$bMF7!*04O7}jN z-z8Y5i1Xk|#!evfM7i&b26=`v74JMhe_i_JP_#C*^7A=h(w>HAjlpF?r3isJ5z!3V z@r-rt(PPEmz6T#iLj>~x%mD~(fysLP0_!ZsTCmnb`1VYl&eRZy9EONv?uK_61&BR7 zmY;moZl>2~{ApaJdCJSr67n+x0j~`g{8NkT)zS`;mdaf#r>#k7)u~V0B-Oz*U}bb{=F* zLmdG6tY!EI6q}b*Q{{R#+UlMc&N|!bpRF_c?VH`YJz^A!&f?n-ksnt!nUtAp1 z^#g|guhTt8p-@n>TTPP*gBJD<&6;=h>UB{Ma^yM4pY3XRGS(Q8({r+PIC0_xu1@GenFHO6{l{h;FZn7D)lEXxn;0V3yk&xH z_+K?THs(&N%i1X?#{ec#^gbsLky%*7p1=MA6K04$rJZ8=d=G^hJ|OyL+TfN$E%jwc z2J3@QJ!xhW?<+tjpmhGj8)WKIOK@mYXpx9~evzTMgcoLgN|KI%X$KdV*c@-uo!CiWeZx`!v<<*`@2n25MX2l{nOY_&11M^rjbZ zVg_hvN=Xd>K*(0wN#f@x9EO+`ppEgYeg{M?ZtYt)(gyk;^w_r(dv=6l zGyntzBd=%J-a!~(l#N+#siEoxF57XDa|e@IYN83nMysVt1GLbQnA1!ouOefA}lU`A;&Ph(h)XG<*T8M z+qtZ+-;Ta|@gk*-973`Re{~F>2V{h@x$(YA6XLbdNxshg$O31RvTpq=KQzojL}OcY zGcRvteiyVE1cqMjnsRA%It~eC_27R1k~~cO^`xvi`vhgesfV{#dGB7B8KE`>f0R=7 z`v}f<#6+J_Wa+_2R$?|nl%2r27im#Zy0Blxw>1M%81Nap)o~y?Jv|-D6I^VNo0pd* z@X!>OXzbDw!E8fdaydC`$eaUGAA-g&0iY3nX_82qd4Nt0&9ctJ)M%1+Zfmv*v*7^X z4YWBHQ}M#rk*d%J4X|e!p~l4oL$+8k8TFvLx}NzYB2CZ^Q=i)_SJT(1S-U;iAh+L6 zyZzFZx)=wHN-$_dypMt=vA@cTHuRl#6CEjP(=cO9>tZ$RgE(6!cRqp*ym1gfqJ-?n zl`NxBXbm#57iPZ|qYy_F?(Vy04?)O0ezm)V%qpCWs}No8T@8C(NqO0gi4|hOa3XXK z`kB%f5LPXLR2kwN6mxT??RxG6$^Y>eWM9{&qwvBZJ%`$BM5C=R+|>W*TonK4^3f}J z*p2EXSnBc~9g328dCHw|*r0NKh4b|oPL5LxMVrBnqucm~OKt$MgolNtS}!?5ZH?}< z*;E8nM`d|=MA1F>n45C!?CfZls0zBkZ?T`YY~hy`xQ&By!v?O=CCnjUr_i@C^8HY? zaqy7E5zqiYMB&JS3y=VuoAs+?biUi(S$QtDO{r}Un;rXsG76%9AX#AC*!UKu0$nC~ z{4rD@a0W-@i~_51L({%P0$wF&41CSbDzMI9Nl6ik=lcrbTLafu;!M^$xB(au-WD2P z^w_I-cxul4To=U551~z8#yO?^D3qbMHxUq_fCDBhV2iD$GqeAR5_-;Xi=UT3XN4kx zZ;!)-y3rqtmuTI(x{e?V@u6}=#y(o~O9-ye=3XFPz=~K88my-K089-2l-(VArVk=9 zud+4h7eFzzn*Ij$UJ8*qLWt1?uJ_!1UlC{Z%5@J${f;Kw+NPocsU1QZAK|OZtE5hMk3;B(>reP8@ZQK4mS7vvEYn^3-@OaK{`bDQt440;&g<7>jhC)zX#bWbv86s5 zinQ|b@+t{L4+qwdc|^bg!9ZB<(51m8G`Yp#&{`dLVBWS^f<)yp-0vVJy?HYq&wWhP z)GQ-}hoiD_k;ip#!r@S&=ZpfHn~Z1R)`i(qQm4lQp=8Xju30dn^;kd^G|kz7TVu&P^%aWb=auAwkAs?W+F)W~b0710qirt7tIpw#!FW?xgVC>J>L%k)O(6C}f z+`4a-UtH{AYbzChFZ|lIKJ+nzQRqd${BTa>6u!-Wg44q;+I7JuRfyeED-D!+y82l< zTfMmp?3u*lKX0~6EE@^XaJr(pqfMn21Q9kHWNeYgn^5~;?75KhaXT7lU>#8+)!3T? zd#M|bDW~{)duPX;6RCtQ6-lH6a;D_B)tQE)5W2y!5}+3QNR^4K>+ndK5Q0`6QFMJH z124lCvYD587L>yHal{zKzrohtehQ>o_Y!_P_{Rzyd`!l&Gt)nE@X9!F7`8Mw*WZkI z!naXO>*Fm5)S*jfFxUt(Ky~UQ`XW?>H!$?vwb3Yv`eI-nG`mCT>-z`W-y0OVnp|;v ze;-&~Fj`n|^+3^lA~-hB;<~UuPHEUuQ;wBDcebjbu`x?=Gd3+J=L4vmg6PsmP;gN~ zscnf+d?HN&|F6o8M|xV|4}Ia!&fAP$7C)fJuPiKw2CSnm<<}(3pKov4J3IHA%4Ykk zwACBzM01A|ADaz@?fHubiKJu!KE8XnCxw75K0hw!hFf_ngt#z_A!Q^y?tN>P`SOzxZl1Y5@No5NzKSe@VN&{Qbxaao05`}l+>>M`?tu+MUT>? 
zT|LLB)$3v(dn#eqrhzVE)QysAzYi zASb6ZU^Mz6kE-O8LB#~XknYA>?3 zpWbQzu=YBns{1@Wi77ZCp&gPsuR{T7$fEQrvK3_y*eM^V^ShCl7{afkLL`vz@!4Z0 zadKMC%liYUz&zW5`vEie0Mp0oFLyClOu3oZIVIzXsSRfz+3_j@i(`WJyI3JRH(X0h zSA7bSb1Y?U0NonbtFSub&Mkk6$pd^T=e3)am49)Vc&3ro)g?ItYW-LG6LrPx(Y*uF zB8EHmshOwJULHYrA>f43nJgn6-g6kyml^G#Pws7%wQl@L06gx3hUHA1t~i;=GKRR5 zVYPV-rVbQ6jCiK^?VV8W5tGtn8UqOgRaLd8w^w^1#>Wj8XqioSc_ul+(q#LM=nXuz zzXSKjACu`e{|G*2hlJ9G4TjyWG+fBA?_Ojtj!lf(q;D^^Qg8@g#{2Mk|Hg(Fa1$Zh(9QA@$*eB3n zjlC+-Rjx7TF!jMdISd0HmVb1YLti9d)zBxpz`KnS`yrigf-YINwh0ga%z9QLjX0Ep z$YVnpfjg13AfX6sl*o_yc3zmZIT>pb7zhTKafk&WwSd0pZb=F8J3urA(al2l0hLcm zNg1Y7ZBvNM>9IxlXsN9yu_D4LY`Q%z1YT=%@(*IikjBwq`l`;3$uAvuZs9B+BJkGM|W)V|yt232e`PQUh-!+!Q8w^uwtGXM}B@JHeV!srDjF41X=cNoTePiP&UK-qk z(1Bp-pb44gZTpH&3?Uj+g?Nx`e?V#J@4su*b(}pocnN?8*>OE15$qD2PGS7s0=N|u zogi6I#BJ^54KWcBbl@vSQkPwmUelU{gB&ODDl+wDOUpV{)vGOL(vR?NJJTvnxMZ$?E&CzC7eFh?@+M@(l>_` z;WA5B9!YAwJ;n|YvK#@=O|Ae57SnCUMOI8vV{rSmc{|6B1fYdFvjl zBAg0<1oq-U`lXPw7WV|X1ziE413#`atwt+GFr`yPtVFCOda7$zuf7Ku2;K*nn3z~x zju|d9iI@?7*oz;b=EfUy=Je^u_H%3Lj4fBoD8P_Z@MF~kO#~(t7obwVGH@+c)#vlB zod6_NmF?cc2#ZD(p0Pv5?tz%kQ=VE3b{K`yW%)Y2#rQ%-rO}<@0up0m=o>|2cfK|G z5pK>ic8xE+=>~M8!#`k#NV>oF=hPH#c2J7HCqe!S+#x^FG6;naDB>_f0Ni`?$WL6@ zk}7(SxT1kTE*u|2DU7K`6?%WXx|i?H?;U^=6opQ|ELw>|Ur6ds->2N z44Lom)yWHda_2#Dh3C+nO>V+jEBd(@OArix=v8tcMT%3|ei!@31^G^wl`1bV0Ip{uy6U}f(2ULtIuL{{2wFe!1afx z#lwwRY!IO)$mYN)72z{vpr_~M?cHFy2Mj;vEZ{(sP{LAmAE(r%Z`rhoR~Oi{8CNC6 zHnM*BhlEBzG=HuSSQ$QnRVbIroCWGxZTsL=diDWeWh)}VWHv5)`>ek|7>UnReKlN{ zatggM0Wge?!oKH#-g-)DR6r4eX$*a6IY4$GNd4Kh?Zzt=JtFmOp^Ksx?I6@A9v(^u zIQ7m>#$U%X(M6s^;v6IW={_D=_ngI@%!t`HwPfLRVbKULphMgHRL?Y97pvU73{ zdsm}T0CT!HA-T6{#bpmQy(&(VD@SSqdcf>fRASAxHZ{QyBC5G}0U%>zUs%pKZl&X3 z(L*+voPd{+D&+B=)O4bV)6F1BsNNye^W?>g?{Fi;Zzg=ule&njv>4*w3HadCpBrp4 zva(Rjp>g;KV2!G~R~<740uFW%k6uUEm4;`EZcMMFW%*SMFb>;aK8JQnaecyj>Fb+YT=U#`BLVAsJ2ne`P71sW8>q9HxAFy$7Mdv_4RgWA*qxV z2<1l!sWNb|+2<3kX1W80DkSQ-RC2 z+>^ihLb-+m3$6Gdb0^N4PXU`vQ54+hP(c_^~;}o>BeT zjdLQaSF@;(VNCWik^um;dnkORIwJV{y1Q5ZmP1Hx?_zP)%Tizo03ItZJZ?o#h^sV_ zj=b?~1^|%tZdI$cKU_jG^HybL%vt9MIF-K~#2t~FM7)VjdVP^3MWuqSn||xxp>xQq zR*`V}2&A2=pSrBmDpd!$z+~AP=GZ6b$DLhWu?+VAHsVa-Qu~&L$`8H(2e7M{IX@~a zQs=ILxsPfTfx8T=oG~Z0T3!7CM2?xZxTOR%E1(YOe&Wm&8j;D;>~|G|16ukl1J1lL zY3rH*5x=|LOE?kV;2iJ!_({L&WL~sC2{%N6$E6lYDL;t2Z~-18DC>MHXUH6N{<_#y2W6wa!%e7AY=)2sm%%HY=y#BxYqTfU_-KY6#aFT${#1%6%gcjT5Oym8C)bhONY)uC?U@HpYk>VL4!kZ>jDN zZ)`#%?`i%D9hzNVxW6}AaT~XMl5gUT8z-Hd^7Xev+{p)} z4W14b#o*6F)!vRm{jsgBt%ws5*G=w5#j(4kinPahuI6e;Q17J+N`L^ru?}u$`l$2KOVHaMX`>>`3 zXX_0@xn}Yc7r~ZoTSPN=I>^i4g#|3wg@s$CxHu3N>a)LH$c~qx;>67usbzIq`k@nR zO{<6-T6g!*NzQvgb~ervYQdzv4!D`ktUKLPnbdun&N~@IO!&H2e*&Dq8%56w^(lC* z^u-<4fY=a`v%^TP0G&|7LBeN#|GpXokYM6Ch5-pGC(j_qH}8m;%^uvoq%?HW;_%^X zNl9A))AB@ZfyFX4)Bk`INggNo+D)6h5Hf1Q`3QtafrUU<2VegiD=P%aULdRT#ZF2F z&%bJHTv5<`_jtR+X+8|8=-3B^Dj=x|8Q#EEU9x{z_o=0i&zAWM7>l9CsINB8u&ukQ z;w~D0&)CUn9oH#THBdrzs@W0{lrb}DMez%3v++jI-J=mz1+-1AHB}yFDb&;^Q?I&* z0R(TNzm{oRX+X`LU~D+7UYsfzi+hXbHL-(I^$e;{>@XO^{%#RPIYbM_kcfEC3V?yZ z0q*9!zHdL>g%cdD5^jF>`_{d$?Xa8nMJ{y@&;#dwec=@scZa!Pe9POy zA2h*_sH|W^@Q>scfx+Y7&XICO)946y8XrQsX=n(5|(Xy?}Mwzm*)NuF!<@yI3P-R%IsmoofN{=LxULF zu{5W(=*|f0K^tjuu&A#&1jDPz*FnaVD+2uD;~@tuz`}pdVKOb*_4Z}t3f2F3iqy;U zVbzNlL3H9hA1Y5GNsH+9lE9aM2RLw7=9j?wO`BdleaflZycMn1KXU?aCwg_Xg`Vbr z&_dgC@K{`liP6J3ha;F7!!gvxwg(hGZByqM85`p|_WW;uaPjTi3ysH39?>>|MqxTItTl5Nx16>+Kz0Jk980^YqHkr00+P2JxY2K&M}EDwlpL2$~b zUA=rA5SQ8m2g{ST$5J;tawN3Ee~#O?b{gv0DPdMN88xDpm;1WvO~Sk~{;rx|GfW)w z9`22kBC!qlYKyGlLLvurS-cto*x$|_xoAyarD#qu=k-G94D!}Cnv6I&cM^L&CuOmhDG ze0*sCspNQwH*q8v&PfGBqe@r(j_OLc;O@FyboWr~7wA%veV33^#P+8cVhMJo{ApNI 
zAp-ibrs*!OuRVPOdl%9L+L`m68vv>iGajZ%nySzbks$gMdV;CnzauE8(0jG{`m$eq zh8}}xvOz6Q$=Fv?V4mFRIk1Z?V-{{!@U3}L2UTEvWF#s_XR9*%XTUNxtUM~=1V*LS zcuRmoc`&)7K~zG8Pi9;P5bZACaK1&wr4Ja%K|}3Y8mZOfu5LWsj~67)E#+d^?%j?Df(0h zoeJd9xOovGC9~LTj;F@a9Rq1m-nI?l-8NqFjMc)K=C*M%=nmexM?*$CK{Q4&UABvq zzz78IucD#xObk-LSPJ^tVXi2Ar*sklwAV494?yC5>)nDD>^n{|zB(97*SiRi9=}D zu8;>0)RStIJwUj?{U4#tm^-dHKN%hov2yIlwEuxSxtC2vQvL`2q=A-e!I|FG^T@#5 z0-Qx@>6Ul}o&aXJb?fFr-0C)Y0RyHg@W3KUzO;racgy<0fw1+}M2ynH-+p9nkY$tj z$A~CId!{kyxP$aIk`~dE<{Au@Q=mEfnjS3D^XT06H8#yipMHug+$)@X zZ(yE;TFc9v+4D;v1kEz0z8Kh0r?KpHe$eU6sSd->!}h`^+Sc02x~GJvwh3FBP&CFD zmErZuu3HD~eBJs`4iOwEoSr98cA6$~*f6`VWU4Q+xM?D5bOueo`@1SrV2kmkqket~ zYL8#Mh}>9l_bxAJx?CT8J>>nJ8EjYy$R6*zq`bV@GzO+`Vy4j58v(3=BTME1D`B+x z@$NF?>g1&7Lo3n`0>~>W28#i)+wi;NA3l6AVHswODkv#o0J#20Y>L%l(}!PAQtGZn z_UrPVK7AVSBe=j1uU}gvztD`>cb3S;Q{1kmmUf`nd=oSPggy(b=XL%Z?&Z7Fa8a7L zssr=q^;ksqA2&pzaocxwGl;hlZEfwf>ViZ3<&?dSpD=R%aMEhR{8x7Jy&7`G;I;8_ zhY8lf(Q%M5+FN=&flw?c@RU>5N?8$X_T~*6Zb2MbAGQ+6KHB@n1QVyp5muXUyE3j97aBt5T+u#48=!ViHiL`=hp zmr|H2%11Y1D`Zz6`Hue16DY14Wsg|H`}YeN9WYd`=;X<-A%i2kYu11I2yqz{E>Lv? zx?N5&53k~!C||I4q3z|0msj?|23#15u?3P0Ib)saYgF3IoSep@in!hiO0EbQ-vcC^ z2qxb}511);Ng-_0J0| z0Op1CkE#HB6FIfK#D(Lj(d6oSv45&nl1SPqJ{g(rk!T4+cQ*FAxzAs}s-mj${;dJ# z4yBd2uEiTn0kFv>g&r$VZ@1v41F}r5>_YDg=@cXE zKm*Q&5MNx=6^T>r-?IUTGCCQ(H`USIJrMe2ax!soG-81utMCk)U$=d`<*b3FPWm;z zZoZ&)Rp6R~tNwdH$P;e|4*dEx5q?hnD}zl#`Z2{VxUAgupMf>)EY=g5y!IpwVvT>|K9gxB z3k&|#5F;0gjKh|F>&7FZq6}(QYk-l9wrx5f2nonWiS4#`looC`G^J(nt-iE4 z<>5j*b^GDNch8@@BN_zVRio-FAO#2<9Ad=Y$31QM`CdsYyZ-m~vP(+9NUYNB3mGQz z3m}EV^m@UkXb9&aUOVa>)CmGFcq{Io`+H}W>kVed=&r~RGk45xz=N0Z+(&7lDnwuw z3Jwk?%VZ=ZfncR7Xz-5Y)608=L}lhW_Ceu97U;N@nYK*)z!sD*w%9kE8||B6?i%y z|LVBVu%o;Q8G*h-BgmclpF*~Lkk9P)|9H{^6o8!_y50Z23%%b*-)ClfIYkZX&Q*ft zp`lvH4}K9Nk0QKd2U*v`bpQU#H*S2u!}khX*;UF-`7|K4Ppsj^3q@#3b^AgWI4iEg z!8=Ij2wkv8LE0g)DpKIIrB_C(NH8G`&t#v| zq`ioWoA8=%{;|~UG8<&ONA|)$e0J(%3;YVxvbo4xl^_}_7b3d> zOjo06Os1Z!DGiDX3p2AJC;?qvuMskhOYlM<5++J*tC`)~Hys@c!1LJsO!`#lTXUUh zbzUw|?ofO4&=l_6N!%zmNN-a;`-`zlYV!AQ2!B+JM69a1Q#OWkrT>Cx|9)EqyXdJ(@&U;Z==5{h|$JQ2JqUbLilf| z#IUtdUm$I2GL_x}y4i;4r1X!2ppri7Sj2;LUq9SN_rGs|{0}H$F;Y3aDnW2n5iT`~ zFOn8c0d*FVb4v@y+TPB{Wf*Vo2n72Q7nbAZ%=WZxaXp<1J@TlU04CnOn@86~)fI3- zyYY|o)@GZzu-Bap+`roRz8koG7lk}Jr=uF50v+FV5@jnUO*G%b3>~1?fNBAq zH)v0D!i~(2u-%(Had#Rcn+81^XM&*D5^$)rW-4E7!eDF;5X-n1Ofrx}8? 
zBwj`|ItJTZd#@;y**r*@b%R)eTI3BSj#beNG#Jcv?mQ-m~6UcFU|&E#tlwe1o<5V4$|Nk|5uESK*iVJHR*QE%!6 zI?T$_5-PIm)U0ev1tXTDon2^- z8M5m>l)DPW7hP>@m(|2yMtTB>+S!}MHMo#Ub=FGb{a`u#cBZ=ZDCM_f5NHmxV`2a(4 zF%d-kh3$X!AP@q4h7>2$nZA^JM{vROE-72TSN7V`ZKOi(SBuCI07`<$6OoJn$l`19 zCq}mjeKmnrADM9R`ueNgqwn3jm!6&uu6>v_=2m0R3>N2}1c?PAEQCl2LfYPTtR-;m zc13Z)cN+^_jHgQ~aH)(!@Y1vK!+bUYy^5!Pqn~=1>!1Ks_Y8o-~|Xi ztd3?Fc)*c2d1pxOshqV~7&ncc({fW3y zyc^uNFb(n%cy%?>!qD3d4-NIso3-|8T13%T5u%G19;gp$0bFa`qk#~U&B(u&2ubf$2dYAq}ij?n}E2O=K!9;I&hB z|L-{G{yksECS2yGYG&s-BJKw*BC`(5b{DR&YaGw0Zmh)JyU&1ePLCk(Oj=R#`%RZ^kueG87`HLIY6l$UP{kDDiQf?J1i^{J$+ zc`qok3+K-hshb+I;U&7r^aIrJ>;dMS>_7jl32BsPc*Us-+bk{L5ZpaOZ3fHks+wGE zUnEYIg^Z<{3|OaN&JxzYg*wDGJRavV*bX2QOFgpsH*{=ZPGy`ltS=H2PIL zZLi}&D+`WDmflf&=Z@n1vS7{~dUMCWPu@<2m;1ae=MpG!0gV3z&Y$l$%gv13JRl|i^G~K9v?Os z`rd8#+r)EflbY`)ojR8J=iB?sj)osNcyJomyAgqC#MAA7xXn1yG9IVr)29%?5ts2C zJGSOUp(1q%b^Z{pvjv{%@;j=u9>Py(wnH%ll)w46a94*;v3%yh^G)@rKXTjbf=7c3 zZcj$|M-CtFO`^mu%$gfji#t%!b783gVrgpJ>yf|IWSAV)z`Q{=_ z6cN@eb{v9z%Ix9C=bqXub|9M>Z@3^ozasz8QZvrOx`050uHP=i*~ONjpCt6tTFgYY ztfjA#(p#%-dPA};q1$clxa-n^x{RL%nhWguJ>C6C2XkF|u~)CWkA!X>5E)t{`2H}T znWzjfTtd?(;e?^KX$)0RkgCjm&Ze)v3BmU+L4)nr$4jlxudqIElx!m^jby*UFJI)& zH@3{%Ed3^~j|GdTOioa_Fjz@h6{&-PujRa$X`d7l9M|zP!~$%K7kg9_|YAFcVRse ze;AX6I%hUbratR0bB~Vhh$MZQ0R-PWJN>7w_12&!z|O5aEtF}KEk!Z(7s z7&ov)`slD=l&V7~@_PEFlJ?%H|F*FrIDz+-9+L6;#3jW*o}4_WSrUk35+yuOd>5U? zw5bW2xM0!MT?uWe`mgo3m-_tneDJ-CcoX~7l$-*-ng*V4es_-=-sD-j)1A>*qwSSZ zyHHvlyq@09ZgJ|H2O|Cheq85G(XXI@l0ZTM8VJD$@#kS_bxF~~(b8dAG(|yJ$niP1 zsDGtuPF9C^T6bx#c1ax-5Gxfae%l1Hu~>YBDGO28l9L^sonvyiR55)ewC-=@{|xC0 zjYb0)gLJ)*o^R}iK7aOuXCD|pqwQ$X zYXT18cSrrG(5wuO7}5wn-u`a3b}}uZAfi#H^miq$cIcZ`+_by7mxS?GSxbnG*{#Yy z+$A`Q<#$3&O%M?zQgW%voo!IY^h-9^AnT!?8I8VX>dWRY<~`rp`-pHDNU`~Nd0*iK zvdhVe3yzaVNx}IA#*6Ym&`=U?Lk9-|35h$Auc>3i^Zi^;GXB;Jv`8zORzpB{dWYes zQt(7uwhRiXKdj#KZHeRfN99s_25$cWV?bX;MGp#l4M2cFy@3o3sC&>E;8M3<&168V zwTr)%PA}W>1nt%$(!I(wZhukAw>TbbIcIrL{_{`&KhJJ2tq*vPCAhFSHvvvzpg7Wh zE_ms0aP91PhhRR#OpHiZ#Z>19`?!))t7>)lA>N_`*fWG7&NRhpb(1?S1a^%j1gj&< zMtLkyIkn6t`(9&UMVwb=+^fQG6ebkH(Qn1INDGD*&pFUgXleDDTo2kZG_h2tK{+4W z<54-`;4x6dJdLlW*mR;+$VM_eUJfO8nB^-@K(VkhonAjbdg;h~{y0a%i?~7W@uT_nYT) z|D%h4I(iAFI^ePcTX**hISLLSK25F+%N^E(U3s>m8$f9vpurt@j! 
zuFz3-9mK7Em`!bav9xiIaz~`6w>R!V-VwjK2q-OFis+_cub~#OVE%?}f3OqT{4J^t ziFiRb`>xxwQ>h!xz2_131#GUT|hAV+8nXNM{s8& zrXOWW3%dmfb5%gRWI;*1ZYabNl>7%61M2ko*w|Ubi~`r54A0FObB{*%#lg(<_EL`u z&#te8IG$`F2+wmq;6^Js}aJ{JM zI=Bn5t_eeji7%vK9T{Ks-Py8p-J>D?Tf0=CMxEVspM37=?hd4Zml9yCM>u zn4FmTZHqqQV%wY|piu7{8klsOkxiI=iWn8HU=Bdw3zC!5kMY0V3GYK#jsgxF^b#v| zdp5$0=`s7!0B@&`=Vn%xskQY5{(HQ!y7+n1#2VNYw6K4vqzvX6$@( z_oOdeR=DR{ScM4dL(UlX%TClfKm%Q7|4PzZPM**sij`(6BdnN9Ma9HmEE=Ng_f`8K z^F%g2HoTXlgUmyq903}^P$6QSQCl}Rs51{T^iP8mj%vh2ThHDd@&<|S)o=~~xdp09 z*xCVmf;l+dDQnYUE@ls8p>}?X6GmgW>9DkHU)HhvLNr!|*e$#h1iT))ZzhU34t;-V zVT#V4|9T7;ER4*i11w8xnC1v3s8iq@&n}34l8hNg^v-u+6}gwl|~@aNY@I)P&?rnH@T$ z24EkyJC(mLJ`P}lgTxoc81M~^s?wsVr@W}?|5U_uM#g`A0V0k>Ql19>i>q9oFN#NQ zq15u7%Wwjf+tb_uuW`(S289aPi>}sI-I#U3AVM;5r*(W0GnOj?%?B7^yX>w)(kd)` z0@Q=8tujgtU}c^ZN+t>bfC)0hX28hc2!?!cl8I%N#1SMM`nasa{mQr$0HINh@96;- zAy&S31*#T-_<=rQpo$J|O1NVc`5Z8Uh^JgRF(auLK8|6(w zA+~iGo%101!T9?JLi+Y(iy(j%lU{Q355MCJV^p<1J=BP;kMsrnogd*r2>_#Pi@3c& z3sbiaGv2{9g`_Ef%fs|E1l(RvNJz-aqMVzJ(f>G59}c*i=p{7&D@^LCwMeSLRW#Uw zgdXG$zocXWzrsc-DJg6$-QCDwya#9ZL#vWT5Z92^W<54r4B_lBFo0M-UO^j?FA1#%gj4Ug5AK%0 z6@^Ml#b`~^*IWlb0dO3T7NH2HTmF4R6DA-(@DE3-$`~mV%2a3qU3?F2l#_ee-5rD_ zZxn6Y94m;J@m3$>Y!z%p^mIR*>Y1R43c2qH$PE9X7H}a&8thDTPFs6J1AOc&bREB< zk3{E0hG*aYZDf}&lLODp8J1Py?0!X|NH*m(vp^< zR2m8;Gb5!vR7OMzMJaofghYFjWMoHDR-w#{Hp)(3w)D)*h!8^byI*>LZ@=&Nzt6|* z{o;9D*W+=H<2;Vz4BIO`f7{n*c`yUT2y3d+gY2~xm#-er4h^aqyi*N3M!p3w6fCX+ z;*U|S2fo~GDG^iCN2;Gfj)I*MR7*OC%m2NJse{^MjggDuplx6V)Evm!rDej!!*1TJ z6Z=>XoQg_Dn0sO_z2vERV0C&)g}eEuUH;zO#T^K~cQM!6gF@pG)Yr{?na* z3~M5;F|b_gx$`94)iCD*6AKC|cmzZ6+*3>nSh!FNhm*%1YEPSBV_e=4a(Du@L5)^RQzPJ0 zkkZ+03XN#eyMCn;_L2oLW{!z9jj_!d600ulk(kv5sF|5{*SpE@V`C*4b~Kso`9!si+dqss=RpB^tKwY8VG zzus)w$IXy@Xj;Lyb!8kc38oHo438fNNAH8N?d0n#(EZG7x&x77Kmk$z_3gb-4t=%f z3~k=%4oGruhS+?w4OLxNgzLaB8BhwVaZQ*L)JkN*Fl>CZ^2H5Yn5? zxj~44fFqQcab|nJe_z2M^B#ciEC%|r@aSP^UfU!!K)4q ziq~(6Q&%ZRAHiEBN3%Byxd1-D_@hN{2w+R+(}Gi{F1_1;DJ)=x-|2qzzjOTjY)`+P zY-40)6+sK=oh!iBRD*vm7VnS>6AK#!fXP@dIJG$wc4zeL3X>RHzpfpJOf$V@ZP0=% zw1>R*LnlqQ7R?U|k~z6+`}SnXy3JUt4sNrn=q#FqfB#S_sz?A!Kt^(ieG;Ckm_k6>3W>!4bHpGTtt}eQ)Fl>%JeuyQ-i0Ad-4L~2qY=* z+#gJ(qd1Qzh1Uh~Nd2ae*9399&MBVPCQO|=f60=MZ{Cc)@GGqqxg{i$mLEUnJ~KT0 zSF%6z+sDFSGTK(JK)PIB5=U@_rS3Oj4P#;uczeOaM~@znrELCO6rG||rS?NgLD&vA z>$Bstu=$Mjr+E>h(Cc}s58tXA`GVsFo~0Nj1VAx7TcLE{P^cLB&jo0vZGg;>ITR2dl= zxgnbXw+8{Jz}Ow0TJzRuuT7Mqf&$`=(ltMaq!sR=@tLsf!a|7{JToIwulbaH{!y^gDo_H}|EteoP|RS*1>Cc|x>w?0?%qmGFe(ToDeIavU7Q5diThjA zCOutU$+@_H%y%8%;^;L@VhT*k2q(-XNMKki`puam-35(Dx7FPm-)}?H@9uAX640nj>wWzAare)qlPbz4DZB~fYa+TfzcP!BiXUFSjQ5m? 
zDeKYk)5lP4KpeYtYTRU&LmoIVhb*o*Xp1miJ?B%NTZ)w*eIm?LLa&#HM{KrK+2zZ* z?N5frJAJz=jr=fAQhNZpNCP@5Dz8VpjIV;HDLZ z*a9)Bpc`HR=2yHZfU_?DZh6~P5n09xy?@vI{17Z@Y2{~mIzn2C$x~i3%S?wo=$Gg+ zTL9jNp9%a?a| zRr{-v6(cFGUGnqtEScrN55=L2c~F0h(R;%yB zVISMCUrHSQ&-26!+*~pLYPd3|c9N+oV-^A+xZtA~2g0x1ym|D;vct4mmCE^p(uYX- z3Kbq~)z)V=w$e)wyY<|LSW7|k8%LKZFaI}|Sk+(l-6^JJ^5-0RH(7F27>>k_DZA*Y z6n}tGjmINYErtH4PBL!Y?6rO6^5x^)+@42a+=*E$n_jOs1dC#^;liHgLsg|*d-5~M9JZ$2+be+KPls1}_*0)3@7Qavf(!=@K(QbM_8UQ5uaW56ub-|(X!6*1 zsVON_@wE4nlajFPc!>23nRddmYjr-kIxI}8Qz!q76e3K2DoQw+041iTrkDKfM*!B& zgU0Wzz82g&eATLPd#!!PPFml1nJ-BYrgJFUWRV4BjI&ng=1wm{@WWaP&6Ps;iCEN{ zo8zIqe#^s1H#nl7ek|rOcUrC-;1%V}EY@esuV3NXS%%G;Z*J^}^OTpgI*dEk%N?fk zi^7#o3G7A!yjuN{8K<)H^HY(m&vR6bu~pXZ2)HAtt%zM!N0R9w;|#x2Aefiwk<_S} zV14O(K0Gmd9q;$8-fDMt=W=pvqW-2z+KZPihC@DM#*9TJQnYgv8br93otK`u>jib6 zInr|$vi%Rwil;+|baux2;}tc$Fin?HIbT&-*$a=Kyt?$AJLUUm<`tLG*R1)f=`?2i zctNV^b$A^1NInpbM@x^olilf;;1|Saz`%IJ&4}HFg`Xh-6}ssudq?{v#B9-T9EZq( z&f?NpY4+r*wCn^Yxg`EeQg;E!QuAACq89RbP`z(#aq%A#^XGl>9}$J!zIE%$k%FNk zMlda0Mf&`peNq00q4!Xr2CtE=jGO4f^J}GC&L3k*Ccw(SvM`fcczsW+ogeP?k>X<# zGv57?7>uA6s#?{8f$V~gZa=hldYelkm+nj0sCP$ zSHzR18Z<10%8e8Ge?81RcXqR5liBM0i*_oe_;sImH$>1egki6zClm02Ju*8hW?X-@ z$s^*^=dWMarKFTEu!$bUlEFlylQ9NL!z z3H2{DIu{YI8BZ1Pi(+||%rOCd2D0BL!J-RNM z2o2=nAUo-gV|#8m3q@=1v#n!akvh5DRVK5ep9<_mzGg$PIb0lqsnu8yEvmEro2DNT z_UXF2yXSv*A%vp>@LGM7GL`m~k0E#*Z{3>c`D?=D$*e~cEXE|`KLNAvzW&NSui}XQ zaY3RHkAvwrvK`Qh~ktKIh~u1EHWimu;lBw?}fyGIyjtC zy4~8u6_us9|60~-ip)J4iOYN_yU19{u>y+~ssy<=;HP00Rn-7og7Z8mD~xcxU>?jL zB^vsJy#g26_!jI(8hWg^mz>QD@Tle-sw*X#Na>_XmGFl>hE5G$dU$UGgD~>}0~GU4 zWy6H4uAYZ>V9|_ig)*2437SfO|B1M62PnMaHKEuj6N*ENf3eC>vCYZg(unAX9NecMega?Iu((2*pi#A#_RpL#Nj4IXq@z24iw#HxAlAgRr3^6%7| zZJ1o*m!p_hEA}OZCjcAi`sImS*WPS}?~j^G_dhGrPUxf97!NRp&LgHUvFN%}mTHqw z3DtF0#w;wJGsQ=E`ub)PR4C7@&z!Ck7B3ZO2cLU1$n-fK#;BefGCNP~D!a(}-?d0( znvPZ3pg zxBb*Y)2He5{qb*-$L@G7`Dir@?Y)egxKqRgMo*kbuuSeP#d(7F#_j|1KpeKeTYBRP zl5NyT-c6l?uV#&@Sa3YXv>L7v7suSzNY9d`^5rI7C~gHaXrK4=R@%MR9Ay73)d1J3 z>9+55+npEjrIQ`?)EEm0mOJCd8b95Tb-QqPCy zm**Wlx_ITfsj97l0VGaxyfnw@WQ||&1V?}RuGpJbU zBEGV)TDPKN7Q{0uX!~uZG2Dsj`Gsz+nsgMzwTheXLXZvP5$5$9fMzXIfT&{^iq4@U z9@^bIw-0|i^T?iNa}@fo7?aTr90WaM%#4gjS9&p2rEV3XR%*xR8H)|>9sG)^!p-$8 z7=`cKzkkxysVR~k?9jV}A(W@*F!s1j{xjm?0Zp?x$!q$^CPHWaz+w&0jbC711ikd1m)NBQ5ZTo*-`I8c0I!GLXAjPo8oOs@R1AICDBCrIh$wyo=F zb!hPXJ^y=Jyt;cxnEw63{Sr)X8CmI9PPlLhAU=OxD1jc})4r?tj}03)-ocHK$=q+7 z$w$m6kx5#U^KN+P&O6_5QLgyLOZ1C~lN0Y*OX%bxVLxQ@z~Bwhgwm_Z16!vF}Ob0JNolNlx#N_cZalNhH1kHb))S#}7B7HNp_hp|r4@h;|L%)R;Li{cO z@f~!cxhgs6a4BGgr{_J~f<$5sBe+wB6E;)8;DB)I`b)4zc;G-|(r5dnsgy+Ay>~By z?VMh}$Fa}G+k5-bqoB{|5~qQ6wkF2T1Pl~`JK*K8epNr@LbCFEjA;RhvvG0QJOB9pJ@V`8E{f}R0+dY4ziHf`fN%75K|eeJJw3lc7&EtZ zoi=?sSiSuMZ>zg|7k5_yYb5*$?0%?kB_-BFC-^P9{8;+S4jMm{!-Z~*8_|BU^Wg8P zXFz$RVd`hHC>JaDgXLT%kIr-UQD1O}$UuDljofu(c$g`wOXSqAogi%?To>?#*B!~Q zw;62it@GZ@Ik7r8)g%OqVvXGGAv5T4?yq0&MsaNx4Vc!6%z{-ZAq9SecVB1Y+WpeI< zW!CC+cODIq>)!2qTbTHwRxUJAYd&OO8SA+~>}=?Q1twmaQWX>@b1=Am+D3tRvs(bB{J zJXI<=Q?ME~>2*@{np{jCw5nyo_@KC`NJHkA{j6Efp&b{PsPXLv4IT`#Q6(t&+Li-U80-Wz1IC^y4j2R{8&-aj_fpSp!*FaUGrs+~- zRXD&=>HcD(D`kFmkEwM=eqQzt4q#4{p93z>UXPSOa>+ODoO#(2Ix^8IKhNE4#KJ&- zLQOdp*Q)SvPSB7L?bFpUCj(J?-uoyl%Z`zhL0f6Atr$44d6e6qZ&}!MGhE_asZ{6v zE-fBUu+7Ws=anGwFQ2EA(Qjq3{2zA|P3Y9L3uDz>WX zLO`KX5aO^n)Pk8DDXgTZu!)E zr2Z9t6#41jCm9CBbaS<{i}CqredpOTHiF42byjQ*c*+JGhIG*rReh`_>`o_(9og7sy?;!Y}HFGJu4?=FVj(?bmHN-R3P(};v0n# z1Xt~jmpoLZ@}0KX5;H~HTfCvTVuxUyAvQ(_Ap%%x{#wnVS3-Cw8Rrr1j*}8SyS{7h zF;PE+&6I)yCa)fraFWy_lCrj9)&2X!5hfvu&^27lJ2(pwCSNt&5V}au>G|I?X)QE0 zH2gfh0U`8HcU@ULLl)7ZfF9=$*@$kmD2oA?xvjDYhzLZI(P&6iw^bPy1TSGNjnLr! 
z?~qLgt=xsiloG-mdw*M-gub+GD|5w@TIuQ1bAt_iC74(IMb}dJBQyo(NfXB6<8Yj&l|X z{kc&~^7{2R{{dJlX@>EkDR``eM&vpBv^?i;-nMNn>>JO-6H)Dv@GEWP*)_Paqcq5&==)h?M=ZFgI} zor_B~#c{R#&oxk|wbaz&JwHqSFf+`9;8i=VL2%IblVVP@G`glU$|8VN=3=XHexYUu zn4dkn(<46{E#<4tEpI1*NX;?r-e)ivGGL!mNUdn&wv8KU)eap#ym8yMgOB4aU21U> z0#KvV*B@r|B;beOg^ckj!^5RYE7iNwT(|V!uPhsH{lDK2=qQ*xfe~pd4+E{|)>IUK zGU?wx6JtFXhyA6`wv(%XMgm7W>oJjLaaNw{K7t(g5jbK?^O<){oXG0u6A~^9h-GTC znVw3**vebC#zW$T4|JoEWPHeHg__{XR3qV+QPT^rN`t3A{266r`x*r@5?6h)x@a4H z^Yv@jW@X$#DdjMA>QA&$r1G`Qy*Xi+PYNQz{i8teHa(rQ&Q{Mw zlVA2<7{vZ)&qFDR+j~JZF<@Y{X}wgyHY$hbCX^Cdf8z_#lNXC!1~>(Y1aTIlB|Hm+Z> z^E`H^OYGr_$x8L^$cFcUDi83&V{~5Y4Ll8Xr@6WD=942-2%qcsiA+LmjKDXB)Cw9S zvFp*BoAHOrcUy}Wi;PSXNEu&`dWlIDp%)IjxpQM8KsX&m9Zh`42Alx{(n_};J%0QX zpQHIss90?dX+40i0buYLqjRs{F;r{SS~HNUA6b7HoLTMa+A%HJIXO~0A0hYA)(e#k zjRloSzc;B#`lDvB;!7vRm@?^quxPon-n}UkCn6*mpqPw$aw`ou1Dlrg%!qM7Tv#Xg}74w7KllI-c^1X2;P|R07FLT1JsH>x8?M*yiU*0Rj zW|VWE@20+V&Dd$p7$45F5L983@gL9O%6{O$YdUDB-}xy@K|w)>^Ml6eDI2E#tF64< z&_SEqHzzRYnl)>dsQp@Xby-YMy?p1+;Lzn?i5?=2y6BRbPEMV^4AAyt{MM zydps{?kwj%h{`AOm;Kbh+rVb&#nD|96`x@`?v-1G4#mlGN5YK%?%%h6()niRh^jGe z2@Hf{KE+y2jn6Agjw8h&J@yxBE5(kBy_1p=VC)yr93P@Ak)zGB;7=6D6!qB1hb&#% za9e4vDw$ET@?}#~-7f7^f2`mqlV0;`v$C`8al8i>kcpR2T{LJJvJvJLmj|zqR0;_U zd`W}Dmb$jiGk+|B%o@qKK%wJatSEbG+Z_Y2 zKHAEX>Hl6PO~Q;Be}xw5hMFiqz;3q>ikWdi>HA*37tHG0dkd;z!2 zafja}&^-p9kvO{&xP`ra_(+Qse*ga6r%xZOaXRTfu*@>fs_Q1!><`duIH`EHG#v!x z*>m%N=htud8Mo6hbCq(9?||uN6!J`k^(-`z6ix&UV#P+T%Iw*%II~9)Oz5B{NQ7dH{L_YOn1#6j};Z;*}#? zSQ`hns2=_+{{Zh8P`tSO-<}D;N{1UY#E~?q5e1CbX5&5?rf|T9hdmK*PqZ2=5Bv5mg9M-B zy1DP^MpqePAG5S70qE)WuZq%JvoI(q$)~fbLSdR6fFG>UC!EYc9o`Z{|MVLZPL5Vn zpK^?yOcVW^iZJ-E=J}S6uKHET@AIo4@#EP)zCS;I_m(a30gEJh#FHuzT4?m> z@g?jcwp9}(ovT*|c&gdjbkeym z7~k6VV5SYuq?wnWbM$DJ9@&(cYYX&e$pycoPwDh$nvIOy+Bg{CjkAR-zCGdJ6_;(7 zX>sQRlF^@gOUQ4+9REJ3WN4(jV*4GyUHvho)Nls_c3h#}qnUyA;r-=1o%WU6x0Uid zUI7q9Jl=CAJ9`$rDS&FI&q-TDFkYd#$FfOVaV{buf`?!kZSA6FyWrZZkU}?*+XX=Q zQ@@z6i^mW2^WtS}!+@Ba=&4f3CBQ!@-z~19qJn@tE8{3_9noVA>0I#e7+i5Xdi7P% zIj|$O9oAQ66!cH!)trAW#z#`__(*M0NSF}qDU8gS*G~mRkVL{W`dTDH{-i1B&z@@! ztL%Bu3$fwehh59eDWCTD;&3vQ_s$p&S`r^B?4^^DGG`SHlS{C9r{8K5gsVjXPaQ!q z1TP^d=u>NJ>wvveRlYFf4r}Ad80&G$Hh9{PNM7DoDt+_j0H~y%Dxb+4tuGEw2*FtZ z&zwsp7p77laDO=6ipr7B93*U%G1Mkyq`4}gt01cVi(<0>qL}hu$=3dil@ZHMJQ@)C z^UG|PcLM!JGP4!Hiwp!3H^8STIzYi-_;4@g1KO*vPn0j(iLpwqXi+;@6KAXERwL;QBt#r~r#F)l2m zl7OZcmlP?6@yI*Gp{P4WjNYbn~CV((G3u#@@5{_kP|}{&Ul6opGe!yljv>EkQffh3N=AT1^3?Oc`k6BMR=LDo(RrV~}$E116aEt2TA7k~qvz{>CW7^$JQ7kv)1 z4?1Mm9mW7uF18toR3z)qVOq`#`?quc0}aCh-NegSXflmYc#HLqKC=X*+FRYYW+B!W zex9Lg{`^uu;bW4T4lybqczubDfLx3ojh~!?jBK&wuZ4|juq_OSD;^s|BStMaAg2El z7;dLJhKKt)-@7>2h=u}O^~&w%$Zf!x}$W&+W+R(lAHDW^pVMnuTM@( z6QIfr*?n&x9+n=+7t4oPqi9VcZ`6x6&nd2{@B~+g-%q z2+V=5Fg;IpL_zE?#@t)0UQo!U#TsYVV1!+nWN+}ar^`lz-~eo*yP#{ewA89<6t--v z`1Gu)v9Y=#*vHqGiGw#pIi+k>(o{v4bDQM-HiF^BdfHE6ww}D&Gg*;EY6RP4&yCNU zns8c+%y&aRwGC=ZzN&>v*RK0T;CJm#}{Yu@mynOw*n#_5nM%{f58{68FL6a$A zZn+WN-vog-9PI$L8v5Ao!Hi2N59>NNQDlCWiCnqTwmrsh1@m)g#|iD7t2D%0$GC(5 zw>i$J+9;11Oy|ehaF9^Ovy#q>hBNexbJ3+Un}B zK76*BjgbB_3mDJIL%lE~!`To@Q=ShqZZ zar_!2JNc?o0y>vdlcRdaJ!@NhNeCr`@AnNPwz_X8BkY5<57DlW$bZg^TUG=`Psv{* z#&&MUmuekJ^$J#uexuumbijckpO3IZHx399l|DlTRrG{KRo08k?7Bmk+B$INu_H&^ z9xd)2&W_w;0Y|@3ZQ(ld_kmw8-Vzhumc0&_Z%{xKwnXGP5s-`4TPocp+{axAr!IJ& zmzAO%ra;!CRwBe7Ja+8kr%y_cwt07d({%(NI5DY5-WH*y_t4$y-ksqQ%*q0j;b@Hw zYs9f>v^{_Q@Udy9!5QT9phDsbT3h?BdX8KLZVYCNAD@DLA(tk>(!KX5_BIKtKzQ*V zE?!R|o779N^ix>T8@PFzM2u@kaaz-lLQrpdob};l$Ai&0fK$4T-^M^xGRL3s0VBh? 
zfrBN&S7N{By1BR9iZ-=3K=2gCe52^;uSjV0qwU}80tGLg#c>r3h%@_vD$tlTX(8PL3;Ivdosk#-KRh*6_10>$b&x^sL)`~5S*VQ^L~HrfWj6)8fvk{e0-cAw*;8lE&=}n=FOO-TW68oC z3%RAoyWBT|vMr!yl%8C$?Nd_uh)@}0(^fHK-K6bU6IJGF`<{_hS9}IR!q?It_T995 zW5es$_s?}=&W2g*89Z9Z#g;nXB+~;ce7hJAR&-Gs6o?2&UQUkSAD^?NV@`Wxd%;~z ztn#@~yn?c;5N9rP@L45n*jC$fXXZbGkwWXwpG}WW%H(Ylc*doFfA!-HR33LsAFT4{ z4G19lG!rMXxfeS5UOd!otYD?t@|e#HB@oxBGpA0)4+uiG$iHzpiLh(YLA|usZ3QXf zyjI9>%;;X{HM!xQzbX$W@k&}n-y7s$^n?sn$ly4;xsP7 zZW&1Z=J{?gL|NCZwT)x2X)%mc?Kzq$WHC$V^4E&Dh6+{C&60`Fqi0-r;vr=|pN6h> zn8}SSmZ$w1-RN%VTv|~l>)XGFk^WG6C5J=>#ISYHH_vIct3|6m3%uPh4a*gasD{0a1n=Ic16ups9i5!szJI@2 z)6<}Vv{93n7$0w|w3q%C3y@&*8g<1ULl1r3sRHpp{D1dc zhM%j~@3qned zR18&J{hlJ};Ly;Q?WK&=g13D?ZOWW|F}}3Iz?cx=@bY&y42#_eZVYyjuA6b z5dtN=VHqy8?%|~ev=CczkiCo(SyS>YX^~4N{NkdW)aK>lpCxaXAh7qk>_GjDreyNt zhs4u~^K6~4FIo{4WKVqxbkkzF@r$5&;lL9}MW%Z(X+r+T_pK;8f1Zmp zPuMi3Cru$qn6J={DnD4+|Ie2{zBjSSWH$bVXF-k3$orOj$c7IZyVpuV&*)P&v)$a)YN1_7RAz< zr2qfEkFI)0UzJu|j;W7+DX+-5t9bH^ggQ94uuutpo#ZB$F*F5-znqRrEPR&$^NXYLF^7Gx(0F!r}_ef2T-W^0XB9g z$^m;UP9J25fMYD}_7{WJ0M7KTjT`NJf>v^p%U)HQWDFWKDC@tAW(&ZGI^EXsGU_yH zCj4`6^**$sEETzYWILmj*vM{q@g=Bp1p7Gx(-4huhR6tX z0xiktVuNCgZMxY1ZUIy@9q$3v2ZBYT*;vbRVG1eZT}$n@t$-oPRpK_=?<6y>-H_Es zB#E^B$e}~f_kX3SYocwIR@A0;S@+C6cKHlab+~IG4iKg(F#@VK#ztb>YSg}UH~rtn zAQ1tiM%p1ow1nZ03jbBGImcz)`t^{Os$^88CQO>N|BLKKl3nBb_W>CUr~{Xtdnydb zqNP}z5N~*PNXoi(yM)Q~<<$5$dj_c&^d&rrA2;~<+4R(Y_e6iH4Tc8Z>fLBAOIg~Ow5Vgn!2MKd%{>S2^uFqPPcV@zG|eD!g;Jby^I92_fs+6>RKLC z51qJp(FcYgUlKO8lyicqRgKxA;RUYqpw zE)FYOKCmXp{`UW}ZKw3jmLo^vV$!+rX(hWR7%Q@(Cr)xp4xE7@t^z&ouE;dEq{^{< z6-_pfY~bwK2>ZWW|IyzJ@_WOw!l$00Z+p_J!CdmetU=NygfS|?)I5(qgnCHuPls-? zQE6usTrW}iXk+vJn7`LpdozeS5K2Gv_@ z>#a$8mAKFHP|Z^n*_MuPAcg+m!nSP>sK^q&d!yPW#tOq@arsGb9q84;{#=GR*xL(| zHkB=JC~SF2cmpD5(AgCv$-A90VL~(PH7k={wjW`*_+vB?91-oGxyxKj10TiXV7ZxTc}yf1TK(@1b_{Y z<=c-R?3{S}=5V?gDF?TE9nFib1;jTdNiSm(y)X)PM#v1^cHN62C9j^q4#M z4FcGFH%aC>SEm#kU7YX=oPb6VoL-mV&-0LTBt7%8a1EbChBM}Bw zT)LDqUhVqW*u^^;CnqJz%sv?P;|6^!Ms-y)JNZE+f#3HFx`lPJ^`J6OJ%<{zx^qeV z@HCCVvv3?tpS$A2*cl!k5-UqkjruHfIYteOy5#TPWk0mYk9ZH~ep_s-b7;30&OvgV zWeAX_{J)8ZE-?jXcE_S;8r%Ql_=C1hin?>Hr_fivJO)A;uYK>?Gi%8B0|&Y%dz$5R zKI+>kJR+vjSHH0*r7W(&SzwOio-*b^$EBYU`@8GMRY=hhA`6hNjCG4xn-K@_V1;ey zdC3jxKArPDBvFgcQ-HR&9)2md(WH9qbXA@*-7Q@7=~F3Yj`?e4Og8k)wnT=wxp_{% z|B~)^{PgA>Y*;PsU>(M{y90D?yWZc}xI89u8Snx({#}^YKJi{*WG6ZsJ{cYZK%+m< zmT*~H&E4!5PPy|)UiM8-x>z(CM~@x5wKsbRW^hnfuo6Om`-29pHSn8@pI;V>|IE&W zXBg1L#YxNLso`;jYUty^PR7H#s;CHkP`=v+G9|Lo0M9n$^sHxd8)Ed9_KSZyU)9kO zIYsDu!3?v^Yu;jw^ZVPKW4Gwb#{caA(kj{+hWEbjX2)7fTrvS8mPVf;UhWySsumw9 zE?-%hAa&g~z&HC#V92x!@_#R^BP+`PQ0>LC2rsV{9G$ivj-!q2xhjkvTgU!UsJ7@f zzz0uK=SRLh=ZTN91B~|0730pf2z1qk5wJoU%HM|E-*gIsS@Z#oJk@f}$dG=WZBIJ9 zGV;|d!@CU)Np&}uHUEC{Ugzwzf4{kdNm0>0&Z5&m(1!#`gI7!}87gs!ZmhnE8E_HF zhoSqmt5-F9^!WArH*5(y)(5TThbzRU+!<3}CZnTJ&u$yk2th;~fH1-Iaq?tgb=GUx zs@cc8)0o}FM4m3Axl@`-_~;H2vX?PNSEmhE+Ggq0lrV|%^E~o@ni2zyZ*K1zp_KRB zAK{3%L2?@PJ7bT(A5^XwYWVTaZT_Eken!%FV9Ny*u2hP8a|I3rtv^RKI8|8T4R9kC z!WHraYAP7UOEc};H~rVHWxV>0?r#?@S#s<4?Ibmc5EFnQ#iXv?y4^pxP)df5;PJ8~-K`yg|34PfAclPA}$TUW`S+;^WjQ#Wr@1sjK-2FR60 z+)l?qSXD%=Q8V)sD2I%<^ilF^@)H;MHhVg-nmpHW1g%nA$a=Jtg+aem@#tXIe3~lYEkjsC2jtx|3OpIZ_uCufC!n$xckRq+dsy}PO^Ca z{yh`|n8)iR=%`Zt7A&M$T>{W%=J`+RUlNcXlSl>w>wluY6S}G z$%S9}(FaVVc^mXA==I2($Br4(_2X_f>aaMhleF}TbD4&cK28H8Bz05HCBX+TTER)L z61U&L(JEh6d1Ug{n>TYsVqMnOJbcL3zWzu5X3x#B_7GdL3#`-MGY7?mg4S?_JgLuY zS7NX)u~<#9Kw4TO@nhBjlFq{p(T=wp%SYnkDxnAm+n$I|O2RxuU>?hOGo|a=x(6mG z1InTSl!;UTjH)?`&zQ<`S_Bu0r~~~NHc=0kA4L+Cy|K4KuzK4FjpBn}Z?C;)1&YL+ zw_+r}`UehcETQMJmE9eE&9zS4J^j-CS!%Ip5LZl%%uwL3Ogz&g&mgy?q^ET+FS%JS 
z*xo|1&h;@bQxxu=q*lV-J3r@MUO>*d(t%=aJpucY4?f91Y%w;q8>%F=9vqziGgYWk zz#=R|URKi#qJ1k@VwuV%#;BYN?k6)U&Q#Nh;jGCu?$yhTtzG7MncYQx1$m8Kp07}1 zS^{Z9ZTE2ZseEw0*#^(IcOE}xnhXCB!fC|kPQd02?VpRyM!7*gt2&ay`OLg{5gDVE zVzT1AOaJn{av!)UHO~Kf+RlFG{2LXQYRO5&JnFcF>oGi}?-^ILvHdw8r~)R^y?dvr z6{}=R#pfJ?U%)Cp@`bp#IAmI01`)33um$YZxl1d^9zLv3uOjXvwJK;1k?I*Zb*iAm z`#gHZPuxR^X;=27F6E_MRYZ_{DrX0 zI21ku5wJmH-)ep-dqWkcd?i!X*u~C;n|!ynwnuB`%o4NEMq#%jN9yVR6LMFJlBkU6 z%TBCWd~ycy%*?++D&Gxm(IY?LhI^l1mt*F|Vh*i5)t8k1v-$@x0d`rc zIsQJpUJBsTtA8xf_Or-QxMQ?wFgI~hN91_#js2RSmUZi*XNGp>35KYb`wz^BnRJ#i zu~Vmnv?1|8musv7tCa^`E1ERBi|R5Z!Un5r`}gl6BWn}FdfvhVizBj2N@kF(1yW5} zI9EC!(%Hvv-GARs)ifJ`b)2W1M3>_`cTRJ4l~YU>3@!Pp`Kp)P;n0qfShTR#RWo~x z;T#UGa|dy4#Zq)wD5se8i^OKP609I3lPD3&#RU+M7)1cQqmBKmCyT=0=Po`)LC3?+kl8itvHrR6S**~vh>jI_JtAq?|6I?d?gVD%@O?n zywz2hQ1y9I>`ERd) z^uo5ZqD+t9Je=zS9L&!rK}4^5=iR#b)iL{ukNUlO#a(~(nmz{lKVcatWSysR!-o%J zr)k+`xyC$crCBhywbE-xjx^BAsB_NY;Padn2p$uHRaJ8Z043K%;4u{x;4GQ(eE6cF2FAvah{eC0hd74|i1vZRJX~LNhXhs& zuAB9LKCWzTf-&7d*qR}FaVh#`T_wkL)u%sF$|Re1Pn%Dcp4x#L~u&0 z$Q22F)MiAn3Z*YkwF-9J*fEt87Q0UAb^;Nipbi_NrmqlZXxXCJ+WrVnB|TiLmnD-W{gC=vx(Zf^RQd93QNn{fdN!-Rmbj=ftBH zbB6jd_h@zkn6jR%;Bwh&x8+JU6*U1UqY(JsL}Ed)=A9vp6)QsA3X}IE*^(tJ3_r0U zWbD|ntgrVnxV~yn$nM?9QH#M|l zxSua}-d$2MPB4ZR(*dcPG-XOX5ZS_oXg0#vD<88z<#Rq%&a^_Pacm7-u3Itdw8t3! z`e$R)B#Zg1`kRVO+3OL8pe)dkF@jRkFnnvQvcXfhq*^m_NA5 z#n17ltX=bm=VAmZG?tBQ5{SQ$_hNLbwWxzw9QEt&URd_!g`XWte{W2S`*}co0M=YT zY_OxlobEXEYjf)MPmT*_v5-8HE^Lau@2JQ8t`evz9?>%MlWGQ7%~#jhs_G>lqK|i687$` z7yR@by0NJX{oq4ua+$ zqbcv%BVcmC{)cvXNnBpC%Q`_{oLde&AgtHD7r8k-UDVXH-ZOS6_V>$i;aSn-@qgZ3 zOj(&N^|7vQJNX2dnq9S*aHsovJJw$ah@2j8S+I^lN@TXSpna0Bbl<*u^$q{RbwM_` zAhdHvvW8p&FT60ckAdBJ=UB!D{~$v@Ylatf7gw2iy}b(G7NtdXfvkRGUS9O1F1}$1 z`)Z+9|81?Zp|n}imoi_}zjfxCRcAoEb z3MonN4E>UbmB$#IG;b4$O~>;rjSi5YXQ=A8qP*)1v2{ON-fm_L+0g@I}?gHE=o?o#S3&C680~j$MQi!>cvU|5r)!9Fz-wE$x zSjW3C7k#5Gdi!n0`-9C%PSJ9Dl?LJNGkpbZeAeiw)@J=$XfWxKeDw(oBc}Y=r4)u& zmd&Mhfg|?f{ebJrZS_-qe4eU2Zy(RYN5gbOIJ{?y>TIRS4~=`lI$RW+9PJ|*DthOR z;(OH?`+2(A)}JDp=6~OSP7Z&%o_SKn6Q@o+4C3}>R@dc?dH#ofPt`OSHcWj>>GJ0T zqkeYoOSh;~)$7fKY11D6oIXf3_Ds%6PmykKjaR$6ZnW)0^sV`y%1GN~HGo8c8-}e0 zz=NZo1%BUDa$dgt?;iJ{Ysxwc9hZMMh%J%I&=+~Eh{q1(PWhjB`H%Y$8Q0g>U%i_0 zp^IGL+_?_#<2|oFfB&9?J|q9cb`ma7szr~qM}mz9LV_b(qS^Q{apI{BKZSwupI@0P zVAMFdy9<&V>=(+A5{?<7T8c zLF!-57_@`f%aqbAO!@^`;c7whjM^zv?$qTd{FPe!@ru=Vzur<2u2to2DLol)>g*%6 znH^X7-1OTtno@XNrp0N7qgCElxVt&i6G5w@e|H<=tnJq?eK;+T!=7l#p)r*W6Z-TC zUAolO&8=dAjjVz~d^goRslzAm+#$sTWT>><8}#3whXPBs{n*`LX-ZS!)n7-9^!KY| zW@Gut(7#)4jC+}w9C;zb@Gg0j^P5A}eP%CuSRlUF7>A~X9RbOk83rWEEVDB4IpU8_ zpi|Q9_IVNsuO@-h8YS~--PS`ea@(*e7UpL)E1Mo06*CSM=%o_kO(ZuKFjj8*zK0YiUNM-8S)W+>-Jg0y^p;gecQO$j${=A<_ zNl{+XhV0%LL8iX=cYK(UvJJkEW};u6^tF5U)SsGoJ(4$+_8HK9kGXc1QhLv_gYbcI zr<=@LiiY9EOf4>iypku(ye6lTKmfMPZI=lidX{(g#)P+Xi>rG_Qb=(U5;JLL{O$>6 zq<@-%qJTa>3{tW(muXo{=?^cn{Y}(gnfzGtmnbn4uolwNFTsv=UQHHt>F21pL(HHe z2%{{U+hjE63N0tyz`PaRu-9;-hSPlMqSw+PCfE*Gz6)nuCgPZSf&Tt2kbP%9(M{h` z(|&FKwA1)+Nay{D_L!Xx9T8?hrv~0GbjuP8&R}RT_Q%d4LnSZ(X8qpO^P$Fu4MQ%{zaYim8m%fDALQ#PjG}jd>m&A7%rg}+ z9%o&axp^ye8S^Vk`p!s1BfVnsZZNqkAgi+qJ`;qmRu3TAVQnwv#gCGMmvEm1^$EeW66p}JDK5wH5(clY@ zA3n4luvYTF18CyO1vIcpr!;py2bKig|Ml~y4h$U&K^p&J{FY|3UW^daGc&u4zjFQh z7z$e^E|)LQ=4g$_=5Wo*+`>qSn|~;Ls8T7$Hw2g_{+caY6d3gz zKU&O@6?Cmf85+iyMxQy<_{hqob8Bk4FGsyWtaD#eQ)phMUPcLM6JfG>O?8C{yP*op zt_tz<;y3>i@^~n{n{ktK*v^dx;n+smgJH^!g1J3py??ch;AjFNy!mWeV&n=W?!24y z;SOI?S8O^^m_KcI=q% z=U4V+j^cSwmu8~Gp-&2ilGlBm_CAd>#2H7YsOz9u^RiHu#x%%XKH>;Tcks6BM(?ZP z2FHtscDr%tA*_t#VAXgRRQ3zGQ(nInTV6BN417H@Lm+NuX6oD<-g`BtlL{abn{R-i z>Ue0p(D1?YS270Lb2h^tWFYOYMvAbg9Uch!Nahxr80H{pg8RW{?>k=;V6n&lW3g*g 
z(=RfEj`jR5K3}@XgV`AWm9k`2$do&}KlGMRZSRMm#J(8G)B}t`ct@}-=ungt6|b1| zi!t0Wc=IHPj0jWVj^6*++}P;$P+$&Ob1u23zs z?m>G+G9t>rP-lmipnDiRygX!zzR3^0-msOC>+DHSkCwdgit~-#mbbbMX1tvi{zcfY z?WZen)hCI&%P7`thW5oW0foF;6g96wh7mAx7`I)kv~TPxD&epgjw*dieM#Eog?v3V z|1QXI`CmML1c&?dS0(5mb?X*-FVQ&fpzWLuHDlmFh#(Q7$y6AOdzkRBv_iC^0@l>f z(f^vzv65>ZLJcms7S0edn-#@Tf7`Gl)((&{U8(*&SSkAzO&8b!0#F%^YrJq2?*%52 z90%A@h1aY_`)X=ydEb@dk3(MH(1eJNTOK{}Kgc=4(9-hBl`Hs$yc7Sp5GM>M&bl^{ zV`&FFcaF;!Kx8|+@|!pN(~tgAR4ExVSAdh`G~|LM&=L80?TY|(Oc>sTI8xYTQl%n0 z)bZc1PK_;{a!&SwL2s^k0h&b~uQ_4zt}pq2S`ZdupB?Nm6ZMZ#fvYiVdCulPB=v6RE7 z3AnlNz9~emqD=SM#ic07?ym8fqF%TBV)ex#_iVuOW)w`qr zo5&p(m_ivfDa0>GwRjLCCs3-==(5Y#Y0ezBv)&k3*Q;MYOTGft+4GaVmooH#_RS`^ z1j*F>M~+~H>hF02YGG_j)Sq9&5HJ!(pazT>I#eW<34E$!6FG`;05i6is@_eVQAzSY zxMC8s$#x|-=Lg;NZnjk{(c5gF3L44^q4RB>SpD(4$e{)azH;T(`)3=bUyU(Ym z7COWL^zh+xcr3G84Y}kR?HF7CNR>l^*VcZ)CVS;aZ7(?q+l9v{{jpfyv#}Rv2R=x8 zi@p5giA>@HtNDVUp4i+vAGRuH`w}F&iq;{I&`GkKqQGPh2zXgdK*nqw>lF7S@W(I2 zZVJXphbHbvEe%X0>ucuqyMl3g?)G-&Uu(~v9TD0O12;z6gg88$1ST$~n0SJNlPbx} zOYHYxrnh0MtZ<#}HdZx*_Tc%<=VdJ<{6gQ8bH;(wM48N#iVf_*xR=3|kepus^l7SO z{4tp8Nj;qit{)_{b_Q*|Am7?b&SD}XkcS@VQ5T;P+tHh4Zj zfqC=h&7|fgm0dhmsD|7EB%o198!1Eh3DaiGcy{-$8G@KKc}dE$_8nuv*I8Kft=QK! z4Rnzb4-UecmoG&clf?FSRaUmu34lQym%D^DTooR=DH}GFvTg3{rxX(2I-U~!RC|+w zw=EPeRt2=Qgs!=kbCZKk_qOAFx*c2y+{6k^#()1*FRX^hKas-d;WN^k8MU=K%wjCu zKH5N+^k1?h?cNMYo6h~!z2wvm6c$b#H*PW8rog)aUNnX;+K!0{1tds^XmHNo6Q+~R z6eed?x=h#$DA%kMTnc&SPWy)9Wid%1zQ5b2t?qNCzrL7Qw3w@--QB-Z^PSPWiD6hVRk$9&5m;=B!?@X&cA6M^~3Bzgztl=wG&hwZW;k><5 zRD$?*WseF9{zOF`wZK1S zpu2-8Gry}sSa*#Lmg6g!=dfHyPyBVA(Cb=mQmP8sR9n^p^28UvbQa&k-wJpz=# z`b6)pAX)6}QRc7MaFBg+yN4e(s6hBdHnL*k2INn;DNArWTkorUIH|NEdbX^wKQm4q zJ!h?~pv4C#zA@({slRd6!9q1lgPBohUxCV!^)vvNXv5m$>T_u|O6Tb=%r@oz>Wr{A+%2E`TM^VhVeK;%b zDBW;;ke{Y~4|CRUf7*FNHUadL)zw+rR1+Vhg_=)DQK8%+#)>d9QkM;fhvP9Or4fI@FZ19+@JvP%uVo>pcqp)d7p zQZFecBB1g)Ym<|BSL_^+k&!8St^WblS9!RU{m3?TX;TLmNkpU6#W`yyTmqagvwZ*V z-K`yFe16LuO_grwAh8VaP>-gl-qir%$UtpUCY6Cj9a1@oVL3^1H_KxQ}^# z6i8$$n|`MBJTa+Z$@Og#h;Jp5+>dz3=d2_0^zwW>fm@ZEd+%}#Cw!1YNImwASy&dJ6bjGlE?=Qy;*Sko!I#Y(-*lV-*@~oSz z!R_7K`O3<3&VA$m-JC0ugV&=d2kW12+qY9lDO;uqHAtPWz8vr29uo3}PT0lMPp=^0 z1L%;=cg{lb| zQ~mC31w?D47(LIIW7y|1k2ucxT+2&D;SiGA2X)c;>pXtz4J_-$|oIv&poh$ zQ8B0E1|$lXa?^VBVoWY~O+MV)$jE8wgs_p8ZzGnSw-kT0%Zm**7O+U$>K%ux5UQsUt=CHDQKse-8Jc%+uu z6yz%}BGdfz1&~Tnc8xs}`kY>#UczeU&7mZs9Y>5(!0M*6ofOWIV1x4ik@eq!T=xC{ zKW-%z^-`3Sj3h}VNfKoxN-EK$kcK9miqf!25?azwd6j5yAzNrlQxi=MQ7SFIx1;m? z7I&HWM8wu%*h1qbRocCShVfsQ*hjJ`&c!liTi> zP5Zi=8yOsnO@y=1w8(jG-HS@#o|szXZWP>KJ?9ha zm^LCp`zxrkpNcS&8`+Ha>4$?SPb1aL)wO4umXSmbZ0(*IC*37C7AK8bzYg`n4_zBk zQ-Aqs5$R>l@tkdW>MJy=5JY&X+QWy{n;(J0h)_N}*7{txnlI}n(-HNa8rEXd2#t}9zz5k{Wop>q!VVUkTu9^~aonALMO7q5-f zFPQl6w=t%V-~tdwH+JS($>c#2m$mXk!GIlYimqPmPl4b(TH3K0-5dHFd>3{-?()Ex zokS6{nhX<+8>BxTPfhKntUOvZF(@l55aU&vQ`@57amO349ILuCnm{bH>DY>r-yb)B%zxB z%ucyhs?-~2puO_`Vf00!wa|i0I%H_d)jLfa*C_t?k)};k>2u*;ZzZYW5-+BTjD^S! 
z#oUU3?17txh zf=`0A+CM!J9op0s&^>#M!ve1QywrHT0vVbX@c=i1E(`*V4Z}W7-nEAxA@_tzm8fO9P`dlxa&se!# zTesSz-w}WvP#U{CoI%2Rb9R34;DJy+D=Ebp`wDq~8?M7y+DUJ!r1FNNwcDnZH@IUD z-{|wb&mEA{e%eb^Yrlt?E1v{s`;y43eJazhXLf$V6Iit$O=dsFaf4ss# zMwI?pg1>8De;8`TXB7>zNQ2?V(>sW1hzT@Qd%;Gfz2cq%;O%m*9YeuGcANc|n?gJm zj=O+`%IESl+4>A$$zUKwVq#vk&{B0Y)6wBIjxd}yl!AfDr_fet6Zn@6~ZeeltD|6{Vlyy2%li=TD`)Qh){o1%Q(I@Ym%+#$hNAIYp ztB>Ns+`YKC&4HPcQ`f(D%lN6|mmK-*QXruWQO>mrFO(?_pePy|8;$TCk_FvTJ=o+a zgNLq}a%Rk3^2D`TK-z~b<)(3xlrQIeg(r|w#|9e}y5s$yWs>$^QRoWZ}p4aRHf_U>@;2BO=z5Yge;;x{kFbj8X z;_1ahOu|T6jMwYL+5a}ja+%=EL&EU06u-~#{5B4%Ub{E}KR(IY;p}bi;ZY5CXt%8`)M>k;>HX!Z?8(vq+d?8c zU4Q%Dy-!KXf-T@o*RNBH5_hsY%X};0VVK_GpR%JL$07|gtU!K$AuUqMzg8>Z-{xAD zgp%#%fnVeF3M@2w0S9n1Kgn5}b?vE6>WLH2`Y*a_siII#fiQ!dxs23<+;asUrY0u* z*W!)SH^jz97<~a376%){s?VkV6zdtnw$ny|5U}g>8OLN`5NA|sW}s70YL|YNknla% z=lP#ei~v(&ZP~eKasQ?sJV;XU^~Ax}bYJ+AawZFUQ>7URSL??1F{r`FRb|-y$#cfI zDJj{cN6GsTGR~q2UG~Dq~8RpK@Z>F&t!AE%{z? z^Ss+8MAD^gNf(2SQjfvU(`)L{@jyUE9p_XACL2^|r)2BPt9{t@JWl(fQ*t{G9(>8I zXXg^lm-~mpQ<-U=!38BvkcY#y2Tx(GhzOmTLMf; zHiu!w2DiUXd8rxD$0bvs+vd(4ot_-5 zyM}+4c&r*q?JGBKJUugiGjY6>_K@z(mZ4WA@1p3#ZxL1+IyI-!0fLYdHvlKVU@{A< z>gt37;5ckitWj@=&_CU%EG@+Y6sYv<${hR2CcXs_Q6wM0S_5_UnQ5m6AVCFjX{5pn zHZ}V0Adf*NCet2Y&9=TG6Z!tttFL&^Ds0s#XV0HJxv#p9vzy!Xd1vfhj7YrI9M{*S zUO_Fd?}P1gSKb2`|+%43@Tl++HlcjK`T?y%7k$9D=XNO(A{je`0}S4&F|qCzpJ zBTd5jqcyX44bmm`kB8s(I`lq!qg@athhx*v*xK8BgNHW$8sA1y^lx8tjU7og=7jVp zzxd>2P9yo|ZrLY`jMu&&P^IF1`f(XW6$a6Lsj19!wSB1jlJ?l(G^HS3tA-~jr?BgB z31>c;*KSojuwPnq1oI)$^4Ph|AY!K5lGo?Ug(R*`-G@Xtd&@WN1hH`Xy=Kb{4y3fN zGEF&!{*pbf!tnx7V)kE5-}d}DA}x!SC4S;vlrexD33>(0BI?)s{2p6vTdMpE7lg4l ztV-Xxt`YxjnBblpUiGi;?F`J!?m|nybpHGu94Pnh*`AMUk95|H@h<6?N@xN}38eTR zSax<*7t^; z zchiU#+t1dDmb02P=@yanF_7qF3`V5s&Re zc-j!4aKHxCRq@7SSAqC~P1(lw+_`0o^+OwJvj{FmXh82beV9vqct*nu;`~_UIxerI zOK{Blq64o8@{@etzOiS=$fmKFHdgANolb7*)~#om0G5%BjZO7&Ji3eVSe zEl?^tr5|e~ggnvbrpr}yhae2HC_o$1?P|(@b#WrLCOCvGdRlYV{AldQs4jjiC_0_| zydUX~+`v8>eoqI-W~KG@lB&sB^#Y!R!^_{#Zw-XHcb8y}e-IU`Db}j1r3p2|yz8Xg zo{^XWnmN}5G@I*>STJL6i_8e$BRZtP*jQVq%#jraDshhhCeiZyb=`RI$dL|??$pn$ zsG6KX^6~Pbi(fuG+Tk2OD#+mSojY5d6~!IU;0C-Y*HjT&kr7V<!|7lb1O*QmLrP=)YtTJ=U5YR&r;uUHkKI>qJKb&` zHU{z->_dxAojZ5X>v9Rtui>%fw6?+xQfFrXE%W>5UP~N&?ctS7Ea|_Dn=Y}v^LKFB zlQ+#Yj5myF%jh3{Hl$(ZFscY7FmZYzejQ{aIDHKk^cuM)ar(EHWgUO5>vUBlFB{mt zy6x^|u*lpy>@tFK&J_3#6s;NJxpJ+G%eIXhJG|&50JWQ(sy2}!MOp+ES6Y&U3!$qF z+V0%hN>zI!J`Aol`*W{}>LB=Qi{2J$3nbGes$n9BC# zuAFSBdAOf7ax0*eNQEBrKn3j|LL-%R^$QQ2E$h<1Tl6l#+`{fO{=o4V@vj}-u3 zWXH1NB0H*G@jOLJ8o^o1K~m~2xv@;t7S#Z9SV*V0kCijeZ#ve4;|@s`OI^zV)Ruk% z16f`hzXfe_pA}L)I@o09 z@f3t}yz#lIQLun!il_w1P;byVZ|l|V{(b^EM}92wA|8?(&B8+B`nRFi%F1>ccu1vH z2*Yx$osNN9>QX9CA*3YWBnKxTX@`ufWt?Rh!*uf_ zzkT`A=X#W%u#TFVnhf6f%Z1$_2>%Jd(A~Q(SGLD`>i%8-iXmj#syv6jW};0`HMQSl zHlA*yfD*Cy8Y>;0IdOMs2^kyJ!oFe~(6wt;dz`7Hs@1yjj9J(?@vXP#><}4NlABmN^#861Dv5z0C7f2bY|h*bo2#) z8ghe&waw`Oh7;MA+J38kVn})Sm}i+D$QrnXwOwQsqe(>pb?5ZIz2>^c^q^-SJ!r$- zCumwp5B6KVIujOQV=-&K?8c3*T1&h32O+qW6Lg>)i@oo#)3^Y8FWKtk?Cj<3-R}EM zLiX%tPM}Htf53uhh-i^jk$t+kW}iNDDcQtVjH-TSFqIjOiU~1(AYA-#I~OljB+hrh z$@4VLO_u>9cP1qj^iU(+iqCssLeOf6r(M@0gyfx?r}8SbD-71Xi=e>g%1F=)iOK^? 
zC1{aRRK<_oUWg_BuBoVwVsm(33g}3=0Paeu8|vF74CXP=af{wRf<6>PTJyr}GxIiO z{BspL4>2DeV{@3ev+W#E(XDyN(O;qA2n00>`Hj7tGDnWEGnRJkY2t0IE%kSM>%QPuvZQ%SKsk(7LMh0cSTfsE+JFBDO-N)X7San@1$-hwGMk;-yn;*cSqhL2uAQ;I zB?nXNnlColXJl(Nh7G%a`ErDFo9ESqk$jB$K1qgiqX|Cc)JHsrk`_s*_Q}d}7<}%+ z#mkmWP!**~Kfkb4Vh1mEj?-w7p525A_sh#APB^NH8VWyt6X1lCsiG^vu5uv(xXW=) zlhTU#V&l&pzm}48P)ROznjxWahBP?z@TH%D5`-BJt5#XgBcwNMSD$t0gE*xzf>mhr zs@g=MVV^E_23{A*TAbcLLGL6@`)g_j8);|>IZ63_o$y}^g!XZ*rijzij`D5!jO!^Q z=pzzlSLpZdZH@Kg=^i5ur?!`uJ&|~3Q?~x6s8aHAGYf-rvG$@#l2Yq$Z$NsRuzmY0 zx&}#M)GYIB`mDEx=#3}IG0kv0D7?iLa}*oCeZPZZm(L}Bl6>kuOQa;$DqM{7`s$R% zaALFF&TXUz3g}hY8b{x}YM%#-zX~0Mw3e!mJ=W3~=a#B}$?Hb|LYzU=pWFw)c#I(3 zO8Z@2ZgGCQe$||xU*1Lf7hjc9$z3rTwE+2HJ}XtUrc@cu%vw3XCXv=vb4k>Ls&9dC z9|jL5N67@0nlNpULP)C?T}=g}?|&VBy+HT{%MaR2NvdH46zX$WkCWV|Wszx||zx8*8&=T&kP zmJ}Dq%i5UL|1{UPHc)xr&n5v3?JeH|YCu$zb@3u^b;F8N(gR_WvC3<@M38EEk`p89 zED0fw<_lj5!PHqYT+2;1y_XwmyHfa6XJcz`)sz;f$fE7V@YQkIpuRqs{ z3IOBIM_`78A-ao?w#46^yIP1u#fn+(E+^!xqCj|+@ue(I^Cd}R(mPq(gK)R(*%t-FJO3{EjP%IVpU znc6W@x=c20tRm8+Xwp>g+zBgG^8X=epG0mBAM)m&5n~=^N_1Y=qel;4rNoT9*Ia-) zcb8JWog=3j46!$h)arq1z|AAs#W>jQqhj&9u-Dp z>m2Wwa{Tx#7nhgJ+u4LmX{6yMJ=F~k%47?-NQ~NuDYT798PQi`hV;3$ z&i5??1(v|mQiow1puq|7psJ%yf8lU-RD%q!E$^BA)Z{hN; zcIlcI;Mu)SO``@w=yF&Wrx_XzRQ_VUO{_IRfMIJ{I-@vKOV+==Y_`xigS*&$P8I@h zV8y!*n4ukeP6bH2N3ULv>29|PJ;8|%?_XgtswydR+Lhb~DqLNKLx~Zs4b9Hqv{?VD^jIuzicL~0I5p_;n1Zg;X z&rZ6+6%2f~|LD>8uV3H){acC#jpv^Y%Kq|*JwC!NBdtr4XY1$q4Abi4@|wm^&O=|wx&UI*A`V&$iwudRBx z4xg>xQ7cCF`LJ05Pnx-$iDfLBVO(2iRQd2Qi(`y-#T@FkNTaUsPh*tj6O1{Gd4W40Ruqxmw?IBs#Ue^*|R5sJ50PD z@Z-tM?7W%GQiWhrFu9D<(N|sIbuXOWL3#8%{QifmjEu%cV{@7PyUA5E-nACTJtto4 z8LL9hZZTjIhB@#ajZVVA;BGs5G}hM#rN*V7z3uxh3_wMcHREd(JCve>K@OR_cD?0* zTEf)pj~{RLj}B1(NzaL}I%6}NYeUvDmL5{85zT8FYQhr|D)^28N#z~n_EFgLL4&tw ztZT_WId1jp9|Bx_V-Hq~_VeeVh_CM(#m(-u=&i(TQs{L!>U=!70{tC^^4v3E`IC2P zIJLRFEIa5?+t)YOE(W*Tw(ZOO5h0CIqTLpaUnrg`g5)~JUP{Y;HF-iq?IZU|7E8m2{1xq$-=k zUe5PX{2ht=eZ?YP$N38vu;ZdS3;Cz!-`l-xX~-gnqPK}*_KuDbL+&-qU$ttu|HZL3 zGxD1@b@TaI_3L72Y)~zSL>bi_^EB_vMQFqEw^zGV}g+i3H z(36$#sfMgwn=vEA$^tcW7j*mfp`G6={&8F|FP=UBjejWKX}WI^##D&2OWD~J=k}jp z?=peT&-iw#rMXh^m|ND@o8vQ|1EoQeqCI;2T?(jdh@s)?mS%JLuawYU@1CuXT-^0( z-M5?f^Tmx18Nj2jHJk0~>yYw^$IhwBx?JrOpp3B7cS4ZWYQ>mma=2#D(|KqFsZ_Cpa@Y6T zpwU?(x4pG&rK+WNF}o6ZUBvAk{eu6VJx*nO29N+Vx)2=o$csKaJV(%>oL@j*R#ebp zW&@u>4qi7!)$@7)OZyV|8G83pgYRwKqdKs|FZ$Pq^j1X(-|gGP7k#v~W%eHYBkHHS zZ2M6#-%n&2l^x1$8Oi)!;4_*b03Rm!{B9m@#`Khk`M37VlI&6DHgi%!?$>WxJP+*8 zWAn+HOW?2#{EI~yDu63x_bwG77u4|4c2S+~SPnU6TkeD-d{Y%g2j<5Rs{@w9*8FO^1@D2d&eV@8 znB3g(sc&wlcJdt~yF>VZ$B!I1a2k&Rz4y_h`o_D=kr8AVmmW%BU~ff!zNDk$up{1f zlJ!Hw-YMKLEAl8`gf5O4E9&hfnD3y)be|0m@2qife$h%jR|=wlZV^?T)zlRFbw3ee z*F_~)G3axori84NN=NsJLHi|T*ZP=v9y{MVO5Z7GSn-YSZE~C z@#XV}b!?6-ffEnzHQ#1RJ#{KARDzzHy@SJU(%v-b&P6$@VNrGpD!C1FkZYv2>Dh73 zs%lA-X(~FUwsI9cvI81I;0^UBhs(A7d!~79Vr}u|a0;-11eBWHb>jxRZLQk7Hl-RM z{1Gy}9Ir0R{cZd3@N+IqPAOo}xL9K&BMdDpIzyST81p@QF}It;>et;4KE7@tp^>x$ zL9*nA2e;tgc(!q96t^83H)D z!~eRWIJLENxfc{8SMdbHbzO4YV$z(B&ARjuHSP52tEj{IXlr5WlK1wOW=+Otok+#R z{$h)GrxtRsz!#;8#9Oq?Hf|cifjng1HI3xk0}OT_NlW|k@mb5L zb==Xux6V=tWO-CTeeT}8(A#&%Y)kRx7WZG5+saEmuHJ{`iHk7tqo&^+nnEx`)ZFr| z-!TpjgHvdu^QdLdNDsfjAyRso>rVhNrca?ru5?_D8amj>3FSa#<@BQ)+jLb{-lRIT z{qp;g$=_2%4Y}hSx1(e@meptY@STB+WX1X751q)`l{|6nm}TR=b)Pt)x%%GuZIVAk zD(n^{OZ3VR<5a?wEMvx$UYkl0W-O+Fzz}Yh+~G%DQpTFBl8w zw>1A2S9juVW_*dAm1c<=CIHmYXCyHfiRa{Pcxy9eBoQY;R}OHOv1KmhN43$Ff`M=N zKGd7yv7^AssZi{`3?aMb@i7lkEr5%oK;~Dwqc#>xTD}pIAE;2gyyoa%ss7veFP7SW zG3crYJ_N6sFd;n;$&TibV6)+xnwD=bJYp!&2ev;H8y~LWedUk}2SqMA+EA2Ms1xMn 
z}J??#QuY_eGRo;k8w@tvhO-++T=*z1t;Qp&FitZ{-`GE`X2y7vG3=d)d1nOYVfuOl-XLoh<=vVEy>h>J8viV=4q9$gW>;%# zVUY)FF{}6N8WuY^_>0;uF{lJXp5-v~+1nyVrgigmQL)l^{(;0MCMpzjA9e;1L6=M^B$#Sy-5|D{Ktri_Tohr475=S$K6tnbEVOtAx*t zfDQ`v4;aW)1d2=QcS7ob0YPtG(``_&!B~K05qL%pd^$Q-4)5aC(jB`5FY{x@T3SU6 zW&4WU+aCa_M!2tSlN`I(pAF6BPG5B7i#@6)MMt(3~UvfF(P=nqR~MuxTG%!V70 zYc5!zo7Vm+MVw~)=@*y3grw)1=U&s41E}&Sa$X}@-7FaP81MA8IPg_g<^&Jc>GEN*h+CFphHv_gQ3;^}+2*}5&~7Ejq4vvo@8jFJrntN`Kn^P@ON z1TmvzZP(VTT0d^riZd5Nbn~h}=uRzpWK7Lp`U6BBrMsXeOupl+RP}}}&F-Wbr(_+I z)WS?LQ$ZDY@UVP5>=pV+x|mtaZioHApv-9#m+^J=Amn7UmHoA}Jn6?Vq*HYK>mIxg zLy7P8;Ij1Crl4%8fh(L2c4zg}jf@N5IGJ+rr}8RA9jnhx#IpAS?bzL8oFqsfOc)_a0if zbJY5EIB30_%~nWNd_tQc%Jkc+sj*tcz&#J|8Z9MSoN7DGn;(ybpFlSy=O$89`|$GR zT>dnw`-KZLk@tX=z?IDVaNV*$5}sV&=e7`<<$#m#HfC+lb9p$vSmdEAETjRuln3`d z*y)MCtDCjk;<1mVzJ>R;t?i%o`=jxLD+~2xFUyWIe_U}hHFS_xvc}!0r?m#thIqEo z`q6qkOlR&S0ptnkCUE31{M5tUD{nLb^-TFmD$Bb{b7UrR@FChrG;kn%WCxIw zO)mn?v43*uy{L)k?-%p+#AExSQEcB$BJGppec2Yhbh+F6SN*aznP97N<&DN_+p%4u zp1<8!?ee^O%cM-%!icx;2G+0a81qOizWX+xcSgTDZMN)g>f|qu&)<1we(C(NgAVwc z#u#FX&~y!>s8C87CQ-b_}`#@g7WQgwd_+lf;#PH#{Bw{P8E#5VRG6RSV- zd<-&B>(5Qc#_Zj$)A(k{Rp;(C-><%1HLmBK6-H~mE_29Ok$Ggj``tSpfdR)CFIb>4 z+e%gO(rq$D7>{t}Xxs_^*4r)S3<)cQM>~B`aJRdP3ZP@h|9VqrXOxIS5YY=Ohf0xA z@7{Chc`+Mx=+tRFEuSvu4RJD?Nf#W`=RmisqM|`=;)_F~=8<>hacUav!OQKM8S*Ly z5(QBd9Zp$;E~8yQ!dE?)5$9FotBbDe+;Y< zJV-n-2TC;uN|*AI3YlAD+TI%Cl$5u=>E`Syo6o7vU+BH^{X+fqwZTnomt5<+Y3I`U z{X1-v8*8d|+?<$lJRARWSl*x}K9rg|rf|hiI$X^o*o;qKxB%kDVL_!B)Ko>09=9*y zSgj5AbM0$1A>dhpCZH`%0>cJ0Lzky5-0DdqIHf<0|?-u-c6nyMdc?JePAS-eEtegfzG?S!HilN+n;T}@Av*yYP7186ie?DhX#2#0MUDFIFn+Mve=Ez(?1UH$IBp{xW zL{>(|VvM!_mSc(==o!;eQmoU@YEn5vUN~nlR}@Kqq_d(9i*>w(`nWDO>S|#(?2w%| z+?_gmE&AwhH<5Z2JZG*EnfJfz3p=rHJ_P(mN0s;B2WeUF)LWX`#;Bylksy!pBe;v4qMWx#`oQk_v%m)+9B>-Q|Gj23 zxIH6J?RKsmPv@+y%*6~T0B#ZYVpmd9=EaM?Cc+9_WDXC66lZ!!uW zmr2pnz$>u2HODM3({hh0JNVPEh(<|}ft3T^=O6KJ(>RV99B^hDQ8bR^{+>N^=FyK; z5#ty!SgBgOg#M&IG1KX1Jt$sj3^nf#47wWAylb&&MZ}A-IcV*(HkX{UziK`vacS5CkHLWo3^0AwC zrY9N2_%r&p{bz&5k0-g=bE9}gWZ_ySnR=DRK{D*UUBBKUkrd&V#B(AE0>J67t7{?C zZVx63sY$&#_0)YXE?>W0+yl6ZzBS}#esKAk=3n2%(FQyJ?h?*{{X;5_UlEb7(ei57 zGHQ~5^;3zQ4e!~@R=qA64dZVz;1oKUF46CqU820J^Dsa0z-7MGP3KGs8&Yz9HjikE zGmWqw(d?`2=@=FqpN|>Abk?Ss8bKxi&PV@^)j=&nDh_*rKgnICUc*I`fgo((lpzuxt~zj>x#RDXwu zfu>uhR{y?mZGJHY`iNPZaxDZWFP`l9cHac?FYH+^ac@2gKo~wA9k1*fl>Q0lpANKMto)&Fc2oW19)l6mUtBZ``GU2pilX8vq!dgS6ru1D;0%P9 zd8-yITh@28x`z^NV^vSPZL${S@Up9&K6J%z;rfqF(!N!{boOmN*zUmQj#l%<{Dp|K z7oTS#xA<{aeXiU<8b9PkO)q!Mt{VK}gZqb9IwPN6F31mb?QOeBF6POT@!dUhE_5-I zi>f^5v!}za-bo+E$&vQ-E?{W0p-k25*KMa7errG=05S~5COtqi`XrM)-V%IKUj%}E z`ljEr#GeCMlcWU)PYQtbe8#J0&GC{cR_@PU|KZ&`q9F)=l%DXyc-Fav+d@JL0Btqb z5DeY?<^F6GBN;jat^7?#!6x8D6-U>%w(ntALB8p3>|FjJ4~pGvr`R2X$_H{0;5?=c zFu9T3to+04LGCbheew=LrPQx}0CguphEVBsv9jP%>8CS36dX37d!LQS>uGPa$YcTIx7Q?)zc5N1PQeQ+WTs@@BG21f6K1 z5Q|A6Q80~+!rMUGK37F?#{n~Y*}es*LTBFcJ-Ma&e6o>u1t`G9lz&jPp8fv&b5i*p z^M&B`@s%Ykh!L}n-8QtbvMQLGe8eDI%jf-rQ@KQm zJwDhcCt$Md`ses5{if|0FjMwcY0J6h(AfFGgThm3uo`BtPuDe7w|yFD2Mn>U`TOgZ zMVeNrpAj~6QB@tAE~nmlDQ7U{)`NeuSZ|z#ydNBlN&(o6;P`BpSTVmUn2tQkU2nhu z=o$0x*KRx@8HgSN-~yl64Yy$%JE&a|nICw*K@gAHu(8w;TV0Ts1ht~pxV1i2*~SFd zFr9Yz5yb;<_YQ38+}Pb|v`UBTa9OX*lfV$?FMI)9fBG>Cp3$9Rdb%DhvN$RC81BCds-Tng8B2zuPu$?r&FfVh}IBzU5iV z_iszDh}wmxuK)eze(cnDXXSk<8e$D?V0Gv~tIC_-5>~NFpz$+<@w<1?l;O1d!SeITDA;!bb(zl5+5X5@Bc97d%+rLm70GJ z0+%;prU912(nmM~sM%*YIei+Ta7A=LqM;HwwubApvkT&BAamFK)>!!-blao?f?O_V zmPUaRrFp>p=6&tz5AHG#G)`N$f%0$2(s3H%9^ep)(=+c_d)PkAs$aSLbkg!+XEdww zYc?$

TGHW1ZW{hm`@R``vnPv-DTD`PO-l@>SY*V!GRp=bJ|%kLY6NLN$V;a!hS- zu6#rBWz;9N3m#93c=KvHl@>?_5JJnZ*Q1DvkwfRJGYy7)YU6ygfFow@EO5M8R(9(A zc^#LqPOZg`nvY4(Qd2wS>XgjPA9$eH!W8qE$3BB#(svw%h{t1t8fVgIA^~xDYtDg^ zQZ;awXzE^klawRg$MHFG^LtVOLcIFK@IMNB?}$iX)-3cXiRpceMW!ba%TYycS-9+Z zUERQ%FN6eK|B^aa^ za}j>0PsmogaHnw7gMQB`DpJ~|w0V5;pX-Ot{TeaXXV`l}n#~w4`7vS-8~rC03W@Io z*f9C*-ic=|_l9N+4S@g*Kt{f9LD?}+zuTPaAkBfnicQO?iv%9j!IleShmpQpG3^|F@qFsXvYA zMr9MV@Nk3UFn-IIO?!j9W%usK9Hb;ooLlvI-qT6Dr~A#0un=$NW*7RV4=&WZbmC~0 z7-X@9nOW&_L+G}70jtRwK`XI%(IV=)O7G&`7oGzjyo>F5dFFJX3O$kAa(}8@-Xp_{ zP4BrAprgwh7Cx4etmpO-GB#OdAxCpC|D{-xGU$4j4B+3K7wh&+L4?n@Y^=Doj@K+I`z1u|lK6Kh zMF{2H+wu0I^buoEddgThnAwEL*tn~45KS}C!V>aB)GwRLgOL8a*RGyD0 zP_sRu5Ik3I`g3|wAIU8XPe^GFebG*=$4tccd4@;sSbT0$02S^-KpW~MpWk^c@V&p8 zDsy7nh3B&_t)6ReC2xL!KB>xf{qL;&0d6AWD&@Mp_2*a_KueaI z3kf9u#sa{_L9XP7I=VAFQEc-wIQOlNF4<7t<O zA{a|Do06R42l>*5F;A8jmK!qOxL1)|M3tM&#`Tu^ius0NM}0{>C$lDQ;YYGRk#YX2 zz1_D$Lxm=hikaOn{HU6N)=_uwc91;RJxzv@tjrVH1?%Pi){`OuVSV272LIiWug8A2 zpHs8Zv0+CK)8GC-zD{4%gRGooH7mrO7LM3c?0IR`=Lp39tt!GW^)6ku;Pj@kKZFnA zc9z5U{fHxTH*_NjYvzGcc^ox|7d`dDzfT#(g77S2p}YHV6#YEj1# z$4UMo3UkcVl3{*BfIEH;VtX|oo=At!3!8q}GW*ck+o)0L<{w~WQjPmRFHNNr-w~J) zp=o@rclVH}p{uN~ooJR{|MPf_^M2uzHh`*XT1Uh!93;68%^K4o5+e4(;ztPJ&}bF8 z>#!wfDwh%qQg{EvvtiHHQ1&MyO7R_C8ciPKkE?@nf6>3gO!VxSTG-MCnnPx>aUg6SH`|+OJF|AhGR%UwuwLF?RXIz) z=Ma~+kSk}2-tt}HcSCw5FAYZQ)rpa1v7??BveE` zd~OtmlK@wc_pjZXzfOy=u$fQ8z_JlP?7>Wv8Pli7FYH9Xn7^;Drb}2b7}GnQT`yBJ zGMea_zkd0`;3c$4e9P*$WZM7u*wVbL>CD-)LY7DFuTSG5TRvX7>u)-c?AP{Ab)Z|& zFT==LH1tt<*3uFp~|GS$4p|3P#Eo8w_v=| zy8Fg`%Tf6?@j-35qM(NQ-?Cgh8Mq2wiy0UQ@Ku$zYntv^^T=iK*mlz*;w)kH7Syyw zn~571|AB=m1M6)5UF7%H8jq70uF|y7Zu6>Q&qvu*8ha-#nVTR#++SyXpCCl=lnYAh~nfNnO2Q~-PEJA zE~(yjypnGyQ=pHk_&c$fPTNfA-WMBZ9XJqT)F{aNEJI}lz{g0#Ayj}mPFMc_6@Mi{ zE|f_<`h(bcL|9pE)x-s2`MpOcE(mj7`uvXzdYdZE6W64`&g$#S-!S&B+W+6L@_EQ< zhR5T7Rzjv!jSBmx?n1K7>gcGsQCqhvu@4ix3c}g}FKN4M_{`nG7^j5vi9#%avNDX4 zk-#V!?K*bk2+sQz%a{LnJ*q_zMBQJ%Rg-?~_AT2CLmTQZU69HCKZZc9sV%XIEQmSl zU+Y~`W~j9C2PGC5r@eA@=wI!L-7x*>r^AHU7%eID0oKB>0yStG+H_-U1G4x(h+ERIc0P$%U8t zNk>$5+`@P!5ZGVO3;2$?EzLh+a5|3eIWaN*E1lkMc`KLH3+K*NPq`vh@Rz9-_h7>h z!$+H>Ke4Y`<5~Ghh;P`$oJ&gGx*-&mXK-(Cc`Co~5+IxthYxGiJ*?V1z8_h0hEcHL zt^r~_UpSi2pFi8X=(6_Ku31Cef-0QD>aEv>2n`t-f+JE6A0`>*I2G69;*NODnntyJ z9tCn#^+BkMgn1Yn=SDe}ghMsA{Q7*8)v4RBU+@JJL>=UnG=Kb%7L*=y_I_||=;t%a z-#g<*a9&~Rx}BWX{{zFs)xovkY3%D_Gi~*Z2noO>q$`b2{GW9PeCk=L7$FrmKDf)z zt7Kv;FMY5fs;!;-pQNCKmuR|xnVgsOOa3#vidA%*Q~CV4zH_7gPo04)e*r6W>WL#q zW}?gx_LBO#ru@f`UE>r+%=R}PC;`<&-YFs2Ek7>g{vJPO4EMIVn$FmLdc8laSbfLW zu%pYKby&~-^fRXavqHW>tgH0sfpv0=tc`A%u621^ed|(<@>*~SEtj&+y?VhZE_YJi zPH0VO$$xLTDE|7V6F)7~4_~nhRP*)QeU{HmW=UTiodms#?ybFNPAT0aaiEbO6>b@_4|R#XPSQ3Y*lZI z|M|(K-8dO{nyoTS)3TyW3Y$*cwI@lJVm-^Mf>4bw^r&J>8PwmhOE)9~kHv{p89Rs>ebD-)2@^VN1u z%$~!!-=FCTkp9R=iy23hfW`qQ>btj4FXAkx7e}rS2-XW@~Dc+%clELSl7X?AbSiahp0Rch5+~ zcIzvxVJm?0aGL_1BBACF#B!{32P40fP6=fMwhAFPM7m;V>q3$dpF8I=W=!hYvs;Zc z%q`n9uz(jR@KME{s{#U$R!80svs1t!(!zg3(gkWE-J&eRE9H8 z$>%%#d9or$=*=dS**sY*AFL5lSnacW&mMc_+IAJr5mgv(y7lZy03tUxjqiQIr){Bf z1Iq?d7JHXpcvOYSS?ptRaC;#}o@WwGmZO%bi3J7T|-9g58C` zaw8eV-G~5CxsD2`4>Hn_>b({uFYXZ0XsD|Y$*TN|v07M8a ztFm=RJUZiq^1mrf>Jyx|(EQ2qZo4QWBJ$HJ+-MtiLCG&()f0RxVt&4ee`xo2E zT@b7lxHBz`d#S2sTAKw;=zVaIhag_9Z@p-*8gGzntjEVXvtBda+%b;f__45=|lj7 z{p1b98OUuKd+lQUC_!U@`KEf|1fOFy3cgmYJ&uhWvr#pVKzqu{y}pfjip@xMX(xS< zz_DkAg`sS>)vE>N*SxUf78zt)Tc&#oJT%HCQph*}h2)lK1DWbIdm2$Tl8fSy9wkR; z;(MFSm*9%L6^&~XCrw&~)(e=V^&*NA<<+MgIkGD`d4g)(Q`-f}2+oFL6Ts|WVKEda z3RM$#C9QQ1ObG9xgs)P3!PzU77$yr!_s9MYM@cmZ@#bs}DbI1d5=Da1pJW-i0jD6t zE-X`Ljr`A}nh<^*++Y5dXwsU8d^e6tlAr8DRah{=<4P$;ii%`<$ql>EC0?Fu3mb7? 
z7vIxATTb~WdrX>JSyTV|qQBG49f7YM-|tP#LR{n9_u`$h!dS`>j{&YU}tgZUAcR_h0 z&)s->#8Ivz5mM$c(GSsyWVjBD(TWfqx>wfu)5D?_YMwH^HlxFMc=v9bE(^+K-t{_< z?PpB;{nfQ~b@%SyFNxGBw@pmC_vq0(5cXHEKwG8%@x)?dOq?oHgOrpSn|}u;+A2lG z#(w?uDP)pw5)KuZ5D(`&;39_{c1op0X@;e(IPHa~Kpb75Yqd4)#EGesCli_$+RjKR zWJRY(BKe5k;pHcB+tTL%!EE}B6!33t8ruY2DHB`@v4JoOA@;28cAZJo9yCcXje zOY!_n>NzA*UER_|(K!eVoK<4WSrM$2X=Hk_qQPO-fuYIo0kF?O<8 zC96UnV-6_?-}`>wKS-2wNN5o~phd#lHT2$6O8{@zjW6H6)vJ5d0&ijquM=*2{j zpEfPA==e4QvX37*5<00!rr^G5_lZQ9RQtFqX&+e1O-kXTPwp zcy4+op!q(t@2IfXZXqGY$~y-mn(d{dO(^~sZdqsqnhIL841aq!^69jf6$?J<4d!_v z@8^J$8$>5sXrGI%g$SX{;7=vtpCM~XyHB5d8CBE#%*Z8jvW4+hb}C*Q1a}ZKS`sas zw9=ENxVm!F91QrlnP^J*VD4n1aS3j*G8}fpMmqiIQ7-bZNe$ctlboDXpQcnr2&2Bx z*b0Br`CRNRgy0;{!k+~2M^LY9d+Vg!-?wNr*4}=wp<%*{@d~%PMV&nKgj@6aJj{uC zVP3FX_Yr#H^>@f<=(XJm$K)pv;)25g1YNl%Oi*W+RiP5|P#PR8w@B?D;7Vji%C?FJ zE(5|2?|Fk#&0R`iqx+LWW0_DCMA~Lxo&?sZjIPjln6whHJx4vaN?}+Ldb^mbFB$9k z{l|}r`p>1iKIZ&%G@P%C-K;l&scmt@ui3Dd!b6=s7rP%TDW<75DRQ^=ldD%=UQY*W z5F9b?WSvKkkbEDpsPj|sVua=m)60!StHy7Pic%T2)RoiH!LE>>b$;oSbXYlr#ANIy z+Io_NuNR!vkQ?~T5rQ+w~(2Kzj#B{nN8b%NjGQRx7hvSC|+2`)g*GdOElX905wc0a% zg?z}jRGT~eO2&=tc%i& z$CKtH40uDQkIr86y|Yf7%!U&vn9d0?fKM`&b>0Z3-?nYrvOuDu&5fu{~%&jQBH$vv3nWumd(_E%F&=n zHs+`9nf^eEfQ2tl*Q#R5)Tv{>f*m>}S{%@r_U)uXzPpK&+y>F~CNlUTp7Uf9lzi`> zWoZH2`{oWFC!V_Hb+*xf?4p@(k3<+UQUQEI&o30d-#B|q9V7E)#nejx4f6m80CijkUO0MG z{?M%|G_-8-m;5cNBPV@Tz?>no3JOgU(pKC#eez^R;5TkDZF5BIUY^|kl3ncl{$5?0 z@mW-~=rf+`Ow`k!avQ&L<>HkLKysLv#>^x7Sg(k)>|QF(0;3-TICyJD%w(zrb1p5T zH~-v2_Xltpqm^lY^xmYyjQBxypxsg+W93vq@Gxy1r%1Lt4cl+HF^+xFYLF*ssO z!ns&u&Os!2e6U8Ji>|1QRYo7sY5hai9v=Jo(im49GzM?+8Bc>IqzVXUWr!ycFeZL) zM7yr9^!M*OLeC*3m{y#zoq30&OnCE`FBcq^L8QM3u)-kvjZXE9F$DIOqjJvIwQ}Ou zaN-?YzS(0{@7BLp#(%wGa6m%e?@3*I_H^iaI40|C&bf2j9A5r07>^@2#vx_)k^e4| zNeFDFki=5gZ2hVn;8OT2-)D^3j9d2)xviKG0eRVVW4QU+verYe=miZn-+sbgv*32x zj!tRbq<^1=1MCo(xNlZqU?9nLq+yXZz+UrEUCTcBbbG?6{KLBS0cK>KsBAk5o#&8b z)XrA%iP-7gg7Qa?#!Xp^cFiC-u80-dT}6d)jhuy@TG!$l+#EtjpJKOL>~y>IYykHj z!5Dl;gAy530`!=`|6Ce$F(KZKxBQ>2?Hk*$V&%&1Ka|erYox)vLlCSYh}gt|1WowjjBx;GjtZeuT7tC2y&BQzGh=b>oVfCSZV4pJ7U zr~hCqriyaxo#gmi3{<~({CG!O#bU9`J8>P;t~}SS9%vQnhp`$#sqfj;Td!MpvgbC6 zGFryi*p7)7HJZgqwy(5=WcW{?-hTKHKKzx&&xHX2CXt1x!f#Ray3dH^#YFw;WL2o# zeGN7FjqBILC)o_{ch9u;6Z4ahth2+WQ-8d`tTMe_7rWi9Pvwguf!1$Oj8;S3((#Ly zEZKwGgajA|1BLISIaLv7WFKrCQF+JfLs&^q z$gO{b0WaRe@i46=TT?|jgiEl`RIgtlH{W|{j9s`VY{2Xs#3j!;6~qDvvGoda$V7|I zntwN{{bed{(jNg4Jft=#nGZddo$U%ub@pTOI5)0A;75}52pvR&;XdOKaAs7!{$uXA z)yGSl+9X$&Sv1};wP$A`lnb2{rd8)H=uJEPWPR{Lm1M&eOBgg)+48>G$+6)YNyjemk(7-;sV z1wk0g0wV5$JH z*@p(AV;TRUo4xBYI|FpgL+MuZs~LSevR}CmLMDY3)v^V^%0GRNS!nt{-|habTZ4&I zs0<8}QqVo&&BY58wB!4utx!@F&XY%v?nA}2cfM2m4ZpNfY&V76+W(mxr}aHm4(4%j zK3HrlEXi5#Y?k8JH(YgMw#_km^DWO;j2_*_L8DgjZwV-_fo5P=K$RE-hgq*5|GXi< z>kaLpyVa|>xgU@g(zY=4!Rz_Oy#mD#)EConG-CuvZ3GE+59v7ScjIIl#278TTeIKU z9477?q=Rcas9jjm|HssM$Mx9v@1LYHN=q7|PbFC`i4-y=jS}%@7Fk9$Llx*ft{gB(XBlc z>`(lS9v*Wby3R=}(3O|bTJZu{5T&%bx-ok|<1c4t!*F{G3T1H1m~AVt%Fwcu$mnQ+ z(i7ucc;kjP;vRN#yOo_j+6?%Y$lT9PPJSnJBofY@*4pGm`@c7PPkOozfdW01YvS@^ z(^kCTU;vUp=a*J6!XV++?b|CLMG5Wbt-efgtq$EpDRAQV$Fqew4VuRMD8r*qqQv*! 
z*&Z3Wh0k^P%9VY64?=X5R)`e)doUCS3>-*ORh?qoTO{!~3qcN}-|jcNC}e)*m(0G~uNu;;sm4sp#1{^_saSeq?R9_RFO7?J5IweBCzrly9D%;?ii=Sdd4Ak$Aa zvXFKuSok(9d>M>Cihu`$3^6;&h?quUc`LyPu$+r7{1rFmjx=Yy4PwZ}vN@KMQ}O)y z29R4JltwJq{BK?d{nPNPbvC^!}?|EkH3x9 z@&_+po?<3fy5wEt&dVdbIM(hX^pX!o(#H_O&p-df4|@>s{5sn#0*IU1g|YO<4*KPx z)AZIoC@(ME)Y(YPZ{@34oAKlIXP@iDnaAIAq269IMBt#ZCtCmbd#82TV?U0G%d=-v z3d?ns7nD;w01Od9Bo!Lj?JuCmEplT)vFw+iFg(5E!XD%={tO5@+Toz!zHg6Eq~Q=B@e2rTKSWsuovvacr>{*-fT*j0AiLQN`R{3s z>_;&~<;g;1Xty65_@R_|fyqs0J+voh1|}O7u>}fa$|KLjQjDqqa^gr>StxQP{(0YH zSf_6##l-?~q@xz^+`unKUr^L`$n@aun)hpXy8nS-TSjq1{jb&Q`^ZM4Ww#$YMy_Ur zX-oh``H_qaio_osCZCTHh(1NM znyq3>9cIn`Ry`lBxeM|Fo(t(oF0jtE{cU|co0tyN@XKj^v(BBncE?3!{C|Vvkx8^5 z;0x35nJ&v96}Pq2?rfbbo}a#C>Czp_h9PCW`V*OV@)rA!EmF{!_?x>883EbD^Fg`v zl@1QaVZ{ects_cvhM+gJ95acXLU`p{As@5M)3fKL_Bs-PyG32cH7MX3XP0MpEy5C_ z^zcOcx}?(kRa6-4>YkU-jH{qsrWH6ZSzzcxqz3^~} zOue~S^+vJN)??Lg2a3JtE|AO#B}0`?Hca0^vx>BF{L#xL_UUiy_R>N0PfE_GN(w~r zk%CpWxb=2L#U@fmT4G^eH?hB8-pc(=Nwtr!pC3&-=r3BecyVZRsEgj;=bp^>l#15^0G3Kek|)!tZl8sR|+DRk6GN6^$mwmQF?xs&0K+g-6#yG zs_oh^P~PRWx%Jz4hO_8<1b)9evQE&8U%kjmb8y`{%`F0GScKZEz80KX{nDJklRbqJ2+my7I5Rxv?t7&0u`_ zKGk9|nzxIaT8E7o@%Y6HRXaIVB_nH}b?Xj|^j?UrUa83LZ^X@bTv@ql#fptk?Gwj! z!TI9N*)}>ZObvRJ_gqi^gLRbvo`mIUmft3`Em`u$JMY(0-WjPz?&IkE^XKD=+yY^0 zyG#CQg7@NMD}le-wW{7xB${^k@;*JTK;{|-AZI7H#C=oy%{RVDE!j(KQiN&Jg{7Bs za+=53s2J}38;(I;jc7rjNF4*3BX9V%ZM-kWT8PYI13NQJI9wAnhJ!rmNju@9F}K$%L9QLu6UH8-z=V! zlT%^>Ut>$Rx4QcJPai)fT&SoV%17(AZRv$xDf+OqwAOxNtY;9M==eAOE*d{RA}Q%P z6`pH|2&@<<`!@{@`3qVFPd4isz?#lMQyd&7Ou*(Sv)LRX+9OUtfyED5Qhv8^n-#D* z41TO6B?-06(cs(~(tJ5NzvDqZy38v%T>dh{-?*=Bw`-cs?{xiiN$Tf@AMfM9oj$!9 z(EaZ#i4jcS0BKmLO>_w%W#gyRN&EYMexyrW2agTq^!*?ww$1FzEcsWzGx)%S>FGeJeD;@aX!ppD-Q6t2{X*XO# zHZp6BxTBRMD=XV6Xhydye(z+GJdUQNT@xRUkx-ZRz&PDli=nOadI|74U>dEH&CGY+ z9Cx7--1gu#^=2@w^VLm#bYdO}aM*Z@Zx44~W-mrcL36ppMPl4%6J|3$6e2=ccjAPcaL! z5!&sFtD02+-6Z?&{^tDvh%Sn5xchrWQUPI0MmHJdc(00mAXfKX6jQ1mK7L&E#uq6@ zR3KetN@V}W-8p=CQYMoCaRtTH%hHCAHfc6>%vF6MYeOWbeer8>xgFsZsV}=!Uwxrk z4W|gj0Zy1g(t-T#>Sju3C9JFkO$=}EhWGDVstY9(tuy~(8*Z411Smz%m_1ucAxrF| z=82y5%etsddl=sGrZ%aR*wYQ=qi%1nncV0jbz!f}S?Pk>Z@3={@4D%&H>?|uJ@2{? zN~5YODjWfs{$I3u{q5v*M&oq?hdjwQ=eB6r5p9Xw!k*<_QZ7nclFx4fE}*!eqbyb- z+eLo!zt0b3fQOmz{LlB}&^q6)**UHbuoqhsEpzj}I4t$W(HTwTMFsBX)X4cJ7uhyrQxvPnrsQSqL9F=57_^am(&~IPp*u;9efn^fECC$y=Laoh9>d1u)j}DDF`6ripS9kF zd7-66kdtQs!S)xZ#&-Z!zVzh(h7-3e#>g@eyAByDYv!j)scZmK_5Aq&N+WwO>?caQdUEIb7Uw$V@|j(h)<5sAYD1 zb!u(|B|(wb$wqq4zI;y9R@zxzt9;lIMb$p=DEK1(({M@ve9hXnx0Ichw*&yKEX z-^s8+9W^%m?fy!_7v!vCs77Ka6z?i=IUs(UsLeBGrL`>O3k5EZXke-0X&n%3@b6nn zOEYlmwRfJ;@E{5cWIe;2!p3@dc|n>uPn!aqQYte2J6tFU=OymkshgX-&OTa~6IEjF z- z5wr@~g*R^~RwVrF+y8>%MyZ%+0i|K97u?OG%SX+mf&>%sxc&R@AjC#En%bp8*3IaX zi4C${$R`T##^kLxbu$|k+~)BNhv-gzSjhBri1}~5pgobWjNYi=KB3^e)k{hhmkZpg zXcG&>)&C8dtE`#MgX9(H9trm>ttuWH9_^@|L7)Av3)%kmA>HG&|7I1V^eb29jvqhJ z#6(FaO|n8Z0je8A13~P=acZakJmC#~D+2MZJfXx}Z-!X4KBLxDK9q8BV($zfh|2HV z&7)F(qp z;Y;CX6&(2CqGsh7{wiG1*C8ZxDGdVKnRN=%r{3#R$5O7REfi>6SJw7;EBpI@0H_%- znJFva6B>wCKMOxR311$Z&FMmgx$NW#|MyzqSnWhM%1&-X;@F@Hczr9KbfwRgyCvAC*wIGl#)Vve1pUe`W!|^+(s_hF? 
zDg#k>x3;&TBR`ple}isIw&DAANqy(d3>E^oaGhf3vwX@F9dhsCuTiy;ga5u8Pcw>s zBK*-m=_7eFYwI5l;22{Wlo&he_$5(G|1Q)yUsobsSaFG;3Ejj%H0OGjhyla50~=Nt zZfqCGR(M1y+cq8p^`ZZQ{h0RbdS5Fs@sE|Aydo)tB^riam#W(QXIS3T;8;&Emh%5Y zwCuaj@RG_r6_QdNA8aDNy7|ij6f(cOG);^hT5?HLd`RP2xJJu1c!YUa zb7;a_S99bKU%J#xdl>E%u3=$rs=Z{{vO@9r;j9)C_MmGE4;m4>1(r?bs;&i{U&zu~ z*1eWYjU3D4f!=~r#d7u}&yU}`N@-(ME#9s*b zpMW*??%dfc=mNXx7H#!Y*q3-6RDupu-mka&En2I40n(24c|ERbeXYJezE>P9hdrk* z{c*O6-!25{{`>K*LvmG0LEbpV#3nHCDr#BqXiCnt|EcO&cxlyw)N4m&Tc?!VD{o%i zF~;(owBhR!imi|Mz@-27mRlApIDk3pcP49|9Rw|*J#WR5CB0Qty0z!DR|Q;a829Pr zc(rD3A}Baia)aH(uN)n)4&g}>mGpsTG7j*wNq-(xWvR!8yemDTxpjs?d|&< zFRw>tuOlssx6NR*AuhRGc*PQDTinFK$35+}w0jk6QgtSt@^P_h4Sx~Nf@Ybp3jF^4 zdkP@TRWfgWoc972)UL;K(e}aiH9r0!f?_R;1i2$501kEx0M_X!Pi|ksD(U8J`C{%D zp4Wxqg4X$IQlZvxugFDX#~wp*<|XSjb9*)RwNn$Mf;#@>cL<9qG%xh1&}&9^U!y=o zXloWPm|UL?h+7?J8w4sPPQGFXmW%8%Ol%GvYY1a`IunasqlA(V_{;9rMIF6W$+m0?lf-P7I>3&_c!--R;!^}3~H_I zUwHlcZ%t4`UK6>XAKMgdg5cVYz7UOUHF3N8UD&_l zQA#141(Y6BQth5_c14C)BruY2`T&$P+V^`!wr>h?Irdt=Er4dbZDbCJc1Jk~neY;V z$db=z@g8`GdEVJ>MceeVMB@2^PYBW6gb%2^@S`GUYNPzPBxvL zf79E$By>XyoseRA3pB{-k}WkS&9r)yFP}?kAFD$IaDu10= zyk~pz%q{eu!f!t>T$(8f1456|QzGh?sBj_WqDZ`1A+O5Q`bU+5xLH@Y)*F&hqRZ}B zTIc<+qa5+qZ|igUc6r_f66*pwb((GNU}HJ`SX%qYP^-+f^dnGO)%Yb+i0k<|Odqv(K_Bj$dQsB?WvpU+UI@DEE zN_O_-LcT6(vWmZIBEASckNtP^kVSKh+?Du3mKWdeoW4f(d6$d#-_2EPRcGHL!}|{B zwR1;~eozEZTwbN*BC5EmX2o~|FDd!~K6Ih(k!wuBbsQ5rr9PVI=!o@W;WSa*)DJ^S&@Tl}WKIzJGtDfx(ut&K0tPwhtQJ;(V_8dLyaH?c}U7hcIkPSROUTY_YwU zRJ*N8n(j|q5WnAh){#yqngB`bX0;@D{OaF6I3Z}2dA4q7(6KtZ;NFL&N>-?aV9r1j z3pFLY+j*50YBhd+mayK{ex@o^SnM=)S+u30$H9>Hu}^JYFOpsZD=%-@tTXdXUk5c` zZkS3l4p#7sSQuDkSx1O(`Te`*$Ck~sYZ5GuitLDgvlSFT#6(?c4vL$^y8 z<6@Fn;TFhVKm5ERc@KJXGR65Q#XT3!o0pT9*H1^5pN5$m3$;eQZmT$}h0H$ZQovt! zABG9X7GSQGh1iC9FN9$=|9P)p*(oW(rOUzI?gUjmd-bXf{hM3p>|hO9vRA6|i0`#G zM`PWyrN7;|bsXHUR<&7@o2z5bCdYgxUlh0T+V{Ji!{837DHtJ<3`QDc~IXbihm zyIoMr$m^8sa;%g2xm^Jx3-mv&)HL3lYx@S#2=zXF#&4Er?9wW%uDrT+a+pNx+b)|W z`}gnPI1k=3LnPEj;}%796!&tHOP3ZT+8S4ca8r$LAu-WA=Hd3CBBoD7tSri)%p0?> zZd}XEu2f{aJZ+=neiLas3)O6CRDmYzMAqrbC~#FRuV>e)SlQbngG0CxP1J&o$_-;Q ztfarNHK?OGNXUXoNpq^Yn3dFgCXkQ$wKreIG*5VARaqX>YHvxd^S%5sPjVeo)%=E> zZhB*FZ7){H+E`l))FN~*#V4n#+o>Hv(=MhxQckO#h`zsP8}(7kH|B51hcr$J`n@Y9 zMKq_f*Kz62vFH~le)56Qb4?kegkpJh4ney(z1W_ix?w2X{#R&M(h+8^s!$7v+_U-O= zaWPjmd{2i2VY*JJI{R8#(`jPXggk<{LUI5-wg1MA3=PQEz2-#Nf9mNKCOfA1Q*2>% z2>t3}xDqL^tH zsAtjZmn7E>R($usrahz017AfCo(DQ=jBqYuNf`pogq1|M zF&E@Zc7i@ZffVoBwE&IB<<_#GhR~K3Z{6q`HQ(Jm?chQ6q?~KwR`%&=0)T-DcfcqVOThHD0@3J zy;}l$mP<*$;zWUZ3caqIM*gdC4L15{JU|~^`3mqhkjFW7{CIG;uvkal#OceIf3e?V zW0rKyBy1>@#{v1-4E4wfu9Kynh07{%7Tt|rS66rD?_bwYIjr>DgohxW^w0E${Pr+j zBKeH!rklBMkCG@l*Tsu#3oJ}rrwSFBi4A}Gw*yjkf(48jr@FJFMwt5x@(FAUjB_79 zd?@BuNE`Va14u#EuJ)UYTlT-d{M4)xUG{7Ez$Td=NJ?Q&gV|0h_B)v1inQWdE%rVZFEl2gU}zK&!ezp+JhsFgdIyN2FpEIhlvy{g@0Nj)_b7 zW!D0eFh|a(Dxz_O6Pv~_VgPhDi6%C_zKu|iT@@YJle&edfohV^8&4eFA-SV$>ZlQJ z#;MtVm)+u`uIlo;xj2r8%uS7rvw#!d<}H^JetO>B$x4@)6&VD-$E9KNOfA}pm{G4q zVK(o9=jfs}FP~MuD01)pOAGr{poSh*=daV5N|lG>8U*eT;iNr7*lW44T>0n}Z|}2o zP*PJ8t&DM$)##f1DHsCizdwG>8U|F+pJqWf*m~Jgd9GTB#-B5I9aAm2iaFH-VuQ zHP2-gjo7uxd^4~!WD&xHV5IJ-A)z_sABuTlvl)34u4KBL>K-63-+*f0%Ph!8TuxD* ztQOJ$%UUQp-fih-a}03 zAHm1yc@(5G>7EV~=FdN1Ao-Co0P~&#-S|UPC$W3G#W<$}L)=}u>lNips-lw8Jq(K} zpW#&t56IAeS9d$5_UnBUJEgpk%@4>Of*$RA*whjoF;le{Kv#mwok)Yi`*V>IX{Q#? 
+main --> config
+main --> log
+main --> curlEngine
+main --> util
+main --> onedrive
+main --> syncEngine
+main --> itemdb
+main --> clientSideFiltering
+main --> monitor
+
+config --> log
+config --> util
+
+clientSideFiltering --> config
+clientSideFiltering --> util
+clientSideFiltering --> log
+
+syncEngine --> config
+syncEngine --> log
+syncEngine --> util
+syncEngine --> onedrive
+syncEngine --> itemdb
+syncEngine --> clientSideFiltering
+
+util --> log
+util --> config
+util --> qxor
+util --> curlEngine
+
+sqlite --> log
+sqlite -> "Database" : uses
+
+onedrive --> config
+onedrive --> log
+onedrive --> util
+onedrive --> curlEngine
+
+monitor --> config
+monitor --> util
+monitor --> log
+monitor --> clientSideFiltering
+monitor .> syncEngine : inotify event
+
+itemdb --> sqlite
+itemdb --> util
+itemdb --> log
+
+curlEngine --> log
+@enduml
diff --git a/docs/puml/conflict_handling_default.png b/docs/puml/conflict_handling_default.png
new file mode 100644
index 0000000000000000000000000000000000000000..6e8b0226485fbdb7ba5583f5e202edb2b7aa567d
GIT binary patch
literal 65635