diff --git a/src/main.d b/src/main.d
index c3250047e..311b53331 100644
--- a/src/main.d
+++ b/src/main.d
@@ -66,6 +66,7 @@ int main(string[] cliArgs) {
 	// DEVELOPER OPTIONS OUTPUT VARIABLES
 	bool displayMemoryUsage = false;
 	bool displaySyncOptions = false;
+	bool monitorFailures = false;
 	// Define 'exit' and 'failure' scopes
 	scope(exit) {
@@ -606,6 +607,7 @@ int main(string[] cliArgs) {
 		}
 		// Configure the monitor class
+		Tid workerTid;
 		filesystemMonitor = new Monitor(appConfig, selectiveSync);
 		// Delegated function for when inotify detects a new local directory has been created
@@ -685,6 +687,7 @@ int main(string[] cliArgs) {
 		try {
 			log.log("Initialising filesystem inotify monitoring ...");
 			filesystemMonitor.initialise();
+			workerTid = filesystemMonitor.watch();
 			log.log("Performing initial syncronisation to ensure consistent local state ...");
 		} catch (MonitorException e) {
 			// monitor class initialisation failed
@@ -709,6 +712,7 @@ int main(string[] cliArgs) {
 		MonoTime lastGitHubCheckTime = MonoTime.currTime();
 		string loopStartOutputMessage = "################################################## NEW LOOP ##################################################";
 		string loopStopOutputMessage = "################################################ LOOP COMPLETE ###############################################";
+		bool notificationReceived = false;
 		while (performMonitor) {
 			// Do we need to validate the runtimeSyncDirectory to check for the presence of a '.nosync' file - the disk may have been ejected ..
@@ -726,40 +730,19 @@ int main(string[] cliArgs) {
 			}
 			// Webhook Notification Handling
-			bool notificationReceived = false;
 			// Check for notifications pushed from Microsoft to the webhook
 			if (webhookEnabled) {
 				// Create a subscription on the first run, or renew the subscription
 				// on subsequent runs when it is about to expire.
 				oneDriveApiInstance.createOrRenewSubscription();
-
-				// Process incoming notifications if any.
-
-				// Empirical evidence shows that Microsoft often sends multiple
-				// notifications for one single change, so we need a loop to exhaust
-				// all signals that were queued up by the webhook. The notifications
-				// do not contain any actual changes, and we will always rely do the
-				// delta endpoint to sync to latest. Therefore, only one sync run is
-				// good enough to catch up for multiple notifications.
-				for (int signalCount = 0;; signalCount++) {
-					const auto signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {});
-					if (signalExists) {
-						notificationReceived = true;
-					} else {
-						if (notificationReceived) {
-							log.log("Received ", signalCount," refresh signals from the webhook");
-						}
-						break;
-					}
-				}
 			}
 			// Get the current time this loop is starting
 			auto currentTime = MonoTime.currTime();
 			// Do we perform a sync with OneDrive?
-			if (notificationReceived || (currentTime - lastCheckTime > checkOnlineInterval) || (monitorLoopFullCount == 0)) {
+			if ((currentTime - lastCheckTime >= checkOnlineInterval) || (monitorLoopFullCount == 0)) {
 				// Increment relevant counters
 				monitorLoopFullCount++;
 				fullScanFrequencyLoopCount++;
@@ -837,8 +820,8 @@ int main(string[] cliArgs) {
 					performStandardSyncProcess(localPath, filesystemMonitor);
 				}
-				// Discard any inotify events generated as part of any sync operation
-				filesystemMonitor.update(false);
+				// Handle any new inotify events
+				filesystemMonitor.update(true);
 				// Detail the outcome of the sync process
 				displaySyncOutcome();
@@ -881,8 +864,77 @@ int main(string[] cliArgs) {
 					}
 				}
 			}
-			// Sleep the monitor thread for 1 second, loop around and pick up any inotify changes
-			Thread.sleep(dur!"seconds"(1));
+
+			if(performMonitor) {
+				auto nextCheckTime = lastCheckTime + checkOnlineInterval;
+				currentTime = MonoTime.currTime();
+				auto sleepTime = nextCheckTime - currentTime;
+				log.vdebug("Sleep for ", sleepTime);
+
+				if(filesystemMonitor.initialised || webhookEnabled) {
+					if(filesystemMonitor.initialised) {
+						// If local monitor is on
+						// start the worker and wait for event
+						if(!filesystemMonitor.isWorking()) {
+							workerTid.send(1);
+						}
+					}
+
+					if(webhookEnabled) {
+						// if onedrive webhook is enabled
+						// update sleep time based on renew interval
+						Duration nextWebhookCheckDuration = oneDriveApiInstance.getNextExpirationCheckDuration();
+						if (nextWebhookCheckDuration < sleepTime) {
+							sleepTime = nextWebhookCheckDuration;
+							log.vdebug("Update sleeping time to ", sleepTime);
+						}
+						notificationReceived = false;
+					}
+
+					int res = 1;
+					// Process incoming notifications if any.
+					auto signalExists = receiveTimeout(sleepTime,
+						(int msg) {
+							res = msg;
+						},
+						(ulong _) {
+							notificationReceived = true;
+						}
+					);
+					log.vdebug("signalExists = ", signalExists);
+					log.vdebug("worker status = ", res);
+					log.vdebug("notificationReceived = ", notificationReceived);
+
+					// Empirical evidence shows that Microsoft often sends multiple
+					// notifications for one single change, so we need a loop to exhaust
+					// all signals that were queued up by the webhook. The notifications
+					// do not contain any actual changes, and we will always rely on the
+					// delta endpoint to sync to latest. Therefore, only one sync run is
+					// good enough to catch up for multiple notifications.
+					int signalCount = notificationReceived ? 1 : 0;
+					for (;; signalCount++) {
+						signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {});
+						if (signalExists) {
+							notificationReceived = true;
+						} else {
+							if (notificationReceived) {
+								log.log("Received ", signalCount," refresh signals from the webhook");
+								oneDriveWebhookCallback();
+							}
+							break;
+						}
+					}
+
+					if(res == -1) {
+						log.error("Error: Monitor worker failed.");
+						monitorFailures = true;
+						performMonitor = false;
+					}
+				} else {
+					// no hooks available, nothing to check
+					Thread.sleep(sleepTime);
+				}
+			}
 		}
 	}
 } else {
@@ -893,7 +945,7 @@ int main(string[] cliArgs) {
 	}
 	// Exit application using exit scope
-	if (!syncEngineInstance.syncFailures) {
+	if (!syncEngineInstance.syncFailures && !monitorFailures) {
 		return EXIT_SUCCESS;
 	} else {
 		return EXIT_FAILURE;
 	}
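
Note: the block above replaces the fixed one-second Thread.sleep() with std.concurrency message passing, so the monitor loop only wakes when the inotify worker or the webhook has something to report, or when the online check interval expires. A minimal, self-contained sketch of that wake/sleep pattern follows; the names and the 1/-1 protocol are illustrative only, not the client's actual API.

import std.concurrency;
import std.stdio;
import core.thread;
import core.time;

void watcher(Tid owner) {
    // Wait until the owner arms us, pretend to watch for filesystem events,
    // then wake the owner with an int message.
    receiveOnly!int();
    Thread.sleep(dur!"msecs"(500));   // stand-in for select() on the inotify fd
    owner.send(1);                    // 1 = events available, -1 = worker failed
}

void main() {
    Tid workerTid = spawn(&watcher, thisTid);
    workerTid.send(1);                // arm the worker

    int workerStatus = 0;
    bool webhookSignal = false;

    // Sleep up to the full interval, but wake early on either message type.
    bool woke = receiveTimeout(dur!"seconds"(5),
        (int msg)  { workerStatus = msg; },   // worker: inotify events or failure
        (ulong _)  { webhookSignal = true; }  // webhook: refresh signal
    );
    writeln("woke early: ", woke, ", worker: ", workerStatus, ", webhook: ", webhookSignal);
}

receiveTimeout() dispatches on message type, which is how a single timed receive can distinguish a worker wake-up (int) from a webhook refresh signal (ulong).
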
@@ -907,7 +959,6 @@ void performStandardExitProcess() {
 		itemDB.performVacuum();
 		object.destroy(itemDB);
 	}
-	// Free other objects and memory
 	if (appConfig !is null) {
 		// Cleanup any existing dry-run elements ... these should never be left hanging around
@@ -917,13 +968,33 @@ void performStandardExitProcess() {
 	if (oneDriveApiInstance !is null) object.destroy(oneDriveApiInstance);
 	if (selectiveSync !is null) object.destroy(selectiveSync);
 	if (syncEngineInstance !is null) object.destroy(syncEngineInstance);
-
-	if (filesystemMonitor !is null) {
+	// cleanup hooks
+	if (filesystemMonitor !is null && filesystemMonitor.initialised) {
 		filesystemMonitor.shutdown();
 		object.destroy(filesystemMonitor);
 	}
 }
+void oneDriveWebhookCallback() {
+	// If we are in a --download-only method of operation, there is no filesystem monitoring, so no inotify events to check
+	if (!appConfig.getValueBool("download_only")) {
+		try {
+			// Process any inotify events
+			filesystemMonitor.update(true);
+		} catch (MonitorException e) {
+			// Catch any exceptions thrown by inotify / monitor engine
+			log.error("ERROR: The following inotify error was generated: ", e.msg);
+		}
+	}
+
+	// Download data from OneDrive last
+	syncEngineInstance.syncOneDriveAccountToLocalDisk();
+	if (appConfig.getValueBool("monitor")) {
+		// Handle any new inotify events
+		filesystemMonitor.update(true);
+	}
+}
+
 void performUploadOnlySyncProcess(string localPath, Monitor filesystemMonitor = null) {
 	// Perform the local database consistency check, picking up locally modified data and uploading this to OneDrive
 	syncEngineInstance.performDatabaseConsistencyAndIntegrityCheck();
@@ -973,16 +1044,16 @@ void performStandardSyncProcess(string localPath, Monitor filesystemMonitor = nu
 		// Download data from OneDrive last
 		syncEngineInstance.syncOneDriveAccountToLocalDisk();
 		if (appConfig.getValueBool("monitor")) {
-			// Cancel out any inotify events from downloading data
-			filesystemMonitor.update(false);
+			// Handle any new inotify events
+			filesystemMonitor.update(true);
 		}
 	} else {
 		// Normal sync
 		// Download data from OneDrive first
 		syncEngineInstance.syncOneDriveAccountToLocalDisk();
 		if (appConfig.getValueBool("monitor")) {
-			// Cancel out any inotify events from downloading data
-			filesystemMonitor.update(false);
+			// Handle any new inotify events
+			filesystemMonitor.update(true);
 		}
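
Note: the drain loop removed from the old monitor loop reappears after the timed receive above, still coalescing the multiple notifications Microsoft tends to send for a single change into one oneDriveWebhookCallback() run. A stand-alone sketch of that mailbox-draining idiom, using made-up message values:

import std.concurrency;
import std.stdio;
import core.time;

void main() {
    // Pretend the webhook thread queued several notifications for one change.
    foreach (i; 0 .. 3) thisTid.send(cast(ulong) i);

    bool notificationReceived = false;
    int signalCount = 0;
    // A negative timeout makes receiveTimeout() return immediately when the
    // mailbox is empty, so this loop drains every queued signal without blocking.
    for (;; signalCount++) {
        bool signalExists = receiveTimeout(dur!"seconds"(-1), (ulong _) {});
        if (signalExists) {
            notificationReceived = true;
        } else {
            break;
        }
    }
    if (notificationReceived)
        writeln("Coalesced ", signalCount, " webhook signals into one sync run");
}
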
diff --git a/src/monitor.d b/src/monitor.d
index adad5be54..38cd86d99 100644
--- a/src/monitor.d
+++ b/src/monitor.d
@@ -7,7 +7,10 @@ import core.stdc.stdlib;
 import core.sys.linux.sys.inotify;
 import core.sys.posix.poll;
 import core.sys.posix.unistd;
+import core.sys.posix.sys.select;
+import core.time;
 import std.algorithm;
+import std.concurrency;
 import std.exception;
 import std.file;
 import std.path;
@@ -31,6 +34,118 @@ class MonitorException: ErrnoException {
 	}
 }
+shared class MonitorBackgroundWorker {
+	// inotify file descriptor
+	int fd;
+	private bool working;
+
+	void initialise() {
+		fd = inotify_init();
+		working = false;
+		if (fd < 0) throw new MonitorException("inotify_init failed");
+	}
+
+	// Add this path to be monitored
+	private int add(string pathname) {
+		int wd = inotify_add_watch(fd, toStringz(pathname), mask);
+		if (wd < 0) {
+			if (errno() == ENOSPC) {
+				// Get the current value
+				ulong maxInotifyWatches = to!int(strip(readText("/proc/sys/fs/inotify/max_user_watches")));
+				log.log("The user limit on the total number of inotify watches has been reached.");
+				log.log("Your current limit of inotify watches is: ", maxInotifyWatches);
+				log.log("It is recommended that you change the max number of inotify watches to at least double your existing value.");
+				log.log("To change the current max number of watches to " , (maxInotifyWatches * 2) , " run:");
+				log.log("EXAMPLE: sudo sysctl fs.inotify.max_user_watches=", (maxInotifyWatches * 2));
+			}
+			if (errno() == 13) {
+				log.vlog("WARNING: inotify_add_watch failed - permission denied: ", pathname);
+			}
+			// Flag any other errors
+			log.error("ERROR: inotify_add_watch failed: ", pathname);
+			return wd;
+		}
+
+		// Add path to inotify watch - required regardless if a '.folder' or 'folder'
+		log.vdebug("inotify_add_watch successfully added for: ", pathname);
+
+		// Do we log that we are monitoring this directory?
+		if (isDir(pathname)) {
+			// Log that this directory is being monitored
+			log.vlog("Monitoring directory: ", pathname);
+		}
+		return wd;
+	}
+
+	int remove(int wd) {
+		return inotify_rm_watch(fd, wd);
+	}
+
+	bool isWorking() {
+		return working;
+	}
+
+	void watch(Tid callerTid) {
+		// On failure, send -1 to caller
+		int res;
+
+		// wait for the caller to be ready
+		int isAlive = receiveOnly!int();
+
+		while (isAlive) {
+			fd_set fds;
+			FD_ZERO (&fds);
+			FD_SET(fd, &fds);
+
+			working = true;
+			res = select(FD_SETSIZE, &fds, null, null, null);
+
+			if(res == -1) {
+				if(errno() == EINTR) {
+					// Received an interrupt signal but no events are available
+					// try to update the work status and directly watch again
+					receiveTimeout(dur!"seconds"(1), (int msg) {
+						isAlive = msg;
+					});
+				} else {
+					// Error occurred, tell caller to terminate.
+					callCaller(callerTid, -1);
+					working = false;
+					break;
+				}
+			} else {
+				// Wake up caller
+				callCaller(callerTid, 1);
+				// Wait for the caller to be ready
+				isAlive = receiveOnly!int();
+			}
+		}
+	}
+
+	void callCaller(Tid callerTid, int msg) {
+		working = false;
+		callerTid.send(msg);
+	}
+
+	void shutdown() {
+		if (fd > 0) {
+			close(fd);
+			fd = 0;
+		}
+	}
+}
+
+
+void startMonitorJob(shared(MonitorBackgroundWorker) worker, Tid callerTid)
+{
+	try {
+		worker.watch(callerTid);
+	} catch (OwnerTerminated error) {
+		// caller is terminated
+	}
+	worker.shutdown();
+}
+
 final class Monitor {
 	// Class variables
 	ApplicationConfig appConfig;
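
Note: MonitorBackgroundWorker.watch() parks a background thread in select() on the inotify descriptor and pings its owner when the descriptor becomes readable, which is what lets the main loop sleep instead of polling. Below is a self-contained illustration of that pattern, using a pipe in place of the inotify descriptor; the function and variable names here are invented for the example.

import std.concurrency;
import std.stdio;
import core.sys.posix.sys.select;
import core.sys.posix.unistd;
import core.thread;
import core.time;

void fdWatcher(Tid owner, int fd) {
    // Wait until the owner arms us, then block until fd is readable.
    receiveOnly!int();
    fd_set fds;
    FD_ZERO(&fds);
    FD_SET(fd, &fds);
    int res = select(fd + 1, &fds, null, null, null);
    // 1 = data available, -1 = select() failed; the owner decides what to do.
    owner.send(res == -1 ? -1 : 1);
}

void main() {
    int[2] pipefd;
    pipe(pipefd);                                   // pipefd[0] plays the role of the inotify fd

    Tid tid = spawn(&fdWatcher, thisTid, pipefd[0]);
    tid.send(1);                                    // arm the worker

    Thread.sleep(dur!"msecs"(200));
    char c = 'x';
    core.sys.posix.unistd.write(pipefd[1], &c, 1);  // simulate an inotify event

    int status = receiveOnly!int();                 // main thread wakes only when something happened
    writeln("worker reported: ", status);
    close(pipefd[0]);
    close(pipefd[1]);
}

As in the patch, the owner arms the worker with send(1) and only learns about readiness (or failure, -1) through the worker's reply.
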
@@ -42,17 +157,18 @@ final class Monitor {
 	bool skip_symlinks = false;
 	// check for .nosync if enabled
 	bool check_nosync = false;
+	// check if initialised
+	bool initialised = false;
 	// Configure Private Class Variables
-	// inotify file descriptor
-	private int fd;
+	shared(MonitorBackgroundWorker) worker;
 	// map every inotify watch descriptor to its directory
 	private string[int] wdToDirName;
 	// map the inotify cookies of move_from events to their path
 	private string[int] cookieToPath;
 	// buffer to receive the inotify events
 	private void[] buffer;
-
+
 	// Configure function delegates
 	void delegate(string path) onDirCreated;
 	void delegate(string path) onFileChanged;
@@ -75,10 +191,10 @@ final class Monitor {
 	}
 	assert(onDirCreated && onFileChanged && onDelete && onMove);
-	fd = inotify_init();
-	if (fd < 0) throw new MonitorException("inotify_init failed");
 	if (!buffer) buffer = new void[4096];
-
+	worker = new shared(MonitorBackgroundWorker);
+	worker.initialise();
+
 	// from which point do we start watching for changes?
 	string monitorPath;
 	if (appConfig.getValueString("single_directory") != ""){
@@ -93,7 +209,9 @@ final class Monitor {
 	// Shutdown the monitor class
 	void shutdown() {
-		if (fd > 0) close(fd);
+		if(!initialised)
+			return;
+		worker.shutdown();
 		wdToDirName = null;
 	}
@@ -151,11 +269,23 @@ final class Monitor {
 				return;
 			}
 		}
+
+		if (isDir(dirname)) {
+			// This is a directory
+			// is the path excluded if skip_dotfiles configured and path is a .folder?
+			if ((selectiveSync.getSkipDotfiles()) && (isDotFile(dirname))) {
+				// don't add a watch for this directory
+				return;
+			}
+		}
 		// passed all potential exclusions
 		// add inotify watch for this path / directory / file
 		log.vdebug("Calling add() for this dirname: ", dirname);
-		add(dirname);
+		int wd = worker.add(dirname);
+		if (wd > 0) {
+			wdToDirName[wd] = buildNormalizedPath(dirname) ~ "/";
+		}
 		// if this is a directory, recursivly add this path
 		if (isDir(dirname)) {
@@ -193,54 +323,10 @@
 		}
 	}
-	// Add this path to be monitored
-	private void add(string pathname) {
-		int wd = inotify_add_watch(fd, toStringz(pathname), mask);
-		if (wd < 0) {
-			if (errno() == ENOSPC) {
-				// Get the current value
-				ulong maxInotifyWatches = to!int(strip(readText("/proc/sys/fs/inotify/max_user_watches")));
-				log.log("The user limit on the total number of inotify watches has been reached.");
-				log.log("Your current limit of inotify watches is: ", maxInotifyWatches);
-				log.log("It is recommended that you change the max number of inotify watches to at least double your existing value.");
-				log.log("To change the current max number of watches to " , (maxInotifyWatches * 2) , " run:");
-				log.log("EXAMPLE: sudo sysctl fs.inotify.max_user_watches=", (maxInotifyWatches * 2));
-			}
-			if (errno() == 13) {
-				if ((selectiveSync.getSkipDotfiles()) && (isDotFile(pathname))) {
-					// no misleading output that we could not add a watch due to permission denied
-					return;
-				} else {
-					log.vlog("WARNING: inotify_add_watch failed - permission denied: ", pathname);
-					return;
-				}
-			}
-			// Flag any other errors
-			log.error("ERROR: inotify_add_watch failed: ", pathname);
-			return;
-		}
-
-		// Add path to inotify watch - required regardless if a '.folder' or 'folder'
-		wdToDirName[wd] = buildNormalizedPath(pathname) ~ "/";
-		log.vdebug("inotify_add_watch successfully added for: ", pathname);
-
-		// Do we log that we are monitoring this directory?
-		if (isDir(pathname)) {
-			// This is a directory
-			// is the path exluded if skip_dotfiles configured and path is a .folder?
-			if ((selectiveSync.getSkipDotfiles()) && (isDotFile(pathname))) {
-				// no misleading output that we are monitoring this directory
-				return;
-			}
-			// Log that this is directory is being monitored
-			log.vlog("Monitoring directory: ", pathname);
-		}
-	}
-
 	// Remove a watch descriptor
 	private void remove(int wd) {
 		assert(wd in wdToDirName);
-		int ret = inotify_rm_watch(fd, wd);
+		int ret = worker.remove(wd);
 		if (ret < 0) throw new MonitorException("inotify_rm_watch failed");
 		log.vlog("Monitored directory removed: ", wdToDirName[wd]);
 		wdToDirName.remove(wd);
@@ -251,7 +337,7 @@
 		path ~= "/";
 		foreach (wd, dirname; wdToDirName) {
 			if (dirname.startsWith(path)) {
-				int ret = inotify_rm_watch(fd, wd);
+				int ret = worker.remove(wd);
 				if (ret < 0) throw new MonitorException("inotify_rm_watch failed");
 				wdToDirName.remove(wd);
 				log.vlog("Monitored directory removed: ", dirname);
@@ -267,11 +353,17 @@
 		return path;
 	}
+	shared(MonitorBackgroundWorker) getWorker() {
+		return worker;
+	}
+
 	// Update
 	void update(bool useCallbacks = true) {
+		if(!initialised)
+			return;
 		pollfd fds = {
-			fd: fd,
+			fd: worker.fd,
 			events: POLLIN
 		};
@@ -280,7 +372,7 @@
 			if (ret == -1) throw new MonitorException("poll failed");
 			else if (ret == 0) break; // no events available
-			size_t length = read(fd, buffer.ptr, buffer.length);
+			size_t length = read(worker.fd, buffer.ptr, buffer.length);
 			if (length == -1) throw new MonitorException("read failed");
 			int i = 0;
@@ -410,4 +502,13 @@
 			log.vdebug("inotify events flushed");
 		}
 	}
+
+	Tid watch() {
+		initialised = true;
+		return spawn(&startMonitorJob, worker, thisTid);
+	}
+
+	bool isWorking() {
+		return worker.isWorking();
+	}
 }
diff --git a/src/onedrive.d b/src/onedrive.d
index d18ed49b4..3e60d9ac0 100644
--- a/src/onedrive.d
+++ b/src/onedrive.d
@@ -846,6 +846,21 @@ class OneDriveApi {
 	void resetRetryAfterValue() {
 		retryAfterValue = 0;
 	}
+
+	// Return the duration until the next subscriptionExpiration check
+	Duration getNextExpirationCheckDuration() {
+		SysTime now = Clock.currTime(UTC());
+		if (hasValidSubscription()) {
+			Duration elapsed = Clock.currTime(UTC()) - subscriptionLastErrorAt;
+			// Check if we are waiting for the next retry
+			if (elapsed < subscriptionRetryInterval)
+				return subscriptionRetryInterval - elapsed;
+			else
+				return subscriptionExpiration - now - subscriptionRenewalInterval;
+		}
+		else
+			return subscriptionRetryInterval;
+	}
 	// Create a new subscription or renew the existing subscription
 	void createOrRenewSubscription() {
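
Note: getNextExpirationCheckDuration() above caps how long the monitor loop may sleep before the webhook subscription needs attention. A small worked example of the same arithmetic, using made-up interval values rather than the client's defaults:

import std.datetime;
import std.stdio;
import core.time;

void main() {
    SysTime now = Clock.currTime(UTC());

    // Assumed example values, not the client's real configuration.
    Duration subscriptionRetryInterval   = dur!"minutes"(5);
    Duration subscriptionRenewalInterval = dur!"minutes"(15);
    SysTime  subscriptionExpiration      = now + dur!"hours"(1);
    SysTime  subscriptionLastErrorAt     = now - dur!"minutes"(30); // long ago => not retrying

    Duration elapsed = now - subscriptionLastErrorAt;
    Duration next;
    if (elapsed < subscriptionRetryInterval) {
        // Still inside the retry back-off window after an error.
        next = subscriptionRetryInterval - elapsed;
    } else {
        // Healthy subscription: wake up early enough to renew before it expires.
        next = subscriptionExpiration - now - subscriptionRenewalInterval;
    }
    writeln("sleep at most: ", next); // 45 minutes with the values above
}
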
diff --git a/src/sync.d b/src/sync.d
index d52a18ca0..335283096 100644
--- a/src/sync.d
+++ b/src/sync.d
@@ -1642,10 +1642,6 @@ class SyncEngine {
 		localModifiedTime.fracSecs = Duration.zero;
 		itemModifiedTime.fracSecs = Duration.zero;
-		// If we need to rename the file, what do we rename it to?
-		auto ext = extension(newItemPath);
-		auto renamedNewItemPath = newItemPath.chomp(ext) ~ "-" ~ deviceName ~ ext;
-
 		// Is the local modified time greater than that from OneDrive?
 		if (localModifiedTime > itemModifiedTime) {
 			// Local file is newer than item on OneDrive based on file modified time
@@ -1674,11 +1670,10 @@ class SyncEngine {
 				log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", newItemPath);
 			} else {
 				// local data protection is configured, renaming local file
-				log.log("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent local data loss: ", newItemPath, " -> ", renamedNewItemPath);
 				// perform the rename action of the local file
 				if (!dryRun) {
 					// Perform the local rename of the existing local file
-					safeRename(newItemPath, renamedNewItemPath, dryRun);
+					safeBackup(newItemPath, dryRun);
 				} else {
 					// Expectation here is that there is a new file locally (renamedNewItemPath) however as we don't create this, the "new file" will not be uploaded as it does not exist
 					log.vdebug("DRY-RUN: Skipping local file rename");
@@ -1697,11 +1692,10 @@ class SyncEngine {
 				log.vlog("WARNING: Local Data Protection has been disabled. You may experience data loss on this file: ", newItemPath);
 			} else {
 				// local data protection is configured, renaming local file
-				log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", newItemPath, " -> ", renamedNewItemPath);
 				// perform the rename action of the local file
 				if (!dryRun) {
 					// Perform the local rename of the existing local file
-					safeRename(newItemPath, renamedNewItemPath, dryRun);
+					safeBackup(newItemPath, dryRun);
 				} else {
 					// Expectation here is that there is a new file locally (renamedNewItemPath) however as we don't create this, the "new file" will not be uploaded as it does not exist
 					log.vdebug("DRY-RUN: Skipping local file rename");
@@ -1988,12 +1982,8 @@ class SyncEngine {
 			// do the rename if we are not in a --dry-run scenario
 			if (!dryRun) {
-				// If we need to rename the file, what do we rename it to?
-				auto ext = extension(newItemPath);
-				auto renamedNewItemPath = newItemPath.chomp(ext) ~ "-" ~ deviceName ~ ext;
-
 				// Perform the local rename of the existing local file
-				safeRename(newItemPath, renamedNewItemPath, dryRun);
+				safeBackup(newItemPath, dryRun);
 			}
 		}
 	}
@@ -3371,105 +3361,112 @@ class SyncEngine {
 		// This is because the Item[] has no other functions to allow is to parallel process those elements, so we have to use a string array as input to this function
 		Item dbItem;
 		itemDB.selectById(changedItemParentId, changedItemId, dbItem);
-
-		// Query the available space online
-		// This will update appConfig.quotaAvailable & appConfig.quotaRestricted values
-		remainingFreeSpace = getRemainingFreeSpace(dbItem.driveId);
-
-		// Get the file size
-		ulong thisFileSizeLocal = getSize(localFilePath);
-		ulong thisFileSizeFromDB = to!ulong(dbItem.size);
-
-		// remainingFreeSpace online includes the current file online
-		// we need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value
-		ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal;
-
-		// Based on what we know, for this thread - can we safely upload this modified local file?
-		log.vdebug("This Thread Current Free Space Online: ", remainingFreeSpace);
-		log.vdebug("This Thread Calculated Free Space Online Post Upload: ", calculatedSpaceOnlinePostUpload);
-
-		JSONValue uploadResponse;
+
+		// before uploading, check if it is really necessary
+		string itemSource = "database";
+		if (isItemSynced(dbItem, localFilePath, itemSource)) {
+			log.vdebug("Skipped as the item is already in-sync: ", localFilePath);
+		} else {
-		bool spaceAvailableOnline = false;
-		// If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated
-		// If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true
-		// If 'business' accounts, if driveId == defaultDriveId, then we will have data
-		// If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true
+			// Query the available space online
+			// This will update appConfig.quotaAvailable & appConfig.quotaRestricted values
+			remainingFreeSpace = getRemainingFreeSpace(dbItem.driveId);
+
+			// Get the file size
+			ulong thisFileSizeLocal = getSize(localFilePath);
+			ulong thisFileSizeFromDB = to!ulong(dbItem.size);
+
+			// remainingFreeSpace online includes the current file online
+			// we need to remove the online file (add back the existing file size) then take away the new local file size to get a new approximate value
+			ulong calculatedSpaceOnlinePostUpload = (remainingFreeSpace + thisFileSizeFromDB) - thisFileSizeLocal;
+
+			// Based on what we know, for this thread - can we safely upload this modified local file?
+			log.vdebug("This Thread Current Free Space Online: ", remainingFreeSpace);
+			log.vdebug("This Thread Calculated Free Space Online Post Upload: ", calculatedSpaceOnlinePostUpload);
-		// What was the latest getRemainingFreeSpace() value?
-		if (appConfig.quotaAvailable) {
-			// Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload?
-			if (calculatedSpaceOnlinePostUpload > 0) {
-				// Based on this thread action, we beleive that there is space available online to upload - proceed
+
+			JSONValue uploadResponse;
+
+			bool spaceAvailableOnline = false;
+			// If 'personal' accounts, if driveId == defaultDriveId, then we will have data - appConfig.quotaAvailable will be updated
+			// If 'personal' accounts, if driveId != defaultDriveId, then we will not have quota data - appConfig.quotaRestricted will be set as true
+			// If 'business' accounts, if driveId == defaultDriveId, then we will have data
+			// If 'business' accounts, if driveId != defaultDriveId, then we will have data, but it will be a 0 value - appConfig.quotaRestricted will be set as true
+
+			// What was the latest getRemainingFreeSpace() value?
+			if (appConfig.quotaAvailable) {
+				// Our query told us we have free space online .. if we upload this file, will we exceed space online - thus upload will fail during upload?
+				if (calculatedSpaceOnlinePostUpload > 0) {
+					// Based on this thread action, we believe that there is space available online to upload - proceed
+					spaceAvailableOnline = true;
+				}
+			}
+			// Is quota being restricted?
+			if (appConfig.quotaRestricted) {
+				// Space available online is being restricted - so we have no way to really know if there is space available online
 				spaceAvailableOnline = true;
-			}
-		}
-		// Is quota being restricted?
-		if (appConfig.quotaRestricted) {
-			// Space available online is being restricted - so we have no way to really know if there is space available online
-			spaceAvailableOnline = true;
-		}
-
-		// Do we have space available or is space available being restricted (so we make the blind assumption that there is space available)
-		if (spaceAvailableOnline) {
-			// Does this file exceed the maximum file size to upload to OneDrive?
-			if (thisFileSizeLocal <= maxUploadFileSize) {
-				// Attempt to upload the modified file
-				// Error handling is in performModifiedFileUpload(), and the JSON that is responded with - will either be null or a valid JSON object containing the upload result
-				uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal);
-				// Evaluate the returned JSON uploadResponse
-				// If there was an error uploading the file, uploadResponse should be empty and invalid
-				if (uploadResponse.type() != JSONType.object){
+			// Do we have space available or is space available being restricted (so we make the blind assumption that there is space available)
+			if (spaceAvailableOnline) {
+				// Does this file exceed the maximum file size to upload to OneDrive?
+				if (thisFileSizeLocal <= maxUploadFileSize) {
+					// Attempt to upload the modified file
+					// Error handling is in performModifiedFileUpload(), and the JSON that is responded with - will either be null or a valid JSON object containing the upload result
+					uploadResponse = performModifiedFileUpload(dbItem, localFilePath, thisFileSizeLocal);
+
+					// Evaluate the returned JSON uploadResponse
+					// If there was an error uploading the file, uploadResponse should be empty and invalid
+					if (uploadResponse.type() != JSONType.object){
+						uploadFailed = true;
+						skippedExceptionError = true;
+					}
+
+				} else {
+					// Skip file - too large
 					uploadFailed = true;
-				skippedExceptionError = true;
+					skippedMaxSize = true;
 				}
-			} else {
-				// Skip file - too large
+				// Cant upload this file - no space available
 				uploadFailed = true;
-				skippedMaxSize = true;
 			}
-		} else {
-			// Cant upload this file - no space available
-			uploadFailed = true;
-		}
-
-		// Did the upload fail?
-		if (uploadFailed) {
-			// Upload failed .. why?
-			// No space available online
-			if (!spaceAvailableOnline) {
-				log.logAndNotify("Skipping uploading modified file ", localFilePath, " due to insufficient free space available on OneDrive");
-			}
-			// File exceeds max allowed size
-			if (skippedMaxSize) {
-				log.logAndNotify("Skipping uploading this modified file as it exceeds the maximum size allowed by OneDrive: ", localFilePath);
-			}
-			// Generic message
-			if (skippedExceptionError) {
-				// normal failure message if API or exception error generated
-				log.logAndNotify("Uploading modified file ", localFilePath, " ... failed!");
-			}
-		} else {
-			// Upload was successful
-			log.logAndNotify("Uploading modified file ", localFilePath, " ... done.");
-
-			// Save JSON item in database
-			saveItem(uploadResponse);
-			if (!dryRun) {
-				// Check the integrity of the uploaded modified file
-				performUploadIntegrityValidationChecks(uploadResponse, localFilePath, thisFileSizeLocal);
+			// Did the upload fail?
+			if (uploadFailed) {
+				// Upload failed .. why?
+				// No space available online
+				if (!spaceAvailableOnline) {
+					log.logAndNotify("Skipping uploading modified file ", localFilePath, " due to insufficient free space available on OneDrive");
+				}
+				// File exceeds max allowed size
+				if (skippedMaxSize) {
+					log.logAndNotify("Skipping uploading this modified file as it exceeds the maximum size allowed by OneDrive: ", localFilePath);
+				}
+				// Generic message
+				if (skippedExceptionError) {
+					// normal failure message if API or exception error generated
+					log.logAndNotify("Uploading modified file ", localFilePath, " ... failed!");
+				}
+			} else {
+				// Upload was successful
+				log.logAndNotify("Uploading modified file ", localFilePath, " ... done.");
-				// Update the date / time of the file online to match the local item
-				// Get the local file last modified time
-				SysTime localModifiedTime = timeLastModified(localFilePath).toUTC();
-				localModifiedTime.fracSecs = Duration.zero;
-				// Get the latest eTag, and use that
-				string etagFromUploadResponse = uploadResponse["eTag"].str;
-				// Attempt to update the online date time stamp based on our local data
-				uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime, etagFromUploadResponse);
+				// Save JSON item in database
+				saveItem(uploadResponse);
+
+				if (!dryRun) {
+					// Check the integrity of the uploaded modified file
+					performUploadIntegrityValidationChecks(uploadResponse, localFilePath, thisFileSizeLocal);
+
+					// Update the date / time of the file online to match the local item
+					// Get the local file last modified time
+					SysTime localModifiedTime = timeLastModified(localFilePath).toUTC();
+					localModifiedTime.fracSecs = Duration.zero;
+					// Get the latest eTag, and use that
+					string etagFromUploadResponse = uploadResponse["eTag"].str;
+					// Attempt to update the online date time stamp based on our local data
+					uploadLastModifiedTime(dbItem.driveId, dbItem.id, localModifiedTime, etagFromUploadResponse);
+				}
 			}
 		}
diff --git a/src/util.d b/src/util.d
index dfd348f85..4b0790032 100644
--- a/src/util.d
+++ b/src/util.d
@@ -53,7 +53,7 @@ void safeBackup(const(char)[] path, bool dryRun) {
 	// Perform the backup
 	log.vlog("The local item is out-of-sync with OneDrive, renaming to preserve existing file and prevent data loss: ", path, " -> ", newPath);
 	if (!dryRun) {
-		rename(path, newPath);
+		std.file.copy(path, newPath);
 	} else {
 		log.vdebug("DRY-RUN: Skipping local file backup");
 	}
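
Note: with util.d now using std.file.copy(), safeBackup() preserves the out-of-sync local file without removing the original, so the subsequent download can overwrite it in place. A hedged sketch of that behaviour follows; the '-<hostname>' suffix mirrors the renamedNewItemPath logic removed from sync.d above, and the real safeBackup() may derive the backup name differently.

import std.file;
import std.path : extension;
import std.socket : Socket;
import std.stdio;
import std.string : chomp;

string backupInsteadOfRename(string path, bool dryRun) {
    string deviceName = Socket.hostName;
    string ext = extension(path);
    string newPath = chomp(path, ext) ~ "-" ~ deviceName ~ ext;
    writeln("Preserving existing file: ", path, " -> ", newPath);
    if (!dryRun) {
        // copy() leaves the original in place, so the newer online version can
        // safely overwrite it afterwards; rename() would have removed it.
        std.file.copy(path, newPath);
    }
    return newPath;
}

void main() {
    std.file.write("example.txt", "local out-of-sync content");
    string backupPath = backupInsteadOfRename("example.txt", false);
    writeln("original still present: ", exists("example.txt"));
    writeln("backup created:         ", exists(backupPath));
}

Copying costs extra disk space and time for large files, but it avoids losing the original should the later download fail partway through.
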