From a3f4d63dd12a71823ca5a67fe2c066a6c22ace79 Mon Sep 17 00:00:00 2001
From: Joe Reuss
Date: Mon, 6 May 2024 10:57:00 -0500
Subject: [PATCH 1/2] Remove the need for =true for ?recursive

This removes the need for a value on the ?recursive query parameter; we now
just check that the parameter exists. A deprecation warning is logged if a
user supplies a value, informing them that the value will be ignored. In
addition, the ?recursive parameter can now be used outside of the plugin to
avoid any confusion.
---
 client/fed_linux_test.go | 447 +++++++++++++++++++++++++++++++++++++++
 client/main.go           |  21 +-
 cmd/plugin.go            |   4 +-
 utils/utils.go           |  17 +-
 utils/utils_test.go      |  27 +--
 5 files changed, 483 insertions(+), 33 deletions(-)

diff --git a/client/fed_linux_test.go b/client/fed_linux_test.go
index 33019f5dd..e862ef7c6 100644
--- a/client/fed_linux_test.go
+++ b/client/fed_linux_test.go
@@ -391,6 +391,453 @@ func TestRecursiveUploadsAndDownloads(t *testing.T) {
 		}
 		// Throw in a viper.Reset for good measure. Keeps our env squeaky clean!
 		viper.Reset()
+		server_utils.ResetOriginExports()
+	})
+}
+
+// Test that recursive uploads and downloads work with the ?recursive query
+func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) {
+	// Create instance of test federation
+	viper.Reset()
+	server_utils.ResetOriginExports()
+
+	fed := fed_test_utils.NewFedTest(t, mixedAuthOriginCfg)
+
+	te := client.NewTransferEngine(fed.Ctx)
+
+	// Create a token file
+	issuer, err := config.GetServerIssuerURL()
+	require.NoError(t, err)
+
+	tokenConfig := token.NewWLCGToken()
+	tokenConfig.Lifetime = time.Minute
+	tokenConfig.Issuer = issuer
+	tokenConfig.Subject = "origin"
+	tokenConfig.AddAudienceAny()
+	tokenConfig.AddResourceScopes(token_scopes.NewResourceScope(token_scopes.Storage_Read, "/"),
+		token_scopes.NewResourceScope(token_scopes.Storage_Modify, "/"))
+	token, err := tokenConfig.CreateToken()
+	assert.NoError(t, err)
+	tempToken, err := os.CreateTemp(t.TempDir(), "token")
+	assert.NoError(t, err, "Error creating temp token file")
+	defer os.Remove(tempToken.Name())
+	_, err = tempToken.WriteString(token)
+	assert.NoError(t, err, "Error writing to temp token file")
+	tempToken.Close()
+
+	// Disable progress bars to not reuse the same mpb instance
+	viper.Set("Logging.DisableProgressBars", true)
+
+	// Make our test directories and files
+	tempDir, err := os.MkdirTemp("", "UploadDir")
+	assert.NoError(t, err)
+	innerTempDir, err := os.MkdirTemp(tempDir, "InnerUploadDir")
+	assert.NoError(t, err)
+	defer os.RemoveAll(tempDir)
+	defer os.RemoveAll(tempDir)
+	permissions := os.FileMode(0755)
+	err = os.Chmod(tempDir, permissions)
+	require.NoError(t, err)
+	err = os.Chmod(innerTempDir, permissions)
+	require.NoError(t, err)
+
+	testFileContent1 := "test file content"
+	testFileContent2 := "more test file content!"
+	innerTestFileContent := "this content is within another dir!"
+ tempFile1, err := os.CreateTemp(tempDir, "test1") + assert.NoError(t, err, "Error creating temp1 file") + tempFile2, err := os.CreateTemp(tempDir, "test1") + assert.NoError(t, err, "Error creating temp2 file") + innerTempFile, err := os.CreateTemp(innerTempDir, "testInner") + assert.NoError(t, err, "Error creating inner test file") + defer os.Remove(tempFile1.Name()) + defer os.Remove(tempFile2.Name()) + defer os.Remove(innerTempFile.Name()) + + _, err = tempFile1.WriteString(testFileContent1) + assert.NoError(t, err, "Error writing to temp1 file") + tempFile1.Close() + _, err = tempFile2.WriteString(testFileContent2) + assert.NoError(t, err, "Error writing to temp2 file") + tempFile2.Close() + _, err = innerTempFile.WriteString(innerTestFileContent) + assert.NoError(t, err, "Error writing to inner test file") + innerTempFile.Close() + + // Test we work with just the query + t.Run("testRecursiveGetAndPutWithQuery", func(t *testing.T) { + oldPref, err := config.SetPreferredPrefix(config.PelicanPrefix) + assert.NoError(t, err) + defer func() { + _, err := config.SetPreferredPrefix(oldPref) + require.NoError(t, err) + }() + + for _, export := range fed.Exports { + // Set path for object to upload/download + tempPath := tempDir + dirName := filepath.Base(tempPath) + uploadURL := fmt.Sprintf("pelican://%s:%s%s/%s/%s?recursive", param.Server_Hostname.GetString(), strconv.Itoa(param.Server_WebPort.GetInt()), + export.FederationPrefix, "osdf_osdf", dirName) + + // Upload the file with PUT + transferDetailsUpload, err := client.DoPut(fed.Ctx, tempDir, uploadURL, false, client.WithTokenLocation(tempToken.Name())) + assert.NoError(t, err) + if err == nil && len(transferDetailsUpload) == 3 { + countBytes17 := 0 + countBytes23 := 0 + countBytes35 := 0 + // Verify we got the correct files back (have to do this since files upload in different orders at times) + for _, transfer := range transferDetailsUpload { + transferredBytes := transfer.TransferredBytes + switch transferredBytes { + case int64(17): + countBytes17++ + continue + case int64(23): + countBytes23++ + continue + case int64(35): + countBytes35++ + continue + default: + // We got a byte amount we are not expecting + t.Fatal("did not upload proper amount of bytes") + } + } + if countBytes17 != 1 || countBytes23 != 1 || countBytes35 != 1 { + // We would hit this case if 1 counter got hit twice for some reason + t.Fatal("One of the files was not uploaded correctly") + } + } else if len(transferDetailsUpload) != 3 { + t.Fatalf("Amount of transfers results returned for upload was not correct. 
Transfer details returned: %d", len(transferDetailsUpload)) + } + + // Download the files we just uploaded + var transferDetailsDownload []client.TransferResults + if export.Capabilities.PublicReads { + transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false) + } else { + transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false, client.WithTokenLocation(tempToken.Name())) + } + assert.NoError(t, err) + if err == nil && len(transferDetailsDownload) == 3 { + countBytesDownloadIdx0 := 0 + countBytesDownloadIdx1 := 0 + countBytesDownloadIdx2 := 0 + + // Verify we got the correct files back (have to do this since files upload in different orders at times) + // In this case, we want to match them to the sizes of the uploaded files + for _, transfer := range transferDetailsDownload { + transferredBytes := transfer.TransferredBytes + switch transferredBytes { + case transferDetailsDownload[0].TransferredBytes: + countBytesDownloadIdx0++ + continue + case transferDetailsDownload[1].TransferredBytes: + countBytesDownloadIdx1++ + continue + case transferDetailsDownload[2].TransferredBytes: + countBytesDownloadIdx2++ + continue + default: + // We got a byte amount we are not expecting + t.Fatal("did not download proper amount of bytes") + } + } + if countBytesDownloadIdx0 != 1 || countBytesDownloadIdx1 != 1 || countBytesDownloadIdx2 != 1 { + // We would hit this case if 1 counter got hit twice for some reason + t.Fatal("One of the files was not downloaded correctly") + } else if len(transferDetailsDownload) != 3 { + t.Fatalf("Amount of transfers results returned for download was not correct. Transfer details returned: %d", len(transferDetailsDownload)) + } + } + } + }) + + // Test we work with a value assigned to it (we print deprecation warning) + t.Run("testRecursiveGetAndPutWithQueryWithValueTrue", func(t *testing.T) { + oldPref, err := config.SetPreferredPrefix(config.PelicanPrefix) + assert.NoError(t, err) + defer func() { + _, err := config.SetPreferredPrefix(oldPref) + require.NoError(t, err) + }() + + for _, export := range fed.Exports { + // Set path for object to upload/download + tempPath := tempDir + dirName := filepath.Base(tempPath) + uploadURL := fmt.Sprintf("pelican://%s:%s%s/%s/%s?recursive=true", param.Server_Hostname.GetString(), strconv.Itoa(param.Server_WebPort.GetInt()), + export.FederationPrefix, "osdf_osdf", dirName) + + // Upload the file with PUT + transferDetailsUpload, err := client.DoPut(fed.Ctx, tempDir, uploadURL, false, client.WithTokenLocation(tempToken.Name())) + assert.NoError(t, err) + if err == nil && len(transferDetailsUpload) == 3 { + countBytes17 := 0 + countBytes23 := 0 + countBytes35 := 0 + // Verify we got the correct files back (have to do this since files upload in different orders at times) + for _, transfer := range transferDetailsUpload { + transferredBytes := transfer.TransferredBytes + switch transferredBytes { + case int64(17): + countBytes17++ + continue + case int64(23): + countBytes23++ + continue + case int64(35): + countBytes35++ + continue + default: + // We got a byte amount we are not expecting + t.Fatal("did not upload proper amount of bytes") + } + } + if countBytes17 != 1 || countBytes23 != 1 || countBytes35 != 1 { + // We would hit this case if 1 counter got hit twice for some reason + t.Fatal("One of the files was not uploaded correctly") + } + } else if len(transferDetailsUpload) != 3 { + t.Fatalf("Amount of transfers results returned for upload was not correct. 
Transfer details returned: %d", len(transferDetailsUpload)) + } + + // Download the files we just uploaded + var transferDetailsDownload []client.TransferResults + if export.Capabilities.PublicReads { + transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false) + } else { + transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false, client.WithTokenLocation(tempToken.Name())) + } + assert.NoError(t, err) + if err == nil && len(transferDetailsDownload) == 3 { + countBytesDownloadIdx0 := 0 + countBytesDownloadIdx1 := 0 + countBytesDownloadIdx2 := 0 + + // Verify we got the correct files back (have to do this since files upload in different orders at times) + // In this case, we want to match them to the sizes of the uploaded files + for _, transfer := range transferDetailsDownload { + transferredBytes := transfer.TransferredBytes + switch transferredBytes { + case transferDetailsDownload[0].TransferredBytes: + countBytesDownloadIdx0++ + continue + case transferDetailsDownload[1].TransferredBytes: + countBytesDownloadIdx1++ + continue + case transferDetailsDownload[2].TransferredBytes: + countBytesDownloadIdx2++ + continue + default: + // We got a byte amount we are not expecting + t.Fatal("did not download proper amount of bytes") + } + } + if countBytesDownloadIdx0 != 1 || countBytesDownloadIdx1 != 1 || countBytesDownloadIdx2 != 1 { + // We would hit this case if 1 counter got hit twice for some reason + t.Fatal("One of the files was not downloaded correctly") + } else if len(transferDetailsDownload) != 3 { + t.Fatalf("Amount of transfers results returned for download was not correct. Transfer details returned: %d", len(transferDetailsDownload)) + } + } + } + }) + + // Test we work with a value assigned to it but says recursive=false (we print deprecation warning and ignore arguments in query so we still work) + t.Run("testRecursiveGetAndPutWithQueryWithValueFalse", func(t *testing.T) { + oldPref, err := config.SetPreferredPrefix(config.PelicanPrefix) + assert.NoError(t, err) + defer func() { + _, err := config.SetPreferredPrefix(oldPref) + require.NoError(t, err) + }() + + for _, export := range fed.Exports { + // Set path for object to upload/download + tempPath := tempDir + dirName := filepath.Base(tempPath) + uploadURL := fmt.Sprintf("pelican://%s:%s%s/%s/%s?recursive=false", param.Server_Hostname.GetString(), strconv.Itoa(param.Server_WebPort.GetInt()), + export.FederationPrefix, "osdf_osdf", dirName) + + // Upload the file with PUT + transferDetailsUpload, err := client.DoPut(fed.Ctx, tempDir, uploadURL, false, client.WithTokenLocation(tempToken.Name())) + assert.NoError(t, err) + if err == nil && len(transferDetailsUpload) == 3 { + countBytes17 := 0 + countBytes23 := 0 + countBytes35 := 0 + // Verify we got the correct files back (have to do this since files upload in different orders at times) + for _, transfer := range transferDetailsUpload { + transferredBytes := transfer.TransferredBytes + switch transferredBytes { + case int64(17): + countBytes17++ + continue + case int64(23): + countBytes23++ + continue + case int64(35): + countBytes35++ + continue + default: + // We got a byte amount we are not expecting + t.Fatal("did not upload proper amount of bytes") + } + } + if countBytes17 != 1 || countBytes23 != 1 || countBytes35 != 1 { + // We would hit this case if 1 counter got hit twice for some reason + t.Fatal("One of the files was not uploaded correctly") + } + } else if len(transferDetailsUpload) != 3 { + t.Fatalf("Amount of 
transfers results returned for upload was not correct. Transfer details returned: %d", len(transferDetailsUpload)) + } + + // Download the files we just uploaded + var transferDetailsDownload []client.TransferResults + if export.Capabilities.PublicReads { + transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false) + } else { + transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false, client.WithTokenLocation(tempToken.Name())) + } + assert.NoError(t, err) + if err == nil && len(transferDetailsDownload) == 3 { + countBytesDownloadIdx0 := 0 + countBytesDownloadIdx1 := 0 + countBytesDownloadIdx2 := 0 + + // Verify we got the correct files back (have to do this since files upload in different orders at times) + // In this case, we want to match them to the sizes of the uploaded files + for _, transfer := range transferDetailsDownload { + transferredBytes := transfer.TransferredBytes + switch transferredBytes { + case transferDetailsDownload[0].TransferredBytes: + countBytesDownloadIdx0++ + continue + case transferDetailsDownload[1].TransferredBytes: + countBytesDownloadIdx1++ + continue + case transferDetailsDownload[2].TransferredBytes: + countBytesDownloadIdx2++ + continue + default: + // We got a byte amount we are not expecting + t.Fatal("did not download proper amount of bytes") + } + } + if countBytesDownloadIdx0 != 1 || countBytesDownloadIdx1 != 1 || countBytesDownloadIdx2 != 1 { + // We would hit this case if 1 counter got hit twice for some reason + t.Fatal("One of the files was not downloaded correctly") + } else if len(transferDetailsDownload) != 3 { + t.Fatalf("Amount of transfers results returned for download was not correct. Transfer details returned: %d", len(transferDetailsDownload)) + } + } + } + }) + + // Test we work with both recursive and directread query params + t.Run("testRecursiveGetAndPutWithQueryAndDirectread", func(t *testing.T) { + oldPref, err := config.SetPreferredPrefix(config.PelicanPrefix) + assert.NoError(t, err) + defer func() { + _, err := config.SetPreferredPrefix(oldPref) + require.NoError(t, err) + }() + + for _, export := range fed.Exports { + // Set path for object to upload/download + tempPath := tempDir + dirName := filepath.Base(tempPath) + uploadURL := fmt.Sprintf("pelican://%s:%s%s/%s/%s?recursive&directread", param.Server_Hostname.GetString(), strconv.Itoa(param.Server_WebPort.GetInt()), + export.FederationPrefix, "osdf_osdf", dirName) + + // Upload the file with PUT + transferDetailsUpload, err := client.DoPut(fed.Ctx, tempDir, uploadURL, false, client.WithTokenLocation(tempToken.Name())) + assert.NoError(t, err) + if err == nil && len(transferDetailsUpload) == 3 { + countBytes17 := 0 + countBytes23 := 0 + countBytes35 := 0 + // Verify we got the correct files back (have to do this since files upload in different orders at times) + for _, transfer := range transferDetailsUpload { + transferredBytes := transfer.TransferredBytes + switch transferredBytes { + case int64(17): + countBytes17++ + continue + case int64(23): + countBytes23++ + continue + case int64(35): + countBytes35++ + continue + default: + // We got a byte amount we are not expecting + t.Fatal("did not upload proper amount of bytes") + } + } + if countBytes17 != 1 || countBytes23 != 1 || countBytes35 != 1 { + // We would hit this case if 1 counter got hit twice for some reason + t.Fatal("One of the files was not uploaded correctly") + } + } else if len(transferDetailsUpload) != 3 { + t.Fatalf("Amount of transfers results returned 
for upload was not correct. Transfer details returned: %d", len(transferDetailsUpload)) + } + + // Download the files we just uploaded + var transferDetailsDownload []client.TransferResults + if export.Capabilities.PublicReads { + transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false) + } else { + transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false, client.WithTokenLocation(tempToken.Name())) + } + assert.NoError(t, err) + if err == nil && len(transferDetailsDownload) == 3 { + countBytesDownloadIdx0 := 0 + countBytesDownloadIdx1 := 0 + countBytesDownloadIdx2 := 0 + + // Verify we got the correct files back (have to do this since files upload in different orders at times) + // In this case, we want to match them to the sizes of the uploaded files + for _, transfer := range transferDetailsDownload { + for _, attempt := range transfer.Attempts { + assert.Equal(t, "https://"+attempt.Endpoint, param.Origin_Url.GetString()) + } + transferredBytes := transfer.TransferredBytes + switch transferredBytes { + case transferDetailsDownload[0].TransferredBytes: + countBytesDownloadIdx0++ + continue + case transferDetailsDownload[1].TransferredBytes: + countBytesDownloadIdx1++ + continue + case transferDetailsDownload[2].TransferredBytes: + countBytesDownloadIdx2++ + continue + default: + // We got a byte amount we are not expecting + t.Fatal("did not download proper amount of bytes") + } + } + if countBytesDownloadIdx0 != 1 || countBytesDownloadIdx1 != 1 || countBytesDownloadIdx2 != 1 { + // We would hit this case if 1 counter got hit twice for some reason + t.Fatal("One of the files was not downloaded correctly") + } else if len(transferDetailsDownload) != 3 { + t.Fatalf("Amount of transfers results returned for download was not correct. Transfer details returned: %d", len(transferDetailsDownload)) + } + } + } + }) + + t.Cleanup(func() { + if err := te.Shutdown(); err != nil { + log.Errorln("Failure when shutting down transfer engine:", err) + } + // Throw in a viper.Reset for good measure. Keeps our env squeaky clean! 
+ viper.Reset() + server_utils.ResetOriginExports() }) } diff --git a/client/main.go b/client/main.go index 03e2ac4a7..a30d8749e 100644 --- a/client/main.go +++ b/client/main.go @@ -491,10 +491,13 @@ func DoPut(ctx context.Context, localObject string, remoteDestination string, re } // Check if we have a query and that it is understood - err = utils.CheckValidQuery(remoteDestUrl, false) + err = utils.CheckValidQuery(remoteDestUrl) if err != nil { return } + if remoteDestUrl.Query().Has("recursive") { + recursive = true + } remoteDestUrl.Scheme = remoteDestScheme @@ -562,10 +565,13 @@ func DoGet(ctx context.Context, remoteObject string, localDestination string, re } // Check if we have a query and that it is understood - err = utils.CheckValidQuery(remoteObjectUrl, false) + err = utils.CheckValidQuery(remoteObjectUrl) if err != nil { return } + if remoteObjectUrl.Query().Has("recursive") { + recursive = true + } remoteObjectUrl.Scheme = remoteObjectScheme @@ -688,10 +694,14 @@ func DoCopy(ctx context.Context, sourceFile string, destination string, recursiv return nil, err } // Check if we have a query and that it is understood - err = utils.CheckValidQuery(sourceURL, false) + err = utils.CheckValidQuery(sourceURL) if err != nil { return } + if sourceURL.Query().Has("recursive") { + recursive = true + } + sourceURL.Scheme = source_scheme destination, dest_scheme := correctURLWithUnderscore(destination) @@ -702,10 +712,13 @@ func DoCopy(ctx context.Context, sourceFile string, destination string, recursiv } // Check if we have a query and that it is understood - err = utils.CheckValidQuery(destURL, false) + err = utils.CheckValidQuery(destURL) if err != nil { return } + if destURL.Query().Has("recursive") { + recursive = true + } destURL.Scheme = dest_scheme diff --git a/cmd/plugin.go b/cmd/plugin.go index 2dec9ba41..3b8f6f963 100644 --- a/cmd/plugin.go +++ b/cmd/plugin.go @@ -428,13 +428,13 @@ func runPluginWorker(ctx context.Context, upload bool, workChan <-chan PluginTra } // Check we have valid query parameters - err := utils.CheckValidQuery(transfer.url, true) + err := utils.CheckValidQuery(transfer.url) if err != nil { failTransfer(transfer.url.String(), transfer.localFile, results, upload, err) return err } - if transfer.url.Query().Get("recursive") != "" { + if transfer.url.Query().Has("recursive") { recursive = true } else { recursive = false diff --git a/utils/utils.go b/utils/utils.go index 38970a0ae..81d8245f2 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -72,17 +72,12 @@ func GetPreferredCaches(preferredCaches string) (caches []*url.URL, err error) { } // This function checks if we have a valid query (or no query) for the transfer URL -func CheckValidQuery(transferUrl *url.URL, isPlugin bool) (err error) { +func CheckValidQuery(transferUrl *url.URL) (err error) { query := transferUrl.Query() - _, hasRecursive := query["recursive"] + recursive, hasRecursive := query["recursive"] _, hasPack := query["pack"] directRead, hasDirectRead := query["directread"] - // If we are not the plugin, we should not use ?recursive (we should pass a -r flag) - if !isPlugin && hasRecursive { - return errors.New("recursive query parameter is not yet supported in URLs outside of the plugin") - } - // If we have both recursive and pack, we should return a failure if hasRecursive && hasPack { return errors.New("cannot have both recursive and pack query parameters") @@ -90,7 +85,13 @@ func CheckValidQuery(transferUrl *url.URL, isPlugin bool) (err error) { // If there is an argument in the directread 
query param, inform the user this is deprecated and their argument will be ignored if hasDirectRead && directRead[0] != "" { - log.Warnln("Arguments (true/false) for the directread query have been deprecated and will be disallowed in a future release. The argument provided will be ignored") + log.Warnln("Arguments (true/false) for the ?directread query have been deprecated and will be disallowed in a future release. The argument provided will be ignored") + return nil + } + + // If there is an argument in the recursive query param, inform the user this is deprecated and their argument will be ignored + if hasRecursive && recursive[0] != "" { + log.Warnln("Arguments (true/false) for the ?recursive query have been deprecated and will be disallowed in a future release. The argument provided will be ignored") return nil } diff --git a/utils/utils_test.go b/utils/utils_test.go index af1ba572f..f12905e4b 100644 --- a/utils/utils_test.go +++ b/utils/utils_test.go @@ -33,7 +33,7 @@ func TestValidQuery(t *testing.T) { transferUrl, err := url.Parse(transferStr) assert.NoError(t, err) - err = CheckValidQuery(transferUrl, true) + err = CheckValidQuery(transferUrl) assert.NoError(t, err) }) @@ -43,7 +43,7 @@ func TestValidQuery(t *testing.T) { transferUrl, err := url.Parse(transferStr) assert.NoError(t, err) - err = CheckValidQuery(transferUrl, false) + err = CheckValidQuery(transferUrl) assert.NoError(t, err) }) @@ -53,7 +53,7 @@ func TestValidQuery(t *testing.T) { transferUrl, err := url.Parse(transferStr) assert.NoError(t, err) - err = CheckValidQuery(transferUrl, false) + err = CheckValidQuery(transferUrl) assert.NoError(t, err) }) @@ -63,7 +63,7 @@ func TestValidQuery(t *testing.T) { transferUrl, err := url.Parse(transferStr) assert.NoError(t, err) - err = CheckValidQuery(transferUrl, false) + err = CheckValidQuery(transferUrl) assert.Error(t, err) assert.Contains(t, err.Error(), "invalid query parameter(s) recrustive=true provided in url pelican://something/here?recrustive=true") }) @@ -74,29 +74,18 @@ func TestValidQuery(t *testing.T) { transferUrl, err := url.Parse(transferStr) assert.NoError(t, err) - err = CheckValidQuery(transferUrl, true) + err = CheckValidQuery(transferUrl) assert.Error(t, err) assert.Contains(t, err.Error(), "cannot have both recursive and pack query parameters") }) - // Test that a recursive query fails when not the plugin - t.Run("testBothPackAndRecursiveFailure", func(t *testing.T) { - transferStr := "pelican://something/here?recursive=true" - transferUrl, err := url.Parse(transferStr) - assert.NoError(t, err) - - err = CheckValidQuery(transferUrl, false) - assert.Error(t, err) - assert.Contains(t, err.Error(), "recursive query parameter is not yet supported in URLs outside of the plugin") - }) - // Test we pass with both pack and directread t.Run("testBothPackAndDirectReadSuccess", func(t *testing.T) { transferStr := "pelican://something/here?pack=tar.gz&directread" transferUrl, err := url.Parse(transferStr) assert.NoError(t, err) - err = CheckValidQuery(transferUrl, false) + err = CheckValidQuery(transferUrl) assert.NoError(t, err) }) @@ -106,7 +95,7 @@ func TestValidQuery(t *testing.T) { transferUrl, err := url.Parse(transferStr) assert.NoError(t, err) - err = CheckValidQuery(transferUrl, true) + err = CheckValidQuery(transferUrl) assert.NoError(t, err) }) @@ -116,7 +105,7 @@ func TestValidQuery(t *testing.T) { transferUrl, err := url.Parse(transferStr) assert.NoError(t, err) - err = CheckValidQuery(transferUrl, false) + err = CheckValidQuery(transferUrl) 
assert.NoError(t, err) }) } From 49422e635a7c8f376204fbe95746061b0c2635e3 Mon Sep 17 00:00:00 2001 From: Joe Reuss Date: Mon, 6 May 2024 15:34:01 -0500 Subject: [PATCH 2/2] Address PR comments: tidy up unit tests --- client/fed_linux_test.go | 522 +++++---------------------------------- 1 file changed, 55 insertions(+), 467 deletions(-) diff --git a/client/fed_linux_test.go b/client/fed_linux_test.go index e862ef7c6..e73606501 100644 --- a/client/fed_linux_test.go +++ b/client/fed_linux_test.go @@ -127,36 +127,8 @@ func TestRecursiveUploadsAndDownloads(t *testing.T) { // Upload the file with PUT transferDetailsUpload, err := client.DoPut(fed.Ctx, tempDir, uploadURL, true, client.WithTokenLocation(tempToken.Name())) - assert.NoError(t, err) - if err == nil && len(transferDetailsUpload) == 3 { - countBytes17 := 0 - countBytes23 := 0 - countBytes35 := 0 - // Verify we got the correct files back (have to do this since files upload in different orders at times) - for _, transfer := range transferDetailsUpload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case int64(17): - countBytes17++ - continue - case int64(23): - countBytes23++ - continue - case int64(35): - countBytes35++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not upload proper amount of bytes") - } - } - if countBytes17 != 1 || countBytes23 != 1 || countBytes35 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not uploaded correctly") - } - } else if len(transferDetailsUpload) != 3 { - t.Fatalf("Amount of transfers results returned for upload was not correct. Transfer details returned: %d", len(transferDetailsUpload)) - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsUpload) // Download the files we just uploaded var transferDetailsDownload []client.TransferResults @@ -165,38 +137,8 @@ func TestRecursiveUploadsAndDownloads(t *testing.T) { } else { transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), true, client.WithTokenLocation(tempToken.Name())) } - assert.NoError(t, err) - if err == nil && len(transferDetailsDownload) == 3 { - countBytesDownloadIdx0 := 0 - countBytesDownloadIdx1 := 0 - countBytesDownloadIdx2 := 0 - - // Verify we got the correct files back (have to do this since files upload in different orders at times) - // In this case, we want to match them to the sizes of the uploaded files - for _, transfer := range transferDetailsDownload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case transferDetailsDownload[0].TransferredBytes: - countBytesDownloadIdx0++ - continue - case transferDetailsDownload[1].TransferredBytes: - countBytesDownloadIdx1++ - continue - case transferDetailsDownload[2].TransferredBytes: - countBytesDownloadIdx2++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not download proper amount of bytes") - } - } - if countBytesDownloadIdx0 != 1 || countBytesDownloadIdx1 != 1 || countBytesDownloadIdx2 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not downloaded correctly") - } else if len(transferDetailsDownload) != 3 { - t.Fatalf("Amount of transfers results returned for download was not correct. 
Transfer details returned: %d", len(transferDetailsDownload)) - } - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsDownload) } }) @@ -223,36 +165,8 @@ func TestRecursiveUploadsAndDownloads(t *testing.T) { // Upload the file with PUT transferDetailsUpload, err := client.DoPut(fed.Ctx, tempDir, uploadURL, true, client.WithTokenLocation(tempToken.Name())) - assert.NoError(t, err) - if err == nil && len(transferDetailsUpload) == 3 { - countBytes17 := 0 - countBytes23 := 0 - countBytes35 := 0 - // Verify we got the correct files back (have to do this since files upload in different orders at times) - for _, transfer := range transferDetailsUpload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case int64(17): - countBytes17++ - continue - case int64(23): - countBytes23++ - continue - case int64(35): - countBytes35++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not upload proper amount of bytes") - } - } - if countBytes17 != 1 || countBytes23 != 1 || countBytes35 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not uploaded correctly") - } - } else if len(transferDetailsUpload) != 3 { - t.Fatalf("Amount of transfers results returned for upload was not correct. Transfer details returned: %d", len(transferDetailsUpload)) - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsUpload) // Download the files we just uploaded tmpDir := t.TempDir() @@ -262,38 +176,8 @@ func TestRecursiveUploadsAndDownloads(t *testing.T) { } else { transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, tmpDir, true, client.WithTokenLocation(tempToken.Name())) } - assert.NoError(t, err) - if err == nil && len(transferDetailsDownload) == 3 { - countBytesDownloadIdx0 := 0 - countBytesDownloadIdx1 := 0 - countBytesDownloadIdx2 := 0 - - // Verify we got the correct files back (have to do this since files upload in different orders at times) - // In this case, we want to match them to the sizes of the uploaded files - for _, transfer := range transferDetailsDownload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case transferDetailsDownload[0].TransferredBytes: - countBytesDownloadIdx0++ - continue - case transferDetailsDownload[1].TransferredBytes: - countBytesDownloadIdx1++ - continue - case transferDetailsDownload[2].TransferredBytes: - countBytesDownloadIdx2++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not download proper amount of bytes") - } - } - if countBytesDownloadIdx0 != 1 || countBytesDownloadIdx1 != 1 || countBytesDownloadIdx2 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not downloaded correctly") - } else if len(transferDetailsDownload) != 3 { - t.Fatalf("Amount of transfers results returned for download was not correct. 
Transfer details returned: %d", len(transferDetailsDownload)) - } - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsDownload) } }) @@ -313,36 +197,9 @@ func TestRecursiveUploadsAndDownloads(t *testing.T) { export.FederationPrefix, "osdf_osdf", dirName) // Upload the file with PUT transferDetailsUpload, err := client.DoPut(fed.Ctx, tempDir, uploadURL, true, client.WithTokenLocation(tempToken.Name())) - assert.NoError(t, err) - if err == nil && len(transferDetailsUpload) == 3 { - countBytes17 := 0 - countBytes23 := 0 - countBytes35 := 0 - // Verify we got the correct files back (have to do this since files upload in different orders at times) - for _, transfer := range transferDetailsUpload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case int64(17): - countBytes17++ - continue - case int64(23): - countBytes23++ - continue - case int64(35): - countBytes35++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not upload proper amount of bytes") - } - } - if countBytes17 != 1 || countBytes23 != 1 || countBytes35 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not uploaded correctly") - } - } else if len(transferDetailsUpload) != 3 { - t.Fatalf("Amount of transfers results returned for upload was not correct. Transfer details returned: %d", len(transferDetailsUpload)) - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsUpload) + // Download the files we just uploaded var transferDetailsDownload []client.TransferResults if export.Capabilities.PublicReads { @@ -350,38 +207,8 @@ func TestRecursiveUploadsAndDownloads(t *testing.T) { } else { transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), true, client.WithTokenLocation(tempToken.Name())) } - assert.NoError(t, err) - if err == nil && len(transferDetailsDownload) == 3 { - countBytesDownloadIdx0 := 0 - countBytesDownloadIdx1 := 0 - countBytesDownloadIdx2 := 0 - - // Verify we got the correct files back (have to do this since files upload in different orders at times) - // In this case, we want to match them to the sizes of the uploaded files - for _, transfer := range transferDetailsDownload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case transferDetailsDownload[0].TransferredBytes: - countBytesDownloadIdx0++ - continue - case transferDetailsDownload[1].TransferredBytes: - countBytesDownloadIdx1++ - continue - case transferDetailsDownload[2].TransferredBytes: - countBytesDownloadIdx2++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not download proper amount of bytes") - } - } - if countBytesDownloadIdx0 != 1 || countBytesDownloadIdx1 != 1 || countBytesDownloadIdx2 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not downloaded correctly") - } else if len(transferDetailsDownload) != 3 { - t.Fatalf("Amount of transfers results returned for download was not correct. 
Transfer details returned: %d", len(transferDetailsDownload)) - } - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsDownload) } }) @@ -395,6 +222,19 @@ func TestRecursiveUploadsAndDownloads(t *testing.T) { }) } +// Helper function to verify a successful transfer by looking at the total bytes transferred and amount of results sent back +func verifySuccessfulTransfer(t *testing.T, transferResults []client.TransferResults) { + expectedBytes := int64(75) + var totalBytes int64 // we expect this to be 17+23+35 = 75 + for _, transfer := range transferResults { + totalBytes += transfer.TransferredBytes + } + + require.Equal(t, 3, len(transferResults), fmt.Sprintf("incorrect number of transfers reported %d", len(transferResults))) + require.Equal(t, expectedBytes, totalBytes, fmt.Sprintf("incorrect number of transferred bytes: %d", totalBytes)) + +} + // Test that recursive uploads and downloads work with the ?recursive query func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { // Create instance of test federation @@ -405,6 +245,15 @@ func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { te := client.NewTransferEngine(fed.Ctx) + t.Cleanup(func() { + if err := te.Shutdown(); err != nil { + log.Errorln("Failure when shutting down transfer engine:", err) + } + // Throw in a viper.Reset for good measure. Keeps our env squeaky clean! + viper.Reset() + server_utils.ResetOriginExports() + }) + // Create a token file issuer, err := config.GetServerIssuerURL() require.NoError(t, err) @@ -434,7 +283,6 @@ func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { innerTempDir, err := os.MkdirTemp(tempDir, "InnerUploadDir") assert.NoError(t, err) defer os.RemoveAll(tempDir) - defer os.RemoveAll(tempDir) permissions := os.FileMode(0755) err = os.Chmod(tempDir, permissions) require.NoError(t, err) @@ -466,12 +314,8 @@ func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { // Test we work with just the query t.Run("testRecursiveGetAndPutWithQuery", func(t *testing.T) { - oldPref, err := config.SetPreferredPrefix(config.PelicanPrefix) + _, err := config.SetPreferredPrefix(config.PelicanPrefix) assert.NoError(t, err) - defer func() { - _, err := config.SetPreferredPrefix(oldPref) - require.NoError(t, err) - }() for _, export := range fed.Exports { // Set path for object to upload/download @@ -482,36 +326,8 @@ func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { // Upload the file with PUT transferDetailsUpload, err := client.DoPut(fed.Ctx, tempDir, uploadURL, false, client.WithTokenLocation(tempToken.Name())) - assert.NoError(t, err) - if err == nil && len(transferDetailsUpload) == 3 { - countBytes17 := 0 - countBytes23 := 0 - countBytes35 := 0 - // Verify we got the correct files back (have to do this since files upload in different orders at times) - for _, transfer := range transferDetailsUpload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case int64(17): - countBytes17++ - continue - case int64(23): - countBytes23++ - continue - case int64(35): - countBytes35++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not upload proper amount of bytes") - } - } - if countBytes17 != 1 || countBytes23 != 1 || countBytes35 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not uploaded correctly") - } - } else if len(transferDetailsUpload) != 3 { - t.Fatalf("Amount of transfers results returned for upload was not correct. 
Transfer details returned: %d", len(transferDetailsUpload)) - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsUpload) // Download the files we just uploaded var transferDetailsDownload []client.TransferResults @@ -520,49 +336,15 @@ func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { } else { transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false, client.WithTokenLocation(tempToken.Name())) } - assert.NoError(t, err) - if err == nil && len(transferDetailsDownload) == 3 { - countBytesDownloadIdx0 := 0 - countBytesDownloadIdx1 := 0 - countBytesDownloadIdx2 := 0 - - // Verify we got the correct files back (have to do this since files upload in different orders at times) - // In this case, we want to match them to the sizes of the uploaded files - for _, transfer := range transferDetailsDownload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case transferDetailsDownload[0].TransferredBytes: - countBytesDownloadIdx0++ - continue - case transferDetailsDownload[1].TransferredBytes: - countBytesDownloadIdx1++ - continue - case transferDetailsDownload[2].TransferredBytes: - countBytesDownloadIdx2++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not download proper amount of bytes") - } - } - if countBytesDownloadIdx0 != 1 || countBytesDownloadIdx1 != 1 || countBytesDownloadIdx2 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not downloaded correctly") - } else if len(transferDetailsDownload) != 3 { - t.Fatalf("Amount of transfers results returned for download was not correct. Transfer details returned: %d", len(transferDetailsDownload)) - } - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsDownload) } }) // Test we work with a value assigned to it (we print deprecation warning) t.Run("testRecursiveGetAndPutWithQueryWithValueTrue", func(t *testing.T) { - oldPref, err := config.SetPreferredPrefix(config.PelicanPrefix) + _, err := config.SetPreferredPrefix(config.PelicanPrefix) assert.NoError(t, err) - defer func() { - _, err := config.SetPreferredPrefix(oldPref) - require.NoError(t, err) - }() for _, export := range fed.Exports { // Set path for object to upload/download @@ -573,36 +355,8 @@ func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { // Upload the file with PUT transferDetailsUpload, err := client.DoPut(fed.Ctx, tempDir, uploadURL, false, client.WithTokenLocation(tempToken.Name())) - assert.NoError(t, err) - if err == nil && len(transferDetailsUpload) == 3 { - countBytes17 := 0 - countBytes23 := 0 - countBytes35 := 0 - // Verify we got the correct files back (have to do this since files upload in different orders at times) - for _, transfer := range transferDetailsUpload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case int64(17): - countBytes17++ - continue - case int64(23): - countBytes23++ - continue - case int64(35): - countBytes35++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not upload proper amount of bytes") - } - } - if countBytes17 != 1 || countBytes23 != 1 || countBytes35 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not uploaded correctly") - } - } else if len(transferDetailsUpload) != 3 { - t.Fatalf("Amount of transfers results returned for upload was not correct. 
Transfer details returned: %d", len(transferDetailsUpload)) - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsUpload) // Download the files we just uploaded var transferDetailsDownload []client.TransferResults @@ -611,49 +365,15 @@ func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { } else { transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false, client.WithTokenLocation(tempToken.Name())) } - assert.NoError(t, err) - if err == nil && len(transferDetailsDownload) == 3 { - countBytesDownloadIdx0 := 0 - countBytesDownloadIdx1 := 0 - countBytesDownloadIdx2 := 0 - - // Verify we got the correct files back (have to do this since files upload in different orders at times) - // In this case, we want to match them to the sizes of the uploaded files - for _, transfer := range transferDetailsDownload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case transferDetailsDownload[0].TransferredBytes: - countBytesDownloadIdx0++ - continue - case transferDetailsDownload[1].TransferredBytes: - countBytesDownloadIdx1++ - continue - case transferDetailsDownload[2].TransferredBytes: - countBytesDownloadIdx2++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not download proper amount of bytes") - } - } - if countBytesDownloadIdx0 != 1 || countBytesDownloadIdx1 != 1 || countBytesDownloadIdx2 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not downloaded correctly") - } else if len(transferDetailsDownload) != 3 { - t.Fatalf("Amount of transfers results returned for download was not correct. Transfer details returned: %d", len(transferDetailsDownload)) - } - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsDownload) } }) // Test we work with a value assigned to it but says recursive=false (we print deprecation warning and ignore arguments in query so we still work) t.Run("testRecursiveGetAndPutWithQueryWithValueFalse", func(t *testing.T) { - oldPref, err := config.SetPreferredPrefix(config.PelicanPrefix) + _, err := config.SetPreferredPrefix(config.PelicanPrefix) assert.NoError(t, err) - defer func() { - _, err := config.SetPreferredPrefix(oldPref) - require.NoError(t, err) - }() for _, export := range fed.Exports { // Set path for object to upload/download @@ -664,36 +384,8 @@ func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { // Upload the file with PUT transferDetailsUpload, err := client.DoPut(fed.Ctx, tempDir, uploadURL, false, client.WithTokenLocation(tempToken.Name())) - assert.NoError(t, err) - if err == nil && len(transferDetailsUpload) == 3 { - countBytes17 := 0 - countBytes23 := 0 - countBytes35 := 0 - // Verify we got the correct files back (have to do this since files upload in different orders at times) - for _, transfer := range transferDetailsUpload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case int64(17): - countBytes17++ - continue - case int64(23): - countBytes23++ - continue - case int64(35): - countBytes35++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not upload proper amount of bytes") - } - } - if countBytes17 != 1 || countBytes23 != 1 || countBytes35 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not uploaded correctly") - } - } else if len(transferDetailsUpload) != 3 { - t.Fatalf("Amount of transfers results returned for 
upload was not correct. Transfer details returned: %d", len(transferDetailsUpload)) - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsUpload) // Download the files we just uploaded var transferDetailsDownload []client.TransferResults @@ -702,49 +394,15 @@ func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { } else { transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false, client.WithTokenLocation(tempToken.Name())) } - assert.NoError(t, err) - if err == nil && len(transferDetailsDownload) == 3 { - countBytesDownloadIdx0 := 0 - countBytesDownloadIdx1 := 0 - countBytesDownloadIdx2 := 0 - - // Verify we got the correct files back (have to do this since files upload in different orders at times) - // In this case, we want to match them to the sizes of the uploaded files - for _, transfer := range transferDetailsDownload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case transferDetailsDownload[0].TransferredBytes: - countBytesDownloadIdx0++ - continue - case transferDetailsDownload[1].TransferredBytes: - countBytesDownloadIdx1++ - continue - case transferDetailsDownload[2].TransferredBytes: - countBytesDownloadIdx2++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not download proper amount of bytes") - } - } - if countBytesDownloadIdx0 != 1 || countBytesDownloadIdx1 != 1 || countBytesDownloadIdx2 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not downloaded correctly") - } else if len(transferDetailsDownload) != 3 { - t.Fatalf("Amount of transfers results returned for download was not correct. Transfer details returned: %d", len(transferDetailsDownload)) - } - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsDownload) } }) // Test we work with both recursive and directread query params t.Run("testRecursiveGetAndPutWithQueryAndDirectread", func(t *testing.T) { - oldPref, err := config.SetPreferredPrefix(config.PelicanPrefix) + _, err := config.SetPreferredPrefix(config.PelicanPrefix) assert.NoError(t, err) - defer func() { - _, err := config.SetPreferredPrefix(oldPref) - require.NoError(t, err) - }() for _, export := range fed.Exports { // Set path for object to upload/download @@ -755,36 +413,8 @@ func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { // Upload the file with PUT transferDetailsUpload, err := client.DoPut(fed.Ctx, tempDir, uploadURL, false, client.WithTokenLocation(tempToken.Name())) - assert.NoError(t, err) - if err == nil && len(transferDetailsUpload) == 3 { - countBytes17 := 0 - countBytes23 := 0 - countBytes35 := 0 - // Verify we got the correct files back (have to do this since files upload in different orders at times) - for _, transfer := range transferDetailsUpload { - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case int64(17): - countBytes17++ - continue - case int64(23): - countBytes23++ - continue - case int64(35): - countBytes35++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not upload proper amount of bytes") - } - } - if countBytes17 != 1 || countBytes23 != 1 || countBytes35 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not uploaded correctly") - } - } else if len(transferDetailsUpload) != 3 { - t.Fatalf("Amount of transfers results returned for upload was not correct. 
Transfer details returned: %d", len(transferDetailsUpload)) - } + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsUpload) // Download the files we just uploaded var transferDetailsDownload []client.TransferResults @@ -793,51 +423,9 @@ func TestRecursiveUploadsAndDownloadsWithQuery(t *testing.T) { } else { transferDetailsDownload, err = client.DoGet(fed.Ctx, uploadURL, t.TempDir(), false, client.WithTokenLocation(tempToken.Name())) } - assert.NoError(t, err) - if err == nil && len(transferDetailsDownload) == 3 { - countBytesDownloadIdx0 := 0 - countBytesDownloadIdx1 := 0 - countBytesDownloadIdx2 := 0 - - // Verify we got the correct files back (have to do this since files upload in different orders at times) - // In this case, we want to match them to the sizes of the uploaded files - for _, transfer := range transferDetailsDownload { - for _, attempt := range transfer.Attempts { - assert.Equal(t, "https://"+attempt.Endpoint, param.Origin_Url.GetString()) - } - transferredBytes := transfer.TransferredBytes - switch transferredBytes { - case transferDetailsDownload[0].TransferredBytes: - countBytesDownloadIdx0++ - continue - case transferDetailsDownload[1].TransferredBytes: - countBytesDownloadIdx1++ - continue - case transferDetailsDownload[2].TransferredBytes: - countBytesDownloadIdx2++ - continue - default: - // We got a byte amount we are not expecting - t.Fatal("did not download proper amount of bytes") - } - } - if countBytesDownloadIdx0 != 1 || countBytesDownloadIdx1 != 1 || countBytesDownloadIdx2 != 1 { - // We would hit this case if 1 counter got hit twice for some reason - t.Fatal("One of the files was not downloaded correctly") - } else if len(transferDetailsDownload) != 3 { - t.Fatalf("Amount of transfers results returned for download was not correct. Transfer details returned: %d", len(transferDetailsDownload)) - } - } - } - }) - - t.Cleanup(func() { - if err := te.Shutdown(); err != nil { - log.Errorln("Failure when shutting down transfer engine:", err) + require.NoError(t, err) + verifySuccessfulTransfer(t, transferDetailsDownload) } - // Throw in a viper.Reset for good measure. Keeps our env squeaky clean! - viper.Reset() - server_utils.ResetOriginExports() }) }