diff --git a/mountpoint-s3/CHANGELOG.md b/mountpoint-s3/CHANGELOG.md
index 17a473c9d..d76ddebf9 100644
--- a/mountpoint-s3/CHANGELOG.md
+++ b/mountpoint-s3/CHANGELOG.md
@@ -1,5 +1,9 @@
 ## Unreleased
 
+### Other changes
+* Fix an issue where read file handles could be closed too early, leading to bad file descriptor errors on subsequent reads. As a consequence of this fix, opening an existing file to overwrite it **immediately** after closing a read file handle may occasionally fail with an "Operation not permitted" error. In such cases, Mountpoint logs will also report that the file is "not writable while being read". ([#751](https://github.com/awslabs/mountpoint-s3/pull/751))
+* File handles are no longer initialized lazily. Lazy initialization was introduced in version 1.4.0 but is reverted in this change. If upgrading from 1.4.0, errors that were previously deferred until the first read or write may now be raised at open time. ([#751](https://github.com/awslabs/mountpoint-s3/pull/751))
+
 ## v1.4.0 (January 26, 2024)
 
 ### New features
diff --git a/mountpoint-s3/src/fs.rs b/mountpoint-s3/src/fs.rs
index 054895742..2ec642dc2 100644
--- a/mountpoint-s3/src/fs.rs
+++ b/mountpoint-s3/src/fs.rs
@@ -66,17 +66,10 @@ where
     Client: ObjectClient + Send + Sync + 'static,
     Prefetcher: Prefetch,
 {
-    /// A state where the file handle is created but the type is not yet determined
-    Created { lookup: LookedUp, flags: i32, pid: u32 },
     /// The file handle has been assigned as a read handle
-    Read {
-        request: Prefetcher::PrefetchResult,
-        pid: u32,
-    },
+    Read(Prefetcher::PrefetchResult),
     /// The file handle has been assigned as a write handle
     Write(UploadState),
-    /// The file handle is already closed, currently only used to tell that the read is finished
-    Closed,
 }
 
 impl<Client, Prefetcher> std::fmt::Debug for FileHandleState<Client, Prefetcher>
 where
 {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
-            FileHandleState::Created { lookup, flags, pid } => f
-                .debug_struct("Created")
-                .field("lookup", lookup)
-                .field("flags", flags)
-                .field("pid", pid)
-                .finish(),
-            FileHandleState::Read { request: _, pid } => f.debug_struct("Read").field("pid", pid).finish(),
+            FileHandleState::Read(_) => f.debug_struct("Read").finish(),
             FileHandleState::Write(arg0) => f.debug_tuple("Write").field(arg0).finish(),
-            FileHandleState::Closed => f.debug_struct("Closed").finish(),
         }
     }
 }
@@ -104,11 +90,6 @@ where
     Client: ObjectClient + Send + Sync,
     Prefetcher: Prefetch,
 {
-    async fn new(lookup: LookedUp, flags: i32, pid: u32) -> Self {
-        metrics::increment_gauge!("fs.current_handles", 1.0, "type" => "unassigned");
-        FileHandleState::Created { lookup, flags, pid }
-    }
-
     async fn new_write_handle(
         lookup: &LookedUp,
         ino: InodeNo,
         flags: i32,
         pid: u32,
         fs: &S3Filesystem<Client, Prefetcher>,
     ) -> Result<FileHandleState<Client, Prefetcher>, Error> {
-        if flags & libc::O_ACCMODE == libc::O_RDONLY {
-            return Err(err!(libc::EBADF, "file handle is not open for writes"));
-        }
-
         let is_truncate = flags & libc::O_TRUNC != 0;
         let handle = fs
             .superblock
@@ -146,13 +123,8 @@ where
     async fn new_read_handle(
         lookup: &LookedUp,
-        flags: i32,
-        pid: u32,
         fs: &S3Filesystem<Client, Prefetcher>,
     ) -> Result<FileHandleState<Client, Prefetcher>, Error> {
-        if flags & libc::O_WRONLY != 0 {
-            return Err(err!(libc::EBADF, "file handle is not open for reads",));
-        }
         if !lookup.stat.is_readable {
             return Err(err!(
                 libc::EACCES,
@@ -169,7 +141,7 @@ where
         let request = fs
             .prefetcher
             .prefetch(fs.client.clone(), &fs.bucket, &full_key, object_size, etag.clone());
-        let handle = FileHandleState::Read { request, pid };
+        let handle = FileHandleState::Read(request);
         metrics::increment_gauge!("fs.current_handles", 1.0, "type" => "read");
         Ok(handle)
     }
@@ -726,10 +698,29 @@ where
             return Err(err!(libc::EINVAL, "O_SYNC and O_DSYNC are not supported"));
         }
 
-        // All file handles will be lazy initialized on first read/write.
-        let state = FileHandleState::new(lookup, flags, pid).await.into();
+        let state = if flags & libc::O_RDWR != 0 {
+            let is_truncate = flags & libc::O_TRUNC != 0;
+            if !remote_file || (self.config.allow_overwrite && is_truncate) {
+                // If the file is new or opened in truncate mode, we know it must be a write handle.
+                debug!("fs:open choosing write handle for O_RDWR");
+                FileHandleState::new_write_handle(&lookup, lookup.inode.ino(), flags, pid, self).await?
+            } else {
+                // Otherwise, it must be a read handle.
+                debug!("fs:open choosing read handle for O_RDWR");
+                FileHandleState::new_read_handle(&lookup, self).await?
+            }
+        } else if flags & libc::O_WRONLY != 0 {
+            FileHandleState::new_write_handle(&lookup, lookup.inode.ino(), flags, pid, self).await?
+        } else {
+            FileHandleState::new_read_handle(&lookup, self).await?
+        };
+
         let fh = self.next_handle();
-        let handle = FileHandle { inode, full_key, state };
+        let handle = FileHandle {
+            inode,
+            full_key,
+            state: AsyncMutex::new(state),
+        };
         debug!(fh, ino, "new file handle created");
         self.file_handles.write().await.insert(fh, Arc::new(handle));
 
@@ -766,19 +757,8 @@ where
         logging::record_name(handle.inode.name());
         let mut state = handle.state.lock().await;
         let request = match &mut *state {
-            FileHandleState::Created { lookup, flags, pid, .. } => {
-                metrics::decrement_gauge!("fs.current_handles", 1.0, "type" => "unassigned");
-
-                *state = FileHandleState::new_read_handle(lookup, *flags, *pid, self).await?;
-                if let FileHandleState::Read { request, .. } = &mut *state {
-                    request
-                } else {
-                    unreachable!("handle type always be assigned above");
-                }
-            }
-            FileHandleState::Read { request, .. } => request,
+            FileHandleState::Read(request) => request,
             FileHandleState::Write(_) => return Err(err!(libc::EBADF, "file handle is not open for reads")),
-            FileHandleState::Closed => return Err(err!(libc::EBADF, "file handle is already closed")),
         };
 
         match request.read(offset as u64, size as usize).await {
@@ -870,18 +850,8 @@ where
         let len = {
             let mut state = handle.state.lock().await;
             let request = match &mut *state {
-                FileHandleState::Created { lookup, flags, pid } => {
-                    *state = FileHandleState::new_write_handle(lookup, ino, *flags, *pid, self).await?;
-                    metrics::decrement_gauge!("fs.current_handles", 1.0, "type" => "unassigned");
-                    if let FileHandleState::Write(request) = &mut *state {
-                        request
-                    } else {
-                        unreachable!("handle type always be assigned above");
-                    }
-                }
                 FileHandleState::Read { .. } => return Err(err!(libc::EBADF, "file handle is not open for writes")),
                 FileHandleState::Write(request) => request,
-                FileHandleState::Closed => return Err(err!(libc::EBADF, "file handle is already closed")),
             };
 
             request.write(offset, data, &handle.full_key).await?
@@ -1097,32 +1067,8 @@ where
         logging::record_name(file_handle.inode.name());
         let mut state = file_handle.state.lock().await;
         let request = match &mut *state {
-            FileHandleState::Created { lookup, flags, pid } => {
-                // This happens when users call fsync without any read() or write() requests,
-                // since we don't know what type of handle it would be we need to consider what
-                // to do next for both cases.
-                // * if the file is new or opened in truncate mode, we know it must be a write
-                //   handle so we can start an upload and complete it immediately, result in an
-                //   empty file.
-                // * if the file already exists and it is not opened in truncate mode, we still
-                //   can't be sure of its type so we will do nothing and just return ok.
-                let is_new_file = !lookup.inode.is_remote()?;
-                let is_truncate = *flags & libc::O_TRUNC != 0;
-                if is_new_file || is_truncate {
-                    *state = FileHandleState::new_write_handle(lookup, lookup.inode.ino(), *flags, *pid, self).await?;
-                    metrics::decrement_gauge!("fs.current_handles", 1.0, "type" => "unassigned");
-                    if let FileHandleState::Write(request) = &mut *state {
-                        request
-                    } else {
-                        unreachable!("handle type always be assigned above");
-                    }
-                } else {
-                    return Ok(());
-                }
-            }
             FileHandleState::Read { .. } => return Ok(()),
             FileHandleState::Write(request) => request,
-            FileHandleState::Closed => return Ok(()),
         };
         self.complete_upload(request, &file_handle.full_key, false, None).await
     }
@@ -1141,10 +1087,6 @@ where
        // process. In many cases, the child will then immediately close (flush) the duplicated
        // file descriptors. We will not complete the upload if we can detect that the process
        // invoking flush is different from the one that originally opened the file.
-       //
-       // The same for read path. We want to stop the prefetcher and decrease the reader count
-       // as soon as users close a file descriptor so that we don't block users from doing other
-       // operation like overwrite the file.
        let file_handle = {
            let file_handles = self.file_handles.read().await;
            match file_handles.get(&fh) {
@@ -1155,30 +1097,11 @@ where
        logging::record_name(file_handle.inode.name());
        let mut state = file_handle.state.lock().await;
        match &mut *state {
-           FileHandleState::Created { .. } => Ok(()),
-           FileHandleState::Read { pid: open_pid, .. } => {
-               if !are_from_same_process(*open_pid, pid) {
-                   trace!(
-                       file_handle.full_key,
-                       pid,
-                       open_pid,
-                       "not stopping prefetch because current pid differs from pid at open"
-                   );
-                   return Ok(());
-               }
-               // TODO make sure we cancel the inflight PrefetchingGetRequest. is just dropping enough?
-               file_handle.inode.finish_reading()?;
-
-               // Mark the file handle state as closed so we only update the reader count once
-               *state = FileHandleState::Closed;
-               metrics::decrement_gauge!("fs.current_handles", 1.0, "type" => "read");
-               Ok(())
-           }
+           FileHandleState::Read { .. } => Ok(()),
            FileHandleState::Write(request) => {
                self.complete_upload(request, &file_handle.full_key, true, Some(pid))
                    .await
            }
-           FileHandleState::Closed => Ok(()),
        }
    }

@@ -1210,30 +1133,7 @@ where
            }
        };

-       let mut state = file_handle.state.into_inner();
-       let request = match state {
-           FileHandleState::Created { lookup, flags, pid } => {
-               metrics::decrement_gauge!("fs.current_handles", 1.0, "type" => "unassigned");
-               // This happens when release is called before any read() or write(),
-               // since we don't know what type of handle it would be we need to consider
-               // what to do next for both cases.
-               // * if the file is new or opened in truncate mode, we know it must be a write
-               //   handle so we can start an upload from here.
-               // * if the file already exists and it is not opened in truncate mode, we still
-               //   can't be sure of its type so we will just drop it.
-               let is_new_file = !lookup.inode.is_remote()?;
-               let is_truncate = flags & libc::O_TRUNC != 0;
-               if is_new_file || is_truncate {
-                   state = FileHandleState::new_write_handle(&lookup, lookup.inode.ino(), flags, pid, self).await?;
-                   if let FileHandleState::Write(request) = state {
-                       request
-                   } else {
-                       unreachable!("handle type always be assigned above");
-                   }
-               } else {
-                   return Ok(());
-               }
-           }
+       let request = match file_handle.state.into_inner() {
            FileHandleState::Read { .. } => {
                // TODO make sure we cancel the inflight PrefetchingGetRequest. is just dropping enough?
                metrics::decrement_gauge!("fs.current_handles", 1.0, "type" => "read");
                file_handle.inode.finish_reading()?;
                return Ok(());
            }
            FileHandleState::Write(request) => request,
-           FileHandleState::Closed => return Ok(()),
        };

        let result = request.complete_if_in_progress(&file_handle.full_key).await;
diff --git a/mountpoint-s3/tests/fs.rs b/mountpoint-s3/tests/fs.rs
index f63ca294d..5551cfda4 100644
--- a/mountpoint-s3/tests/fs.rs
+++ b/mountpoint-s3/tests/fs.rs
@@ -611,13 +611,8 @@ async fn test_sequential_write(write_size: usize) {
     let file_ino = dentry.attr.ino;

     // First let's check that we can't write it again
-    let fh = fs
-        .open(file_ino, libc::S_IFREG as i32 | libc::O_WRONLY, 0)
-        .await
-        .unwrap()
-        .fh;
     let result = fs
-        .write(file_ino, fh, offset, &[0xaa; 27], 0, 0, None)
+        .open(file_ino, libc::S_IFREG as i32 | libc::O_WRONLY, 0)
         .await
         .expect_err("file should not be overwritable")
         .to_errno();
@@ -700,22 +695,13 @@ async fn test_duplicate_write_fails() {
     assert_eq!(dentry.attr.size, 0);
     let file_ino = dentry.attr.ino;

-    let opened = fs
+    let _opened = fs
         .open(file_ino, libc::S_IFREG as i32 | libc::O_WRONLY, 0)
         .await
         .unwrap();
-    _ = fs
-        .write(file_ino, opened.fh, 0, &[0xaa; 27], 0, 0, None)
-        .await
-        .expect("first write should succeed");

-    let opened = fs
-        .open(file_ino, libc::S_IFREG as i32 | libc::O_WRONLY, 0)
-        .await
-        .expect("open should succeed");
-    // Should not be allowed to write the file a second time
+    // Should not be allowed to open the file for write a second time
     let err = fs
-        .write(file_ino, opened.fh, 0, &[0xaa; 27], 0, 0, None)
+        .open(file_ino, libc::S_IFREG as i32 | libc::O_WRONLY, 0)
        .await
        .expect_err("should not be able to write twice")
        .to_errno();
@@ -1248,20 +1234,12 @@ async fn test_flexible_retrieval_objects() {
        let getattr = fs.getattr(entry.ino).await.unwrap();
        assert_eq!(flexible_retrieval, getattr.attr.perm == 0);

-       let open = fs
-           .open(entry.ino, libc::O_RDONLY, 0)
-           .await
-           .expect("open should succeed");
-       let read_result = fs.read(entry.ino, open.fh, 0, 4096, 0, None).await;
+       let open = fs.open(entry.ino, libc::O_RDONLY, 0).await;
        if flexible_retrieval {
-           let err = read_result.expect_err("can't read flexible retrieval objects");
+           let err = open.expect_err("can't open flexible retrieval objects");
            assert_eq!(err.to_errno(), libc::EACCES);
        } else {
-           assert_eq!(
-               &read_result.unwrap()[..],
-               b"hello world",
-               "instant retrieval files are readable"
-           );
+           let open = open.expect("instant retrieval files are readable");
            fs.release(entry.ino, open.fh, 0, None, true).await.unwrap();
        }
    }
@@ -1287,20 +1265,12 @@ async fn test_flexible_retrieval_objects() {
        let getattr = fs.getattr(lookup.attr.ino).await.unwrap();
        assert_eq!(flexible_retrieval, getattr.attr.perm == 0);

-       let open = fs
-           .open(lookup.attr.ino, libc::O_RDONLY, 0)
-           .await
-           .expect("open should succeed");
-       let read_result = fs.read(lookup.attr.ino, open.fh, 0, 4096, 0, None).await;
+       let open = fs.open(lookup.attr.ino, libc::O_RDONLY, 0).await;
        if flexible_retrieval {
-           let err = read_result.expect_err("can't read flexible retrieval objects");
+           let err = open.expect_err("can't open flexible retrieval objects");
            assert_eq!(err.to_errno(), libc::EACCES);
        } else {
-           assert_eq!(
-               &read_result.unwrap()[..],
-               b"hello world",
-               "instant retrieval files are readable"
-           );
+           let open = open.expect("instant retrieval files are readable");
            fs.release(lookup.attr.ino, open.fh, 0, None, true).await.unwrap();
        }
    }
diff --git a/mountpoint-s3/tests/fuse_tests/read_test.rs b/mountpoint-s3/tests/fuse_tests/read_test.rs
index 135ea45fa..538344852 100644
--- a/mountpoint-s3/tests/fuse_tests/read_test.rs
+++ b/mountpoint-s3/tests/fuse_tests/read_test.rs
@@ -1,5 +1,5 @@
 use std::fs::{read_dir, File};
-use std::io::{Read as _, Seek, SeekFrom, Write};
+use std::io::{Read, Seek, SeekFrom, Write};
 #[cfg(not(feature = "s3express_tests"))]
 use std::os::unix::prelude::PermissionsExt;
 use std::path::Path;
@@ -273,10 +273,11 @@ where
     assert_eq!(dir_entry_names, vec!["hello.txt"]);

     // Overwrite the test file and verify that we can't read from a file opened in O_WRONLY
-    let mut write_fh = File::options().write(true).open(&file_path).unwrap();
+    let mut write_fh = File::options().write(true).truncate(true).open(&file_path).unwrap();
     let mut contents = String::new();
     let err = write_fh.read_to_string(&mut contents).expect_err("read should fail");
     assert_eq!(err.raw_os_error(), Some(libc::EBADF));
+    write_fh.sync_all().unwrap();
     drop(write_fh);

     // We shouldn't be able to read from a file mid-write in O_RDWR
@@ -293,8 +294,8 @@ where
     assert_eq!(err.raw_os_error(), Some(libc::EBADF));

     // Read should also fail from different file handle
-    let mut read_fh = open_for_read(&file_path, true).unwrap();
-    let err = read_fh.read_to_string(&mut contents).expect_err("read should fail");
+    let err =
+        open_for_read(&file_path, true).expect_err("opening for read should fail with pending write handles open");
     assert_eq!(err.raw_os_error(), Some(libc::EPERM));
 }

@@ -309,3 +310,44 @@ fn read_errors_test_s3() {
 fn read_errors_test_mock(prefix: &str) {
     read_errors_test(fuse::mock_session::new, prefix);
 }
+
+fn read_after_flush_test<F>(creator_fn: F)
+where
+    F: FnOnce(&str, TestSessionConfig) -> (TempDir, BackgroundSession, TestClientBox),
+{
+    const KEY: &str = "data.bin";
+    let (mount_point, _session, mut test_client) = creator_fn("read_after_flush_test", Default::default());
+
+    let mut rng = ChaChaRng::seed_from_u64(0x87654321);
+    let mut two_mib_body = vec![0; 2 * 1024 * 1024];
+    rng.fill_bytes(&mut two_mib_body);
+    test_client.put_object(KEY, &two_mib_body).unwrap();
+
+    let path = mount_point.path().join(KEY);
+    let mut f = open_for_read(path, true).unwrap();
+
+    let mut content = vec![0; 128];
+    f.read_exact(&mut content).unwrap();
+
+    let mut f_dup = f.try_clone().unwrap();
+
+    // Close the file. Triggers a flush on the file handle.
+    drop(f);
+
+    // Read using the duplicated instance (same underlying handle).
+    // Seek to the end of the file to avoid relying on the kernel cache.
+    let pos = f_dup.seek(SeekFrom::End(-(content.len() as i64))).unwrap() as usize;
+    f_dup.read_exact(&mut content).unwrap();
+    assert_eq!(content, two_mib_body[pos..]);
+}
+
+#[cfg(feature = "s3_tests")]
+#[test]
+fn read_after_flush_test_s3() {
+    read_after_flush_test(fuse::s3_session::new);
+}
+
+#[test]
+fn read_after_flush_test_mock() {
+    read_after_flush_test(fuse::mock_session::new);
+}
diff --git a/mountpoint-s3/tests/fuse_tests/write_test.rs b/mountpoint-s3/tests/fuse_tests/write_test.rs
index 5711fc484..3f23c6c3c 100644
--- a/mountpoint-s3/tests/fuse_tests/write_test.rs
+++ b/mountpoint-s3/tests/fuse_tests/write_test.rs
@@ -89,17 +89,6 @@ where
     let read_dir_iter = read_dir(&subdir).unwrap();
     let dir_entry_names = read_dir_to_entry_names(read_dir_iter);
     assert_eq!(dir_entry_names, vec!["hello.txt", "new.txt"]);
-
-    if append {
-        // We can't append existing files
-        let err = open_for_write(&path, append, write_only).expect_err("can't append existing file");
-        assert_eq!(err.kind(), ErrorKind::InvalidInput);
-    } else {
-        // We shouldn't be allowed to write the file again
-        let mut f = open_for_write(&path, append, write_only).expect("should be able to open the file");
-        let err = f.write(b"hello world").expect_err("can't write existing file");
-        assert_eq!(err.kind(), ErrorKind::PermissionDenied);
-    }
 }

 #[cfg(feature = "s3_tests")]
@@ -136,8 +125,7 @@ where
     // Existing files should not be writable even in O_APPEND
     let err = open_for_write(&path, true, true).expect_err("can't append existing file");
     assert_eq!(err.kind(), ErrorKind::InvalidInput);
-    let mut f = open_for_write(&path, false, true).expect("should be able to open the file");
-    let err = f.write(b"hello world").expect_err("can't write existing file");
+    let err = open_for_write(&path, false, true).expect_err("can't open existing file for write");
     assert_eq!(err.kind(), ErrorKind::PermissionDenied);

     // New files can't be opened with O_SYNC
@@ -168,11 +156,18 @@ where
     // For default config, existing files can be opened O_RDWR but only reading should work on them
     let mut file = File::options().read(true).write(true).create(true).open(&path).unwrap();
     assert!(file.read(&mut [0u8; 1]).is_ok());
-
-    let mut file = File::options().read(true).write(true).create(true).open(&path).unwrap();
     let err = file
         .write(b"hello world")
         .expect_err("write to an existing file should fail");
+    assert_eq!(err.raw_os_error(), Some(libc::EBADF));
+
+    // For default config, existing files cannot be opened with O_TRUNC
+    let err = File::options()
+        .read(true)
+        .write(true)
+        .truncate(true)
+        .open(&path)
+        .expect_err("existing file cannot be opened with O_TRUNC");
     assert_eq!(err.raw_os_error(), Some(libc::EPERM));
 }

@@ -758,14 +753,14 @@ where
     assert_eq!(err.raw_os_error(), Some(libc::EBADF));

     let mut options = File::options();
-    let mut write_fh = options.write(true).open(&path).unwrap();
-    let err = write_fh
-        .write(b"hello world")
-        .expect_err("writing to a file is being read should fail");
+    let err = options
+        .write(true)
+        .truncate(true)
+        .open(&path)
+        .expect_err("opening a file for write while it is being read should fail");
     assert_eq!(err.raw_os_error(), Some(libc::EPERM));

     drop(fh);
-    drop(write_fh);
 }

 #[cfg(feature = "s3_tests")]
@@ -802,17 +797,25 @@ where
     let _subdir = mount_point.path().join("dir");
     let path = mount_point.path().join("dir/hello.txt");

-    // Writes should fail when open the file without truncate flag
+    // Open should fail without truncate flag
     let mut options = File::options();
     if !write_only {
-        options.read(true);
+        let mut read_fh = options
+            .read(true)
+            .write(true)
+            .open(path)
+            .expect("using RW should open for read");
+        let err = read_fh
+            .write(b"hello world")
+            .expect_err("writing to a file opened for read should fail");
+        assert_eq!(err.raw_os_error(), Some(libc::EBADF));
+    } else {
+        let err = options
+            .write(true)
+            .open(path)
+            .expect_err("overwriting a file opened without truncate flag should fail");
+        assert_eq!(err.raw_os_error(), Some(libc::EPERM));
     }
-    let mut write_fh = options.write(true).open(path).unwrap();
-    let err = write_fh
-        .write(b"hello world")
-        .expect_err("overwriting a file opened without truncate flag should fail");
-    assert_eq!(err.raw_os_error(), Some(libc::EPERM));
-    drop(write_fh);
 }

 #[cfg(feature = "s3_tests")]
@@ -836,7 +839,7 @@ fn overwrite_fail_on_write_without_truncate_test_mock(write_only: bool) {
     );
 }

-fn overwrite_no_truncate_test<F>(creator_fn: F, prefix: &str, write_only: bool)
+fn overwrite_truncate_test<F>(creator_fn: F, prefix: &str, write_only: bool)
 where
     F: FnOnce(&str, TestSessionConfig) -> (TempDir, BackgroundSession, TestClientBox),
 {
@@ -856,36 +859,39 @@ where
     let _subdir = mount_point.path().join("dir");
     let path = mount_point.path().join("dir/hello.txt");

-    // Open the file in non-truncate mode and do nothing
+    // File should be empty when opened with O_TRUNC even without any write
     let mut options = File::options();
     if !write_only {
         options.read(true);
     }
-    let write_fh = options.write(true).open(&path).expect("open should succeed");
+    let write_fh = options
+        .write(true)
+        .truncate(true)
+        .open(&path)
+        .expect("open should succeed");
     drop(write_fh);

-    // File content should not be changed
     let mut options = File::options();
-    let mut fh = options.read(true).write(true).open(&path).unwrap();
+    let mut read_fh = options.read(true).open(&path).unwrap();
     let mut hello_contents = String::new();
-    fh.read_to_string(&mut hello_contents).unwrap();
-    assert_eq!(hello_contents, "hello world");
+    read_fh.read_to_string(&mut hello_contents).unwrap();
+    assert!(hello_contents.is_empty());
 }

 #[cfg(feature = "s3_tests")]
 #[test_case(true; "write_only")]
 #[test_case(false; "read_write")]
-fn overwrite_no_truncate_test_s3(write_only: bool) {
-    overwrite_no_truncate_test(fuse::s3_session::new, "overwrite_no_truncate_test", write_only);
+fn overwrite_truncate_test_s3(write_only: bool) {
+    overwrite_truncate_test(fuse::s3_session::new, "overwrite_truncate_test", write_only);
 }

 #[test_case(true; "write_only")]
 #[test_case(false; "read_write")]
-fn overwrite_no_truncate_test_mock(write_only: bool) {
-    overwrite_no_truncate_test(fuse::mock_session::new, "overwrite_no_truncate_test", write_only);
+fn overwrite_truncate_test_mock(write_only: bool) {
+    overwrite_truncate_test(fuse::mock_session::new, "overwrite_truncate_test", write_only);
 }

-fn overwrite_truncate_test<F>(creator_fn: F, prefix: &str, write_only: bool)
+fn overwrite_after_read_test<F>(creator_fn: F, prefix: &str)
 where
     F: FnOnce(&str, TestSessionConfig) -> (TempDir, BackgroundSession, TestClientBox),
 {
@@ -905,44 +911,42 @@ where
     let _subdir = mount_point.path().join("dir");
     let path = mount_point.path().join("dir/hello.txt");

-    // File should be empty when opened with O_TRUNC even without any write
-    let mut options = File::options();
-    if !write_only {
-        options.read(true);
-    }
-    let write_fh = options
+    // Read first
+    let mut read_fh = File::options().read(true).open(&path).unwrap();
+    let mut hello_contents = String::new();
+    read_fh.read_to_string(&mut hello_contents).unwrap();
+    assert_eq!(hello_contents, "hello world");
+    drop(read_fh);
+
+    // Try to open the same file for write (overwrite)
+    let write_fh = File::options()
         .write(true)
         .truncate(true)
         .open(&path)
         .expect("open should succeed");
     drop(write_fh);
-
-    let mut options = File::options();
-    let mut read_fh = options.read(true).open(&path).unwrap();
-    let mut hello_contents = String::new();
-    read_fh.read_to_string(&mut hello_contents).unwrap();
-    assert!(hello_contents.is_empty());
 }

 #[cfg(feature = "s3_tests")]
-#[test_case(true; "write_only")]
-#[test_case(false; "read_write")]
-fn overwrite_truncate_test_s3(write_only: bool) {
-    overwrite_truncate_test(fuse::s3_session::new, "overwrite_truncate_test", write_only);
+#[test]
+#[ignore = "due to a race condition on release of a read handle, overwrite after read may occasionally fail on open"]
+fn overwrite_after_read_test_s3() {
+    overwrite_after_read_test(fuse::s3_session::new, "overwrite_after_read_test");
 }

-#[test_case(true; "write_only")]
-#[test_case(false; "read_write")]
-fn overwrite_truncate_test_mock(write_only: bool) {
-    overwrite_truncate_test(fuse::mock_session::new, "overwrite_truncate_test", write_only);
+#[test_case(""; "no prefix")]
+#[test_case("overwrite_after_read_test"; "prefix")]
+#[ignore = "due to a race condition on release of a read handle, overwrite after read may occasionally fail on open"]
+fn overwrite_after_read_test_mock(prefix: &str) {
+    overwrite_after_read_test(fuse::mock_session::new, prefix);
 }

-fn read_overwrite_read_test<F>(creator_fn: F, prefix: &str)
+fn write_handle_no_update_existing_empty_file<F>(creator_fn: F, prefix: &str, allow_overwrite: bool)
 where
     F: FnOnce(&str, TestSessionConfig) -> (TempDir, BackgroundSession, TestClientBox),
 {
     let filesystem_config = S3FilesystemConfig {
-        allow_overwrite: true,
+        allow_overwrite,
         ..Default::default()
     };
     let test_config = TestSessionConfig {
@@ -952,45 +956,37 @@ where
     let (mount_point, _session, mut test_client) = creator_fn(prefix, test_config);

     // Make sure there's an existing directory and a file
-    test_client.put_object("dir/hello.txt", b"hello world").unwrap();
+    test_client.put_object("dir/hello.txt", b"").unwrap();
     let _subdir = mount_point.path().join("dir");
     let path = mount_point.path().join("dir/hello.txt");

-    // Read first
-    let mut options = File::options();
-    let mut read_fh = options.read(true).open(&path).unwrap();
-    let mut hello_contents = String::new();
-    read_fh.read_to_string(&mut hello_contents).unwrap();
-    assert_eq!(hello_contents, "hello world");
-    drop(read_fh);
-
-    // File should be empty when opened with O_WRONLY and O_TRUNC even without any write
-    let mut options = File::options();
-    let write_fh = options
-        .write(true)
-        .truncate(true)
-        .open(&path)
-        .expect("open should succeed");
-    drop(write_fh);
-
-    let mut options = File::options();
-    let mut read_fh = options.read(true).open(&path).unwrap();
-    let mut hello_contents = String::new();
-    read_fh.read_to_string(&mut hello_contents).unwrap();
-    assert!(hello_contents.is_empty());
+    // Open the file in non-truncate mode and do nothing
+    File::options()
+        .write(true)
+        .open(path)
+        .expect_err("write-only open should not succeed without O_TRUNC");
 }

 #[cfg(feature = "s3_tests")]
-#[test]
-fn read_overwrite_read_test_s3() {
-    read_overwrite_read_test(fuse::s3_session::new, "read_overwrite_read_test");
+#[test_case(true; "allow overwrite")]
+#[test_case(false; "disallow overwrite")]
+fn write_handle_no_update_existing_empty_file_s3(allow_overwrite: bool) {
+    write_handle_no_update_existing_empty_file(
+        fuse::s3_session::new,
+        "write_handle_no_update_existing_empty_file",
+        allow_overwrite,
+    );
 }

-#[test_case(""; "no prefix")]
-#[test_case("read_overwrite_read_test"; "prefix")]
-fn read_overwrite_read_test_mock(prefix: &str) {
-    read_overwrite_read_test(fuse::mock_session::new, prefix);
+#[test_case(true; "allow overwrite")]
+#[test_case(false; "disallow overwrite")]
+fn write_handle_no_update_existing_empty_file_mock(allow_overwrite: bool) {
+    write_handle_no_update_existing_empty_file(
+        fuse::mock_session::new,
+        "write_handle_no_update_existing_empty_file",
+        allow_overwrite,
+    );
 }

 // This test checks that a write can be performed when IAM session policy enforces the usage of the specific SSE type and a KMS key ID
diff --git a/mountpoint-s3/tests/reftests/harness.rs b/mountpoint-s3/tests/reftests/harness.rs
index 8906b1e94..4725c3815 100644
--- a/mountpoint-s3/tests/reftests/harness.rs
+++ b/mountpoint-s3/tests/reftests/harness.rs
@@ -347,20 +347,12 @@ impl Harness {
             return;
         };

-        let open = self
-            .fs
-            .open(inflight_write.inode, libc::O_WRONLY, 0)
-            .await
-            .expect("open should succeed");
-        if inflight_write.written > 0 {
-            // Shouldn't be able to write to a file that is being written
-            let write = self
-                .fs
-                .write(inflight_write.inode, open.fh, 0, &[0; 8], 0, 0, None)
-                .await
-                .expect_err("write from another file handle should fail");
-            assert_eq!(write.to_errno(), libc::EPERM);
+        let open = self.fs.open(inflight_write.inode, libc::O_WRONLY, 0).await;
+        if inflight_write.file_handle.is_some() {
+            // Shouldn't be able to reopen a file that's already open for writing
+            assert!(matches!(open, Err(e) if e.to_errno() == libc::EPERM));
         } else {
+            let open = open.expect("open should succeed");
             let inflight_write = self.inflight_writes.get_mut(index).unwrap();
             inflight_write.file_handle = Some(open.fh);
         }
@@ -710,14 +702,8 @@ impl Harness {
     /// readable (open should fail).
     async fn check_local_file(&self, inode: InodeNo) {
         let _stat = self.fs.getattr(inode).await.expect("stat should succeed");
-        let open = self
-            .fs
-            .open(inode, libc::O_RDONLY, 0)
-            .await
-            .expect("open should succeed");
-        let read_result = self.fs.read(inode, open.fh, 0, 4096, 0, None).await;
-        let error = read_result.expect_err("read should fail");
-        assert_eq!(error.to_errno(), libc::EPERM);
+        let open = self.fs.open(inode, libc::O_RDONLY, 0).await;
+        assert!(matches!(open, Err(e) if e.to_errno() == libc::EPERM));
     }
 }
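
The test changes above all encode the same user-visible shift described in the CHANGELOG: because file handles are now typed eagerly at `open` time, error conditions that previously surfaced on the first `read`/`write` call now surface as `open` failures. Below is a minimal caller-side sketch of that behavior, not part of the patch, assuming a hypothetical Mountpoint mount at `/mnt/bucket` containing an existing object `dir/hello.txt` and the default configuration (no `allow-overwrite`):

```rust
use std::fs::File;
use std::io::{ErrorKind, Read};

fn main() -> std::io::Result<()> {
    // Reading an existing object is unchanged: the read handle is set up at open().
    let mut contents = String::new();
    File::open("/mnt/bucket/dir/hello.txt")?.read_to_string(&mut contents)?;

    // Opening an existing object for write without O_TRUNC (or without
    // allow-overwrite) is now rejected at open() with EPERM, rather than
    // succeeding and failing later on the first write().
    let err = File::options()
        .write(true)
        .open("/mnt/bucket/dir/hello.txt")
        .expect_err("overwrite without O_TRUNC should be rejected at open");
    assert_eq!(err.kind(), ErrorKind::PermissionDenied);

    Ok(())
}
```

The `#[ignore]`d `overwrite_after_read_test` covers the transient variant of the same EPERM: an open-for-overwrite issued immediately after a read handle is closed may briefly fail while the reader count is still being released.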