Skip to content

Commit

Permalink
Merge pull request #2426 from realm/fsa/fix-advance-read
Browse files Browse the repository at this point in the history
Fsa/fix advance read
  • Loading branch information
finnschiermer authored Feb 7, 2017
2 parents 478c1e0 + 894d92d commit fc8b71d
Show file tree
Hide file tree
Showing 5 changed files with 36 additions and 29 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,11 @@
### Bugfixes

* Fixed a bug in handover of detached linked lists. (issue #2378).
* Fixed a bug in advance_read(): The memory mappings need to be updated and
the translation cache in the slab allocator must be invalidated prior to
traversing the transaction history. This bug could be reported as corruption
in general, or more likely as corruption of the transaction log. It is much
more likely to trigger if encryption is enabled. (issue #2383).

### Breaking changes

Expand Down
11 changes: 7 additions & 4 deletions src/realm/alloc_slab.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -189,7 +189,7 @@ void SlabAlloc::detach() noexcept
default:
REALM_UNREACHABLE();
}
invalidate_cache();
internal_invalidate_cache();

// Release all allocated memory - this forces us to create new
// slabs after re-attaching thereby ensuring that the slabs are
Expand Down Expand Up @@ -1005,7 +1005,7 @@ size_t SlabAlloc::get_total_size() const noexcept

void SlabAlloc::reset_free_space_tracking()
{
invalidate_cache();
internal_invalidate_cache();
if (is_free_space_clean())
return;

Expand All @@ -1032,12 +1032,15 @@ void SlabAlloc::reset_free_space_tracking()
}


void SlabAlloc::remap(size_t file_size)
void SlabAlloc::update_reader_view(size_t file_size)
{
internal_invalidate_cache();
if (file_size <= m_baseline) {
return;
}
REALM_ASSERT(file_size % 8 == 0); // 8-byte alignment required
REALM_ASSERT(m_attach_mode == attach_SharedFile || m_attach_mode == attach_UnsharedFile);
REALM_ASSERT_DEBUG(is_free_space_clean());
REALM_ASSERT(m_baseline <= file_size);

// Extend mapping by adding sections
REALM_ASSERT_DEBUG(matches_section_boundary(file_size));
Expand Down
14 changes: 10 additions & 4 deletions src/realm/alloc_slab.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,7 @@ class SlabAlloc : public Allocator {

/// Get the size of the attached database file or buffer in number
/// of bytes. This size is not affected by new allocations. After
/// attachment, it can only be modified by a call to remap().
/// attachment, it can only be modified by a call to update_reader_view().
///
/// It is an error to call this function on a detached allocator,
/// or one that was attached using attach_empty(). Doing so will
Expand All @@ -275,6 +275,8 @@ class SlabAlloc : public Allocator {
/// space.
void reset_free_space_tracking();

/// Update the reader's view of the file:
///
/// Remap the attached file such that a prefix of the specified
/// size becomes available in memory. If successful,
/// get_baseline() will return the specified new file size.
Expand All @@ -288,7 +290,10 @@ class SlabAlloc : public Allocator {
/// guaranteed to be mapped as a contiguous address range. The allocation
/// of memory in the file must ensure that no allocation crosses the
/// boundary between two sections.
void remap(size_t file_size);
///
/// Clears any allocator specific caching of address translations
/// and forces any later address translations to trigger decryption if required.
void update_reader_view(size_t file_size);

/// Returns true initially, and after a call to reset_free_space_tracking()
/// up until the point of the first call to SlabAlloc::alloc(). Note that a
Expand Down Expand Up @@ -324,9 +329,9 @@ class SlabAlloc : public Allocator {
// FIXME: It would be very nice if we could detect an invalid free operation in debug mode
void do_free(ref_type, const char*) noexcept override;
char* do_translate(ref_type) const noexcept override;
void invalidate_cache() noexcept;

private:
void internal_invalidate_cache() noexcept;
enum AttachMode {
attach_None, // Nothing is attached
attach_OwnedBuffer, // We own the buffer (m_data = nullptr for empty buffer)
Expand Down Expand Up @@ -489,10 +494,11 @@ class SlabAlloc : public Allocator {
size_t find_section_in_range(size_t start_pos, size_t free_chunk_size, size_t request_size) const noexcept;

friend class Group;
friend class SharedGroup;
friend class GroupWriter;
};

inline void SlabAlloc::invalidate_cache() noexcept
inline void SlabAlloc::internal_invalidate_cache() noexcept
{
++version;
}
Expand Down
30 changes: 9 additions & 21 deletions src/realm/group.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -267,21 +267,15 @@ Group::~Group() noexcept

void Group::remap(size_t new_file_size)
{
size_t old_baseline = m_alloc.get_baseline();
if (new_file_size > old_baseline)
m_alloc.remap(new_file_size); // Throws
m_alloc.update_reader_view(new_file_size); // Throws
}


void Group::remap_and_update_refs(ref_type new_top_ref, size_t new_file_size)
{
size_t old_baseline = m_alloc.get_baseline();

if (new_file_size > old_baseline) {
m_alloc.remap(new_file_size); // Throws
}

m_alloc.invalidate_cache();
m_alloc.update_reader_view(new_file_size); // Throws
update_refs(new_top_ref, old_baseline);
}

Expand Down Expand Up @@ -346,9 +340,8 @@ void Group::attach_shared(ref_type new_top_ref, size_t new_file_size, bool writa
// available free-space.
reset_free_space_tracking(); // Throws

// Update memory mapping if database file has grown
if (new_file_size > m_alloc.get_baseline())
m_alloc.remap(new_file_size); // Throws
// update reader's view of memory
m_alloc.update_reader_view(new_file_size); // Throws

// When `new_top_ref` is null, ask attach() to create a new node structure
// for an empty group, but only during the initiation of write
Expand Down Expand Up @@ -958,11 +951,9 @@ void Group::commit()

size_t old_baseline = m_alloc.get_baseline();

// Remap file if it has grown
// Update view of the file
size_t new_file_size = out.get_file_size();
if (new_file_size > old_baseline) {
m_alloc.remap(new_file_size); // Throws
}
m_alloc.update_reader_view(new_file_size); // Throws

out.commit(top_ref); // Throws

Expand Down Expand Up @@ -1850,18 +1841,15 @@ void Group::advance_transact(ref_type new_top_ref, size_t new_file_size, _impl::
// the per-table accessor dirty flags (Table::m_dirty) to prune the
// traversal to the set of accessors that were touched by the changes in the
// transaction logs.
// Update memory mapping if database file has grown

m_alloc.update_reader_view(new_file_size); // Throws

bool schema_changed = false;
_impl::TransactLogParser parser; // Throws
TransactAdvancer advancer(*this, schema_changed);
parser.parse(in, advancer); // Throws

// Update memory mapping if database file has grown
if (new_file_size > m_alloc.get_baseline()) {
m_alloc.remap(new_file_size); // Throws
}

m_alloc.invalidate_cache();
m_top.detach(); // Soft detach
bool create_group_when_missing = false; // See Group::attach_shared().
attach(new_top_ref, create_group_when_missing); // Throws
Expand Down
5 changes: 5 additions & 0 deletions src/realm/group_shared.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -1039,6 +1039,11 @@ inline bool SharedGroup::do_advance_read(O* observer, VersionID version_id, _imp
version_type new_version = new_read_lock.m_version;
size_t new_file_size = new_read_lock.m_file_size;
ref_type new_top_ref = new_read_lock.m_top_ref;

// Synchronize reader's view of the file
SlabAlloc& alloc = m_group.m_alloc;
alloc.update_reader_view(new_file_size);

hist.update_early_from_top_ref(new_version, new_file_size, new_top_ref); // Throws
}

Expand Down

0 comments on commit fc8b71d

Please sign in to comment.