diff --git a/CHANGELOG.md b/CHANGELOG.md index 75d15cea196..cdb39caf68b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ * Add support for building with Xcode 14 using the CMake project ([PR #5577](https://github.com/realm/realm-core/pull/5577)). * Expose MongoDB client interface in the C API. ([PR #5638](https://github.com/realm/realm-core/pull/5638)). * Add support in the C API for constructing a new `realm_app_t` object via `realm_app_create`. ([PR #5570](https://github.com/realm/realm-core/issues/5570)) +* Reduce use of memory mappings and virtual address space ([PR #5645](https://github.com/realm/realm-core/pull/5645)). Also fixes some errors (see below) ### Fixed * ([#????](https://github.com/realm/realm-core/issues/????), since v?.?.?) @@ -14,6 +15,7 @@ * Fix compilation failures on watchOS platforms which do not support thread-local storage. ([#7694](https://github.com/realm/realm-swift/issues/7694), [#7695](https://github.com/realm/realm-swift/issues/7695) since v11.7.0) * Fix a data race when committing a transaction while multiple threads are waiting for the write lock on platforms using emulated interprocess condition variables (most platforms other than non-Android Linux). * Fix a data race when writing audit events which could occur if the sync client thread was busy with other work when the event Realm was opened. +* Fix some cases of running out of virtual address space (seen/reported as mmap failures) ([PR #5645](https://github.com/realm/realm-core/pull/5645)) * Audit event scopes containing only write events and no read events would occasionally throw a `BadVersion` exception when a write transaction was committed (since v11.17.0). 
### Breaking changes diff --git a/src/realm/alloc_slab.cpp b/src/realm/alloc_slab.cpp index 8fa5d301d05..2a99f023ba7 100644 --- a/src/realm/alloc_slab.cpp +++ b/src/realm/alloc_slab.cpp @@ -1122,8 +1122,14 @@ void SlabAlloc::update_reader_view(size_t file_size) if (old_baseline < old_slab_base) { // old_slab_base should be 0 if we had no mappings previously REALM_ASSERT(old_num_mappings > 0); - replace_last_mapping = true; - --old_num_mappings; + // try to extend the old mapping in-place instead of replacing it. + MapEntry& cur_entry = m_mappings.back(); + const size_t section_start_offset = get_section_base(old_num_mappings - 1); + const size_t section_size = std::min(1 << section_shift, file_size - section_start_offset); + if (!cur_entry.primary_mapping.try_extend_to(section_size)) { + replace_last_mapping = true; + --old_num_mappings; + } } // Create new mappings covering from the end of the last complete @@ -1134,8 +1140,25 @@ void SlabAlloc::update_reader_view(size_t file_size) for (size_t k = old_num_mappings; k < num_mappings; ++k) { const size_t section_start_offset = get_section_base(k); const size_t section_size = std::min(1 << section_shift, file_size - section_start_offset); - new_mappings.push_back( - {util::File::Map(m_file, section_start_offset, File::access_ReadOnly, section_size)}); + if (section_size == (1 << section_shift)) { + new_mappings.push_back( + {util::File::Map(m_file, section_start_offset, File::access_ReadOnly, section_size)}); + } + else { + new_mappings.push_back({util::File::Map()}); + auto& mapping = new_mappings.back().primary_mapping; + bool reserved = + mapping.try_reserve(m_file, File::access_ReadOnly, 1 << section_shift, section_start_offset); + if (reserved) { + // if reservation is supported, first attempt at extending must succeed + if (!mapping.try_extend_to(section_size)) + throw std::bad_alloc(); + } + else { + new_mappings.back().primary_mapping.map(m_file, File::access_ReadOnly, section_size, 0, + 
section_start_offset); + } + } } } @@ -1169,7 +1192,7 @@ void SlabAlloc::update_reader_view(size_t file_size) // Build the fast path mapping - // The fast path mapping is an array which will is used from multiple threads + // The fast path mapping is an array which is used from multiple threads // without locking - see translate(). // Addition of a new mapping may require a completely new fast mapping table. diff --git a/src/realm/util/encrypted_file_mapping.cpp b/src/realm/util/encrypted_file_mapping.cpp index 72dd82472a6..673ad547990 100644 --- a/src/realm/util/encrypted_file_mapping.cpp +++ b/src/realm/util/encrypted_file_mapping.cpp @@ -560,7 +560,6 @@ void EncryptedFileMapping::mark_for_refresh(size_t ref_start, size_t ref_end) } } - void EncryptedFileMapping::write_and_update_all(size_t local_page_ndx, size_t begin_offset, size_t end_offset) noexcept { @@ -860,13 +859,24 @@ void EncryptedFileMapping::read_barrier(const void* addr, size_t size, Header_to } } +void EncryptedFileMapping::extend_to(size_t offset, size_t new_size) +{ + REALM_ASSERT(new_size % (1ULL << m_page_shift) == 0); + size_t num_pages = new_size >> m_page_shift; + m_page_state.resize(num_pages, PageState::Clean); + m_chunk_dont_scan.resize((num_pages + page_to_chunk_factor - 1) >> page_to_chunk_shift, false); + m_file.cryptor.set_file_size((off_t)(offset + new_size)); +} void EncryptedFileMapping::set(void* new_addr, size_t new_size, size_t new_file_offset) { REALM_ASSERT(new_file_offset % (1ULL << m_page_shift) == 0); REALM_ASSERT(new_size % (1ULL << m_page_shift) == 0); - REALM_ASSERT(new_size > 0); + // This seems dangerous - correct operation in a setting with multiple (partial) + // mappings of the same file would rely on ordering of individual mapping requests. + // Currently we only ever extend the file - but when we implement continuous defrag, + // this design should be revisited. 
m_file.cryptor.set_file_size(off_t(new_size + new_file_offset)); flush(); diff --git a/src/realm/util/encrypted_file_mapping.hpp b/src/realm/util/encrypted_file_mapping.hpp index 1d6530a3cc4..574cd246a51 100644 --- a/src/realm/util/encrypted_file_mapping.hpp +++ b/src/realm/util/encrypted_file_mapping.hpp @@ -72,6 +72,10 @@ class EncryptedFileMapping { // Flushes any remaining dirty pages from the old mapping void set(void* new_addr, size_t new_size, size_t new_file_offset); + // Extend the size of this mapping. Memory holding decrypted pages must + // have been allocated earlier + void extend_to(size_t offset, size_t new_size); + size_t collect_decryption_count() { return m_num_decrypted; @@ -104,6 +108,7 @@ class EncryptedFileMapping { size_t m_num_decrypted; // 1 for every page decrypted enum PageState { + Clean = 0, Touched = 1, // a ref->ptr translation has taken place UpToDate = 2, // the page is fully up to date RefetchRequired = 4, // the page is valid for old translations, but requires re-decryption for new diff --git a/src/realm/util/file.cpp b/src/realm/util/file.cpp index c8771413699..62ae990ef84 100644 --- a/src/realm/util/file.cpp +++ b/src/realm/util/file.cpp @@ -1691,9 +1691,10 @@ void File::MapBase::map(const File& f, AccessMode a, size_t size, int map_flags, #else m_addr = f.map(a, size, map_flags, offset); #endif - m_size = size; + m_size = m_reservation_size = size; m_fd = f.m_fd; m_offset = offset; + m_a = a; } @@ -1701,20 +1702,85 @@ void File::MapBase::unmap() noexcept { if (!m_addr) return; - REALM_ASSERT(m_size); - File::unmap(m_addr, m_size); - m_addr = nullptr; - m_size = 0; + REALM_ASSERT(m_reservation_size); #if REALM_ENABLE_ENCRYPTION - m_encrypted_mapping = nullptr; + if (m_encrypted_mapping) { + m_encrypted_mapping = nullptr; + util::remove_encrypted_mapping(m_addr, m_size); + } #endif + ::munmap(m_addr, m_reservation_size); + m_addr = nullptr; + m_size = 0; + m_reservation_size = 0; } void File::MapBase::remap(const File& f, 
AccessMode a, size_t size, int map_flags) { REALM_ASSERT(m_addr); m_addr = f.remap(m_addr, m_size, a, size, map_flags); - m_size = size; + m_size = m_reservation_size = size; +} + +bool File::MapBase::try_reserve(const File& file, AccessMode a, size_t size, size_t offset) +{ +#ifdef _WIN32 + // unsupported for now + return false; +#else + void* addr = ::mmap(0, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (addr == MAP_FAILED) + return false; + m_addr = addr; + REALM_ASSERT(m_size == 0); + m_a = a; + m_reservation_size = size; + m_fd = file.get_descriptor(); + m_offset = offset; +#if REALM_ENABLE_ENCRYPTION + if (file.m_encryption_key) { + m_encrypted_mapping = util::reserve_mapping(addr, m_fd, offset, a, file.m_encryption_key.get()); + } +#endif +#endif + return true; +} + +bool File::MapBase::try_extend_to(size_t size) noexcept +{ + if (size > m_reservation_size) { + return false; + } + // return false; +#ifndef _WIN32 + char* extension_start_addr = (char*)m_addr + m_size; + size_t extension_size = size - m_size; + size_t extension_start_offset = m_offset + m_size; +#if REALM_ENABLE_ENCRYPTION + if (m_encrypted_mapping) { + void* got_addr = ::mmap(extension_start_addr, extension_size, PROT_READ | PROT_WRITE, + MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0); + if (got_addr == MAP_FAILED) + return false; + REALM_ASSERT(got_addr == extension_start_addr); + util::extend_encrypted_mapping(m_encrypted_mapping, m_addr, m_offset, m_size, size); + m_size = size; + return true; + } +#endif + try { + void* got_addr = + util::mmap_fixed(m_fd, extension_start_addr, extension_size, m_a, extension_start_offset, nullptr); + if (got_addr == extension_start_addr) { + m_size = size; + return true; + } + } + catch (...) 
{ + return false; + } +#endif + return false; } void File::MapBase::sync() diff --git a/src/realm/util/file.hpp b/src/realm/util/file.hpp index 464c01ac9b0..99586a7d51d 100644 --- a/src/realm/util/file.hpp +++ b/src/realm/util/file.hpp @@ -646,8 +646,10 @@ class File { struct MapBase { void* m_addr = nullptr; mutable size_t m_size = 0; + size_t m_reservation_size = 0; size_t m_offset = 0; FileDesc m_fd; + AccessMode m_a; MapBase() noexcept; ~MapBase() noexcept; @@ -659,12 +661,18 @@ class File { // Use void map(const File&, AccessMode, size_t size, int map_flags, size_t offset = 0); + // reserve address space for later mapping operations. + // returns false if reservation can't be done. + bool try_reserve(const File&, AccessMode, size_t size, size_t offset = 0); void remap(const File&, AccessMode, size_t size, int map_flags); void unmap() noexcept; // fully update any process shared representation (e.g. buffer cache). // other processes will be able to see changes, but a full platform crash // may loose data void flush(); + // try to extend the mapping in-place. Virtual address space must have + // been set aside earlier by a call to reserve() + bool try_extend_to(size_t size) noexcept; // fully synchronize any underlying storage. After completion, a full platform // crash will *not* have lost data. void sync(); @@ -764,11 +772,13 @@ class File::Map : private MapBase { unmap(); m_addr = other.get_addr(); m_size = other.m_size; + m_a = other.m_a; + m_reservation_size = other.m_reservation_size; m_offset = other.m_offset; m_fd = other.m_fd; other.m_offset = 0; other.m_addr = nullptr; - other.m_size = 0; + other.m_size = other.m_reservation_size = 0; #if REALM_ENABLE_ENCRYPTION m_encrypted_mapping = other.m_encrypted_mapping; other.m_encrypted_mapping = nullptr; @@ -793,6 +803,8 @@ class File::Map : private MapBase { /// currently attached to a memory mapped file. 
void unmap() noexcept; + bool try_reserve(const File&, AccessMode a = access_ReadOnly, size_t size = sizeof(T), size_t offset = 0); + /// See File::remap(). /// /// Calling this function on a Map instance that is not currently @@ -801,6 +813,9 @@ class File::Map : private MapBase { /// returned by get_addr(). T* remap(const File&, AccessMode = access_ReadOnly, size_t size = sizeof(T), int map_flags = 0); + /// Try to extend the existing mapping to a given size + bool try_extend_to(size_t size) noexcept; + /// See File::sync_map(). /// /// Calling this function on an instance that is not currently @@ -1187,6 +1202,12 @@ inline T* File::Map::map(const File& f, AccessMode a, size_t size, int map_fl return static_cast(m_addr); } +template +inline bool File::Map::try_reserve(const File& f, AccessMode a, size_t size, size_t offset) +{ + return MapBase::try_reserve(f, a, size, offset); +} + template inline void File::Map::unmap() noexcept { @@ -1204,6 +1225,12 @@ inline T* File::Map::remap(const File& f, AccessMode a, size_t size, int map_ return static_cast(m_addr); } +template +inline bool File::Map::try_extend_to(size_t size) noexcept +{ + return MapBase::try_extend_to(sizeof(T) * size); +} + template inline void File::Map::sync() { diff --git a/src/realm/util/file_mapper.cpp b/src/realm/util/file_mapper.cpp index 7fa7bb716d6..cc88d16d0b5 100644 --- a/src/realm/util/file_mapper.cpp +++ b/src/realm/util/file_mapper.cpp @@ -240,10 +240,10 @@ static void ensure_reclaimer_thread_runs() struct ReclaimerThreadStopper { ~ReclaimerThreadStopper() { - if (reclaimer_thread) { - reclaimer_shutdown = true; - reclaimer_thread->join(); - } + if (reclaimer_thread) { + reclaimer_shutdown = true; + reclaimer_thread->join(); + } } } reclaimer_thread_stopper; #else // REALM_PLATFORM_APPLE @@ -262,7 +262,9 @@ static void ensure_reclaimer_thread_runs() } reclaimer_timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, reclaimer_queue); dispatch_source_set_timer(reclaimer_timer, 
DISPATCH_TIME_NOW, NSEC_PER_SEC, NSEC_PER_SEC); - dispatch_source_set_event_handler(reclaimer_timer, ^{ reclaim_pages(); }); + dispatch_source_set_event_handler(reclaimer_timer, ^{ + reclaim_pages(); + }); dispatch_resume(reclaimer_timer); } } @@ -273,7 +275,8 @@ struct ReclaimerThreadStopper { if (reclaimer_timer) { dispatch_source_cancel(reclaimer_timer); // Block until any currently-running timer tasks are done - dispatch_sync(reclaimer_queue, ^{}); + dispatch_sync(reclaimer_queue, ^{ + }); dispatch_release(reclaimer_timer); dispatch_release(reclaimer_queue); } @@ -298,8 +301,9 @@ void encryption_note_reader_start(SharedFileInfo& info, const void* reader_id) { UniqueLock lock(mapping_mutex); ensure_reclaimer_thread_runs(); - auto j = std::find_if(info.readers.begin(), info.readers.end(), - [=](auto& reader) { return reader.reader_ID == reader_id; }); + auto j = std::find_if(info.readers.begin(), info.readers.end(), [=](auto& reader) { + return reader.reader_ID == reader_id; + }); if (j == info.readers.end()) { ReaderInfo i = {reader_id, info.current_version}; info.readers.push_back(i); @@ -493,6 +497,7 @@ SharedFileInfo* get_file_info_for_file(File& file) return it->info.get(); } + namespace { EncryptedFileMapping* add_mapping(void* addr, size_t size, FileDesc fd, size_t file_offset, File::AccessMode access, const char* encryption_key) @@ -635,6 +640,28 @@ void* mmap(FileDesc fd, size_t size, File::AccessMode access, size_t offset, con } } + +EncryptedFileMapping* reserve_mapping(void* addr, FileDesc fd, size_t offset, File::AccessMode access, + const char* encryption_key) +{ + return add_mapping(addr, 0, fd, offset, access, encryption_key); +} + +void extend_encrypted_mapping(EncryptedFileMapping* mapping, void* addr, size_t offset, size_t old_size, + size_t new_size) +{ + LockGuard lock(mapping_mutex); + auto m = find_mapping_for_addr(addr, old_size); + REALM_ASSERT(m); + m->size = new_size; + mapping->extend_to(offset, new_size); +} + +void 
remove_encrypted_mapping(void* addr, size_t size) +{ + remove_mapping(addr, size); +} + void* mmap_reserve(FileDesc fd, size_t reservation_size, File::AccessMode access, size_t offset_in_file, const char* enc_key, EncryptedFileMapping*& mapping) { @@ -682,7 +709,7 @@ void* mmap_anon(size_t size) ULARGE_INTEGER s; s.QuadPart = size; - + hMapFile = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE, s.HighPart, s.LowPart, nullptr); if (hMapFile == NULL) { throw std::system_error(GetLastError(), std::system_category(), "CreateFileMapping() failed"); @@ -712,6 +739,7 @@ void* mmap_anon(size_t size) void* mmap_fixed(FileDesc fd, void* address_request, size_t size, File::AccessMode access, size_t offset, const char* enc_key) { + _impl::SimulatedFailure::trigger_mmap(size); static_cast(enc_key); // FIXME: Consider removing this parameter #ifdef _WIN32 REALM_ASSERT(false); @@ -965,5 +993,5 @@ void msync(FileDesc fd, void* addr, size_t size) } #endif } -} -} +} // namespace util +} // namespace realm diff --git a/src/realm/util/file_mapper.hpp b/src/realm/util/file_mapper.hpp index bba0bac9089..bbdecc49aea 100644 --- a/src/realm/util/file_mapper.hpp +++ b/src/realm/util/file_mapper.hpp @@ -92,6 +92,12 @@ void* mmap_fixed(FileDesc fd, void* address_request, size_t size, File::AccessMo void* mmap_reserve(FileDesc fd, size_t size, File::AccessMode am, size_t offset, const char* enc_key, EncryptedFileMapping*& mapping); +EncryptedFileMapping* reserve_mapping(void* addr, FileDesc fd, size_t offset, File::AccessMode access, + const char* encryption_key); + +void extend_encrypted_mapping(EncryptedFileMapping* mapping, void* addr, size_t offset, size_t old_size, + size_t new_size); +void remove_encrypted_mapping(void* addr, size_t size); void do_encryption_read_barrier(const void* addr, size_t size, HeaderToSize header_to_size, EncryptedFileMapping* mapping); diff --git a/test/test_alloc.cpp b/test/test_alloc.cpp index 48d3752b1f4..52efab97977 100644 --- 
a/test/test_alloc.cpp +++ b/test/test_alloc.cpp @@ -30,6 +30,7 @@ #include #include "test.hpp" +#include "util/test_only.hpp" using namespace realm; using namespace realm::util; @@ -369,8 +370,9 @@ NONCONCURRENT_TEST_IF(Alloc_MapFailureRecovery, _impl::SimulatedFailure::is_enab alloc.get_file().resize(page_size * 2); alloc.update_reader_view(page_size * 2); CHECK_EQUAL(alloc.get_baseline(), page_size * 2); - CHECK_EQUAL(initial_version + 1, alloc.get_mapping_version()); - CHECK_NOT_EQUAL(initial_translated, alloc.translate(1000)); + // These two no longer apply: + // CHECK_EQUAL(initial_version + 1, alloc.get_mapping_version()); + // CHECK_NOT_EQUAL(initial_translated, alloc.translate(1000)); // Delete the old mapping. Will double-delete it if we incorrectly added // the mapping in the call that failed. diff --git a/test/test_lang_bind_helper.cpp b/test/test_lang_bind_helper.cpp index 9f0d24856c1..d1091c7da9c 100644 --- a/test/test_lang_bind_helper.cpp +++ b/test/test_lang_bind_helper.cpp @@ -339,8 +339,8 @@ class MyHistory : public _impl::History { m_changesets[m_incoming_version].changes = std::move(m_incoming_changeset); m_changesets[m_incoming_version].finalized = true; } - void get_changesets(version_type begin_version, version_type end_version, BinaryIterator* buffer) const - noexcept override + void get_changesets(version_type begin_version, version_type end_version, + BinaryIterator* buffer) const noexcept override { size_t n = size_t(end_version - begin_version); for (size_t i = 0; i < n; ++i) { diff --git a/test/test_shared.cpp b/test/test_shared.cpp index 9218d3ae70c..aee52c1aca8 100644 --- a/test/test_shared.cpp +++ b/test/test_shared.cpp @@ -100,7 +100,7 @@ ONLY(Query_QuickSort2) { Random random(random_int()); // Seed from slow global generator - // Triggers QuickSort because range > len + // Triggers QuickSort because range > len Table ttt; auto ints = ttt.add_column(type_Int, "1"); auto strings = ttt.add_column(type_String, "2"); diff --git 
a/test/test_table.cpp b/test/test_table.cpp index 6531efce848..f6d3f167fde 100644 --- a/test/test_table.cpp +++ b/test/test_table.cpp @@ -98,9 +98,7 @@ namespace { // both non-nullable: template struct value_copier { - value_copier(bool) - { - } + value_copier(bool) {} T2 operator()(T1 from_value, bool = false) { return from_value; @@ -145,9 +143,7 @@ struct value_copier, T2> { // identical to non-specialized case, but specialization needed to avoid capture by 2 previous decls template struct value_copier, Optional> { - value_copier(bool) - { - } + value_copier(bool) {} Optional operator()(Optional from_value, bool) { return from_value; @@ -240,7 +236,7 @@ struct value_copier { return from_value; } }; -} +} // namespace #ifdef JAVA_MANY_COLUMNS_CRASH @@ -431,9 +427,9 @@ TEST(Table_DateTimeMinMax) auto col = table->add_column(type_Timestamp, "time", true); - // We test different code paths of the internal Core minmax method. First a null value as initial "best candidate", - // then non-null first. For each case we then try both a substitution of best candidate, then non-substitution. 4 - // permutations in total. + // We test different code paths of the internal Core minmax method. First a null value as initial "best + // candidate", then non-null first. For each case we then try both a substitution of best candidate, then + // non-substitution. 4 permutations in total. 
std::vector objs(3); objs[0] = table->create_object(); @@ -517,8 +513,8 @@ TEST(Table_MinMaxSingleNullRow) table->create_object(); - CHECK(table->maximum_timestamp(date_col).is_null()); // max on table - table->where().find_all().maximum_timestamp(date_col, &key); // max on tableview + CHECK(table->maximum_timestamp(date_col).is_null()); // max on table + table->where().find_all().maximum_timestamp(date_col, &key); // max on tableview CHECK(key == null_key); table->where().maximum_timestamp(date_col, &key); // max on query CHECK(key == null_key); @@ -549,8 +545,8 @@ TEST(Table_MinMaxSingleNullRow) table->create_object(); - CHECK(table->minimum_timestamp(date_col).is_null()); // max on table - table->where().find_all().minimum_timestamp(date_col, &key); // max on tableview + CHECK(table->minimum_timestamp(date_col).is_null()); // max on table + table->where().find_all().minimum_timestamp(date_col, &key); // max on tableview CHECK(key == null_key); table->where().minimum_timestamp(date_col, &key); // max on query CHECK(key == null_key); @@ -649,8 +645,7 @@ TEST(TableView_AggregateBugs) TEST(Table_AggregateFuzz) { // Tests sum, avg, min, max on Table, TableView, Query, for types float, Timestamp, int - for(int iter = 0; iter < 50 + 1000 * TEST_DURATION; iter++) - { + for (int iter = 0; iter < 50 + 1000 * TEST_DURATION; iter++) { Group g; TableRef table = g.add_table("test_table"); @@ -829,7 +824,6 @@ TEST(Table_AggregateFuzz) i = table->where().find_all().sum_int(int_col); CHECK_EQUAL(i, sum); - } // Test methods on Query @@ -2271,8 +2265,8 @@ TEST(Table_NullableChecks) Obj obj = t.create_object(); StringData sd; // construct a null reference - Timestamp ts; // null - BinaryData bd;; // null + Timestamp ts; // null + BinaryData bd; // null obj.set(str_col, sd); obj.set(int_col, realm::null()); obj.set(bool_col, realm::null()); @@ -3595,7 +3589,7 @@ NONCONCURRENT_TEST(Table_object_seq_rnd) { #ifdef PERFORMACE_TESTING size_t rows = 1'000'000; - int runs = 100; // runs 
for building scenario + int runs = 100; // runs for building scenario #else size_t rows = 100'000; int runs = 100; @@ -4530,7 +4524,7 @@ struct Tester { template ColKey Tester::col; -} +} // namespace // The run() method will first add lots of objects, and then remove them. This will test // both node splits and empty leaf destruction and get good search index code coverage @@ -4611,10 +4605,26 @@ std::string generate_value() return str; } -template<> bool generate_value() { return test_util::random_int() & 0x1; } -template<> float generate_value() { return float(1.0 * test_util::random_int() / (test_util::random_int(1, 1000))); } -template<> double generate_value() { return 1.0 * test_util::random_int() / (test_util::random_int(1, 1000)); } -template<> Timestamp generate_value() { return Timestamp(test_util::random_int(0, 1000000), test_util::random_int(0, 1000000000)); } +template <> +bool generate_value() +{ + return test_util::random_int() & 0x1; +} +template <> +float generate_value() +{ + return float(1.0 * test_util::random_int() / (test_util::random_int(1, 1000))); +} +template <> +double generate_value() +{ + return 1.0 * test_util::random_int() / (test_util::random_int(1, 1000)); +} +template <> +Timestamp generate_value() +{ + return Timestamp(test_util::random_int(0, 1000000), test_util::random_int(0, 1000000000)); +} template <> Decimal128 generate_value() { @@ -5282,7 +5292,8 @@ TEST(Table_ChangePKNullability) table->set_nullability(pk_col, false, true); } -TEST(Table_MultipleObjs) { +TEST(Table_MultipleObjs) +{ SHARED_GROUP_TEST_PATH(path); std::unique_ptr hist(make_in_realm_history());