From 54f23a4524d3d5f86df56663eb984f81490ca600 Mon Sep 17 00:00:00 2001
From: Badrish Chandramouli
Date: Thu, 13 Dec 2018 18:30:30 -0800
Subject: [PATCH] WIP: Codegen-free FASTER C# (#67)

* Initial checkin
* Removed Key* from benchmarks
* Changed callback functions from static methods to an object.
* Removed raw pointers to the Value type.
* All user types are now ref-based.
* Generic version of FASTER compiles and works for blittable types and in-memory operations. BasicFASTERTests test cases now pass. There is no performance regression for the 50:50 read/upsert YCSB workload. ManagedSample1 is now based on generic FASTER. Support for non-blittable objects has not yet been added. Codegen fails, but can be ignored for now.
* Updates to support the generic version
* Updates to refactor the allocator
* Testing an alternative allocator structure
* Fully working generic support; still needs cleanup
* Fixed a multi-threading bug in the generic allocator.
* Further updates; object recovery not yet passing.
* Fixes to generic object recovery. All tests pass! Cleanup still pending.
* Updated samples
* More cleanup and refactoring
* More cleanup and code cutting.
* Heavy simplification of the user API and interfaces.
* Removed InitialValueLength from the user callback functions
* Updated the AddressInfo calculation for AnyCPU 32-bit compatibility. Other cleanup.
* Cleanup of inlining directives
* Changed the key comparer to use a separate IFasterEqualityComparer interface. There are now no interface restrictions or requirements on any of the FASTER key/value types.
* Cleanup, simplification, updates. The persistence API was cleaned up as well.
* Simplified Guid determination for the persistence callback.
* Fixed a test break in AnyCPU-Debug due to a different struct layout.
* Fixed the checkpointing directory settings and the log creation and initialization code.
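For orientation, the following sketch (not part of the patch itself) pieces together the new codegen-free calling convention from the benchmark and sample changes in this diff. It uses the Key, Value, Input, Output and Functions types defined under cs/benchmark in this patch; the generic parameter order on FasterKV and the temporary log path are assumptions for illustration.

// Orientation sketch only -- not part of this patch. Key, Value, Input, Output
// and Functions are the FASTER.benchmark types added in this diff. The FasterKV
// generic parameter order and the log path are assumptions.
using System;
using System.IO;
using FASTER.core;
using FASTER.benchmark;

class UsageSketch
{
    static void Main()
    {
        // Devices.CreateLogDevice replaces the old FasterFactory.CreateLogDevice.
        var device = Devices.CreateLogDevice(Path.GetTempPath() + "hlog");

        // No generated code: FasterKV is constructed directly with the user's
        // Functions object and a LogSettings instance.
        var store = new FasterKV<Key, Value, Input, Output, Empty, Functions>
            (1L << 20, new Functions(), new LogSettings { LogDevice = device });

        store.StartSession();

        var key = new Key { value = 23 };
        var value = new Value { value = 23 };
        var input = default(Input);
        var output = default(Output);

        // All operations now take ref parameters instead of raw pointers.
        store.Upsert(ref key, ref value, Empty.Default, 0);
        var status = store.Read(ref key, ref input, ref output, Empty.Default, 1);
        if (status == Status.PENDING)
            store.CompletePending(true);

        store.StopSession();
        Console.WriteLine(output.value.value == key.value ? "Success" : "Mismatch");
    }
}

The key point of the change is that nothing above is generated: key equality comes from the IFasterEqualityComparer implementation on Key, and all callbacks live in the Functions struct passed to the FasterKV constructor.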
---
 .gitignore | 3 +-
 cs/FASTER.sln | 112 +-
 cs/benchmark/ConcurrentDictionaryBenchmark.cs | 22 +-
 cs/benchmark/FasterYcsbBenchmark.cs | 61 +-
 cs/benchmark/Functions.cs | 77 +
 .../Index/UserCode => benchmark}/Input.cs | 9 +-
 cs/benchmark/Key.cs | 39 +
 .../Index/UserCode => benchmark}/Output.cs | 10 +-
 cs/benchmark/Value.cs | 24 +
 cs/playground/ClassCache/Program.cs | 30 +-
 cs/playground/ClassCache/Types.cs | 87 +-
 .../App.config | 0
 .../ClassSample.csproj} | 2 +-
 cs/playground/ClassSample/Program.cs | 175 +++
 .../Properties/AssemblyInfo.cs | 0
 cs/playground/ManagedSample1/Functions.cs | 93 --
 cs/playground/ManagedSample1/ICustomFaster.cs | 157 --
 cs/playground/ManagedSample1/KeyStruct.cs | 66 -
 cs/playground/ManagedSample1/OutputStruct.cs | 16 -
 cs/playground/ManagedSample1/ValueStruct.cs | 72 -
 cs/playground/ManagedSample2/Program.cs | 5 +-
 cs/playground/ManagedSample3/Program.cs | 145 --
 cs/playground/ManagedSample4/Program.cs | 157 --
 cs/playground/ManagedSampleCore/Functions.cs | 2 +-
 .../ManagedSampleCore/ICustomFaster.cs | 2 +-
 .../ManagedSampleCore/InputStruct.cs | 2 +-
 cs/playground/ManagedSampleCore/KeyStruct.cs | 2 +-
 .../ManagedSampleCore/OutputStruct.cs | 2 +-
 cs/playground/ManagedSampleCore/Program.cs | 2 +-
 .../ManagedSampleCore/ValueStruct.cs | 2 +-
 .../App.config | 0
 .../MixedSample.csproj} | 2 +-
 cs/playground/MixedSample/Program.cs | 173 +++
 .../App.config | 0
 cs/playground/StructSample/Functions.cs | 74 +
 cs/playground/StructSample/Program.cs | 64 +
 .../Properties/AssemblyInfo.cs | 0
 .../StructSample.csproj} | 4 +-
 cs/playground/StructSample/Types.cs | 42 +
 cs/playground/StructSampleCore/App.config | 6 +
 cs/playground/StructSampleCore/Functions.cs | 74 +
 .../InputStruct.cs | 10 +-
 cs/playground/StructSampleCore/KeyStruct.cs | 25 +
 .../StructSampleCore/OutputStruct.cs | 12 +
 .../Program.cs | 25 +-
 .../Properties/AssemblyInfo.cs | 22 +
 .../StructSampleCore/StructSampleCore.csproj | 39 +
 cs/playground/StructSampleCore/ValueStruct.cs | 16 +
 cs/playground/SumStore/AdId.cs | 62 -
 .../SumStore/ConcurrentRecoveryTest.cs | 104 +-
 cs/playground/SumStore/ConcurrentTest.cs | 57 +-
 cs/playground/SumStore/Functions.cs | 87 --
 cs/playground/SumStore/ICustomFaster.cs | 41 -
 cs/playground/SumStore/Input.cs | 19 -
 cs/playground/SumStore/NumClicks.cs | 69 -
 cs/playground/SumStore/Output.cs | 16 -
 cs/playground/SumStore/Program.cs | 2 +-
 .../SumStore/SingleThreadedRecoveryTest.cs | 60 +-
 cs/playground/SumStore/SumStoreTypes.cs | 104 ++
 cs/src/core/Allocator/AllocatorBase.cs | 1350 +++++++++++++++++
 cs/src/core/Allocator/AsyncIOContext.cs | 18 +-
 cs/src/core/Allocator/BlittableAllocator.cs | 270 ++++
 cs/src/core/Allocator/GenericAllocator.cs | 837 ++++++++++
 cs/src/core/Allocator/IPageHandlers.cs | 55 -
 cs/src/core/Allocator/PMMAsyncIO.cs | 553 -------
 .../core/Allocator/PersistentMemoryMalloc.cs | 846 -----------
 cs/src/core/Codegen/CompilerBase.cs | 246 ---
 .../core/Codegen/FasterHashTableCompiler.cs | 127 --
 cs/src/core/Codegen/HashTableManager.cs | 51 -
 ...MixedBlitManagedFasterHashTableCompiler.cs | 260 ----
 cs/src/core/Codegen/RoslynHelpers.cs | 224 ---
 cs/src/core/Codegen/TypeReplacer.cs | 113 --
 cs/src/core/Codegen/TypeReplacerCompiler.cs | 38 -
 cs/src/core/Codegen/Utilities.cs | 238 ---
 cs/src/core/Device/Devices.cs | 68 +
 cs/src/core/Device/IDevice.cs | 10 +-
 cs/src/core/Device/LocalStorageDevice.cs | 42 +-
 .../core/Device/ManagedLocalStorageDevice.cs | 45 +-
 cs/src/core/Device/NullDevice.cs | 2 +-
 cs/src/core/Device/StorageDeviceBase.cs | 31 +-
 cs/src/core/Epochs/LightEpoch.cs | 8 +-
 cs/src/core/FASTER.core.csproj | 22 +-
 cs/src/core/FASTER.core.debug.nuspec | 9 +-
 cs/src/core/FASTER.core.nuspec | 9 +-
 cs/src/core/Index/Common/AddressInfo.cs | 52 +-
 .../core/Index/Common/CheckpointSettings.cs | 39 +
 cs/src/core/Index/Common/Contexts.cs | 156 +-
 cs/src/core/Index/Common/Layout.cs | 71 -
 cs/src/core/Index/Common/LogSettings.cs | 68 +
 cs/src/core/Index/Common/RecordInfo.cs | 16 +-
 cs/src/core/Index/FASTER/AsyncIO.cs | 150 --
 cs/src/core/Index/FASTER/Checkpoint.cs | 56 +-
 cs/src/core/Index/FASTER/FASTER.cs | 273 +---
 cs/src/core/Index/FASTER/FASTERImpl.cs | 340 +++--
 cs/src/core/Index/FASTER/FASTERThread.cs | 72 +-
 cs/src/core/Index/FASTER/Recovery.cs | 55 +-
 .../Interfaces/IFasterEqualityComparer.cs | 26 +
 .../IFASTER.cs => Interfaces/IFasterKV.cs} | 14 +-
 cs/src/core/Index/Interfaces/IFunctions.cs | 108 ++
 .../Index/Interfaces/IObjectSerializer.cs | 111 ++
 cs/src/core/Index/UserCode/Context.cs | 24 -
 cs/src/core/Index/UserCode/Functions.cs | 156 --
 cs/src/core/Index/UserCode/Key.cs | 172 ---
 cs/src/core/Index/UserCode/Value.cs | 316 ----
 .../core/ManagedLayer/BlittableTypeWrapper.cs | 59 -
 cs/src/core/ManagedLayer/FASTERFactory.cs | 198 ---
 cs/src/core/ManagedLayer/IFASTERKey.cs | 45 -
 cs/src/core/ManagedLayer/IFASTERValue.cs | 32 -
 cs/src/core/ManagedLayer/IFASTER_Mixed.cs | 156 --
 cs/src/core/ManagedLayer/IManagedFAST.cs | 159 --
 cs/src/core/ManagedLayer/IUserFunctions.cs | 80 -
 .../core/ManagedLayer/MixedContextWrapper.cs | 44 -
 .../ManagedLayer/MixedFunctionsWrapper.cs | 139 --
 cs/src/core/ManagedLayer/MixedInputWrapper.cs | 57 -
 cs/src/core/ManagedLayer/MixedKeyWrapper.cs | 135 --
 cs/src/core/ManagedLayer/MixedManagedFAST.cs | 502 ------
 .../core/ManagedLayer/MixedOutputWrapper.cs | 40 -
 .../core/ManagedLayer/MixedUnwrappedTypes.cs | 98 --
 .../core/ManagedLayer/MixedUserFunctions.cs | 46 -
 cs/src/core/ManagedLayer/MixedValueWrapper.cs | 347 -----
 .../{NativeBufferPool.cs => BufferPool.cs} | 16 +-
 .../core/Utilities/FasterEqualityComparer.cs | 28 +
 cs/src/core/Utilities/PageAsyncResultTypes.cs | 12 +-
 cs/src/core/Utilities/Utility.cs | 47 +-
 cs/test/BasicDiskFASTERTests.cs | 75 +
 cs/test/BasicFASTERTests.cs | 36 +-
 cs/test/ComponentRecoveryTests.cs | 6 +-
 cs/test/FASTER.test.csproj | 3 +-
 cs/test/FullRecoveryTests.cs | 86 +-
 cs/test/LargeObjectTests.cs | 80 +-
 cs/test/MiscFASTERTests.cs | 107 ++
 cs/test/ObjectFASTERTests.cs | 50 +-
 cs/test/ObjectRecoveryTest.cs | 50 +-
 cs/test/ObjectRecoveryTestTypes.cs | 102 +-
 cs/test/ObjectTestTypes.cs | 192 ++-
 cs/test/RecoveryTestTypes.cs | 207 +--
 cs/test/SimpleRecoveryTest.cs | 181 +--
 cs/test/SimpleTests.cs | 34 +-
 cs/test/TestTypes.cs | 220 +--
 139 files changed, 5432 insertions(+), 8569 deletions(-)
 create mode 100644 cs/benchmark/Functions.cs
 rename cs/{src/core/Index/UserCode => benchmark}/Input.cs (57%)
 create mode 100644 cs/benchmark/Key.cs
 rename cs/{src/core/Index/UserCode => benchmark}/Output.cs (68%)
 create mode 100644 cs/benchmark/Value.cs
 rename cs/playground/{ManagedSample3 => ClassSample}/App.config (100%)
 rename cs/playground/{ManagedSample3/ManagedSample3.csproj => ClassSample/ClassSample.csproj} (96%)
 create mode 100644 cs/playground/ClassSample/Program.cs
 rename cs/playground/{ManagedSample3 => ClassSample}/Properties/AssemblyInfo.cs (100%)
 delete mode 100644 cs/playground/ManagedSample1/Functions.cs
 delete mode 100644 cs/playground/ManagedSample1/ICustomFaster.cs
 delete mode 100644 cs/playground/ManagedSample1/KeyStruct.cs
 delete mode 100644 cs/playground/ManagedSample1/OutputStruct.cs
 delete mode 100644
cs/playground/ManagedSample1/ValueStruct.cs delete mode 100644 cs/playground/ManagedSample3/Program.cs delete mode 100644 cs/playground/ManagedSample4/Program.cs rename cs/playground/{ManagedSample4 => MixedSample}/App.config (100%) rename cs/playground/{ManagedSample4/ManagedSample4.csproj => MixedSample/MixedSample.csproj} (95%) create mode 100644 cs/playground/MixedSample/Program.cs rename cs/playground/{ManagedSample1 => StructSample}/App.config (100%) create mode 100644 cs/playground/StructSample/Functions.cs create mode 100644 cs/playground/StructSample/Program.cs rename cs/playground/{ManagedSample1 => StructSample}/Properties/AssemblyInfo.cs (100%) rename cs/playground/{ManagedSample1/ManagedSample1.csproj => StructSample/StructSample.csproj} (91%) create mode 100644 cs/playground/StructSample/Types.cs create mode 100644 cs/playground/StructSampleCore/App.config create mode 100644 cs/playground/StructSampleCore/Functions.cs rename cs/playground/{ManagedSample1 => StructSampleCore}/InputStruct.cs (54%) create mode 100644 cs/playground/StructSampleCore/KeyStruct.cs create mode 100644 cs/playground/StructSampleCore/OutputStruct.cs rename cs/playground/{ManagedSample1 => StructSampleCore}/Program.cs (64%) create mode 100644 cs/playground/StructSampleCore/Properties/AssemblyInfo.cs create mode 100644 cs/playground/StructSampleCore/StructSampleCore.csproj create mode 100644 cs/playground/StructSampleCore/ValueStruct.cs delete mode 100644 cs/playground/SumStore/AdId.cs delete mode 100644 cs/playground/SumStore/Functions.cs delete mode 100644 cs/playground/SumStore/ICustomFaster.cs delete mode 100644 cs/playground/SumStore/Input.cs delete mode 100644 cs/playground/SumStore/NumClicks.cs delete mode 100644 cs/playground/SumStore/Output.cs create mode 100644 cs/playground/SumStore/SumStoreTypes.cs create mode 100644 cs/src/core/Allocator/AllocatorBase.cs create mode 100644 cs/src/core/Allocator/BlittableAllocator.cs create mode 100644 cs/src/core/Allocator/GenericAllocator.cs delete mode 100644 cs/src/core/Allocator/IPageHandlers.cs delete mode 100644 cs/src/core/Allocator/PMMAsyncIO.cs delete mode 100644 cs/src/core/Allocator/PersistentMemoryMalloc.cs delete mode 100644 cs/src/core/Codegen/CompilerBase.cs delete mode 100644 cs/src/core/Codegen/FasterHashTableCompiler.cs delete mode 100644 cs/src/core/Codegen/HashTableManager.cs delete mode 100644 cs/src/core/Codegen/MixedBlitManagedFasterHashTableCompiler.cs delete mode 100644 cs/src/core/Codegen/RoslynHelpers.cs delete mode 100644 cs/src/core/Codegen/TypeReplacer.cs delete mode 100644 cs/src/core/Codegen/TypeReplacerCompiler.cs delete mode 100644 cs/src/core/Codegen/Utilities.cs create mode 100644 cs/src/core/Device/Devices.cs create mode 100644 cs/src/core/Index/Common/CheckpointSettings.cs delete mode 100644 cs/src/core/Index/Common/Layout.cs create mode 100644 cs/src/core/Index/Common/LogSettings.cs delete mode 100644 cs/src/core/Index/FASTER/AsyncIO.cs create mode 100644 cs/src/core/Index/Interfaces/IFasterEqualityComparer.cs rename cs/src/core/Index/{FASTER/IFASTER.cs => Interfaces/IFasterKV.cs} (92%) create mode 100644 cs/src/core/Index/Interfaces/IFunctions.cs create mode 100644 cs/src/core/Index/Interfaces/IObjectSerializer.cs delete mode 100644 cs/src/core/Index/UserCode/Context.cs delete mode 100644 cs/src/core/Index/UserCode/Functions.cs delete mode 100644 cs/src/core/Index/UserCode/Key.cs delete mode 100644 cs/src/core/Index/UserCode/Value.cs delete mode 100644 cs/src/core/ManagedLayer/BlittableTypeWrapper.cs delete mode 100644 
cs/src/core/ManagedLayer/FASTERFactory.cs delete mode 100644 cs/src/core/ManagedLayer/IFASTERKey.cs delete mode 100644 cs/src/core/ManagedLayer/IFASTERValue.cs delete mode 100644 cs/src/core/ManagedLayer/IFASTER_Mixed.cs delete mode 100644 cs/src/core/ManagedLayer/IManagedFAST.cs delete mode 100644 cs/src/core/ManagedLayer/IUserFunctions.cs delete mode 100644 cs/src/core/ManagedLayer/MixedContextWrapper.cs delete mode 100644 cs/src/core/ManagedLayer/MixedFunctionsWrapper.cs delete mode 100644 cs/src/core/ManagedLayer/MixedInputWrapper.cs delete mode 100644 cs/src/core/ManagedLayer/MixedKeyWrapper.cs delete mode 100644 cs/src/core/ManagedLayer/MixedManagedFAST.cs delete mode 100644 cs/src/core/ManagedLayer/MixedOutputWrapper.cs delete mode 100644 cs/src/core/ManagedLayer/MixedUnwrappedTypes.cs delete mode 100644 cs/src/core/ManagedLayer/MixedUserFunctions.cs delete mode 100644 cs/src/core/ManagedLayer/MixedValueWrapper.cs rename cs/src/core/Utilities/{NativeBufferPool.cs => BufferPool.cs} (91%) create mode 100644 cs/src/core/Utilities/FasterEqualityComparer.cs create mode 100644 cs/test/BasicDiskFASTERTests.cs create mode 100644 cs/test/MiscFASTERTests.cs diff --git a/.gitignore b/.gitignore index 71cf31617..12971f9e1 100644 --- a/.gitignore +++ b/.gitignore @@ -191,4 +191,5 @@ packages/ *.VC.opendb .vs/ *.lib -nativebin/ \ No newline at end of file +nativebin/ +/cs/benchmark/Properties/launchSettings.json diff --git a/cs/FASTER.sln b/cs/FASTER.sln index 9d1efd31d..5ed1397f9 100644 --- a/cs/FASTER.sln +++ b/cs/FASTER.sln @@ -5,35 +5,31 @@ VisualStudioVersion = 15.0.27004.2008 MinimumVisualStudioVersion = 10.0.40219.1 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "FASTER.benchmark", "benchmark\FASTER.benchmark.csproj", "{33A732D1-2B58-4FEE-9696-B9483496229F}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "FASTER.test", "test\FASTER.test.csproj", "{0DC7F5A2-E963-4E7F-BD37-6F7864B726F2}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "FASTER.test", "test\FASTER.test.csproj", "{0DC7F5A2-E963-4E7F-BD37-6F7864B726F2}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "playground", "playground", "{E6026D6A-01C5-4582-B2C1-64751490DABE}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ManagedSample1", "playground\ManagedSample1\ManagedSample1.csproj", "{17BDD0A5-98E5-464A-8A00-050D9FF4C562}" -EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "benchmark", "benchmark", "{CA6AB459-A31A-4C15-B1A6-A82C349B54B4}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{81B3B5D1-70F6-4979-AC76-003F9A6B316B}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ManagedSample2", "playground\ManagedSample2\ManagedSample2.csproj", "{7DB87633-9CAB-4AE4-9ED0-AA6E77448486}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ManagedSample3", "playground\ManagedSample3\ManagedSample3.csproj", "{3E571C7C-59B5-485C-AC78-3F34D3511CD2}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SumStore", "playground\SumStore\SumStore.csproj", "{05D61B37-9714-4234-9961-384A63F7175E}" -EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ManagedSample4", "playground\ManagedSample4\ManagedSample4.csproj", "{E1AC9797-ABE3-4881-A51B-37D8687AAE35}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SumStore", "playground\SumStore\SumStore.csproj", "{05D61B37-9714-4234-9961-384A63F7175E}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ClassCache", 
"playground\ClassCache\ClassCache.csproj", "{10FD4868-BB16-442B-B0AC-18AE278D9C60}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "NestedTypesTest", "playground\NestedTypesTest\NestedTypesTest.csproj", "{2D5F23F7-3184-43EC-A7F1-C924F7FEF786}" -EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{28800357-C8CE-4CD0-A2AD-D4A910ABB496}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "core", "core", "{9531E3D2-217B-4446-98E8-E48F0FDD1452}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "FASTER.core", "src\core\FASTER.core.csproj", "{F947BC6A-2943-4AC7-ACA7-F17351E25FE7}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ManagedSampleCore", "playground\ManagedSampleCore\ManagedSampleCore.csproj", "{C9391533-1F31-47F6-BE08-9642C95401A8}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "StructSample", "playground\StructSample\StructSample.csproj", "{494703CF-C1C9-4800-B994-EF3974EB051D}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "StructSampleCore", "playground\StructSampleCore\StructSampleCore.csproj", "{D938612F-4B99-409E-953E-28A3A027B0E3}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "ClassSample", "playground\ClassSample\ClassSample.csproj", "{18B6FB88-202F-4DAD-A582-17E1CEB873EC}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "MixedSample", "playground\MixedSample\MixedSample.csproj", "{8B9F682D-145C-4085-AD8A-845255597F5D}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -59,30 +55,6 @@ Global {0DC7F5A2-E963-4E7F-BD37-6F7864B726F2}.Release|Any CPU.Build.0 = Release|Any CPU {0DC7F5A2-E963-4E7F-BD37-6F7864B726F2}.Release|x64.ActiveCfg = Release|x64 {0DC7F5A2-E963-4E7F-BD37-6F7864B726F2}.Release|x64.Build.0 = Release|x64 - {17BDD0A5-98E5-464A-8A00-050D9FF4C562}.Debug|Any CPU.ActiveCfg = Debug|x64 - {17BDD0A5-98E5-464A-8A00-050D9FF4C562}.Debug|Any CPU.Build.0 = Debug|x64 - {17BDD0A5-98E5-464A-8A00-050D9FF4C562}.Debug|x64.ActiveCfg = Debug|x64 - {17BDD0A5-98E5-464A-8A00-050D9FF4C562}.Debug|x64.Build.0 = Debug|x64 - {17BDD0A5-98E5-464A-8A00-050D9FF4C562}.Release|Any CPU.ActiveCfg = Release|x64 - {17BDD0A5-98E5-464A-8A00-050D9FF4C562}.Release|Any CPU.Build.0 = Release|x64 - {17BDD0A5-98E5-464A-8A00-050D9FF4C562}.Release|x64.ActiveCfg = Release|x64 - {17BDD0A5-98E5-464A-8A00-050D9FF4C562}.Release|x64.Build.0 = Release|x64 - {7DB87633-9CAB-4AE4-9ED0-AA6E77448486}.Debug|Any CPU.ActiveCfg = Debug|x64 - {7DB87633-9CAB-4AE4-9ED0-AA6E77448486}.Debug|Any CPU.Build.0 = Debug|x64 - {7DB87633-9CAB-4AE4-9ED0-AA6E77448486}.Debug|x64.ActiveCfg = Debug|x64 - {7DB87633-9CAB-4AE4-9ED0-AA6E77448486}.Debug|x64.Build.0 = Debug|x64 - {7DB87633-9CAB-4AE4-9ED0-AA6E77448486}.Release|Any CPU.ActiveCfg = Release|x64 - {7DB87633-9CAB-4AE4-9ED0-AA6E77448486}.Release|Any CPU.Build.0 = Release|x64 - {7DB87633-9CAB-4AE4-9ED0-AA6E77448486}.Release|x64.ActiveCfg = Release|x64 - {7DB87633-9CAB-4AE4-9ED0-AA6E77448486}.Release|x64.Build.0 = Release|x64 - {3E571C7C-59B5-485C-AC78-3F34D3511CD2}.Debug|Any CPU.ActiveCfg = Debug|x64 - {3E571C7C-59B5-485C-AC78-3F34D3511CD2}.Debug|Any CPU.Build.0 = Debug|x64 - {3E571C7C-59B5-485C-AC78-3F34D3511CD2}.Debug|x64.ActiveCfg = Debug|x64 - {3E571C7C-59B5-485C-AC78-3F34D3511CD2}.Debug|x64.Build.0 = Debug|x64 - {3E571C7C-59B5-485C-AC78-3F34D3511CD2}.Release|Any CPU.ActiveCfg = Release|x64 - {3E571C7C-59B5-485C-AC78-3F34D3511CD2}.Release|Any CPU.Build.0 = Release|x64 - 
{3E571C7C-59B5-485C-AC78-3F34D3511CD2}.Release|x64.ActiveCfg = Release|x64 - {3E571C7C-59B5-485C-AC78-3F34D3511CD2}.Release|x64.Build.0 = Release|x64 {05D61B37-9714-4234-9961-384A63F7175E}.Debug|Any CPU.ActiveCfg = Debug|x64 {05D61B37-9714-4234-9961-384A63F7175E}.Debug|Any CPU.Build.0 = Debug|x64 {05D61B37-9714-4234-9961-384A63F7175E}.Debug|x64.ActiveCfg = Debug|x64 @@ -91,14 +63,6 @@ Global {05D61B37-9714-4234-9961-384A63F7175E}.Release|Any CPU.Build.0 = Release|x64 {05D61B37-9714-4234-9961-384A63F7175E}.Release|x64.ActiveCfg = Release|x64 {05D61B37-9714-4234-9961-384A63F7175E}.Release|x64.Build.0 = Release|x64 - {E1AC9797-ABE3-4881-A51B-37D8687AAE35}.Debug|Any CPU.ActiveCfg = Debug|x64 - {E1AC9797-ABE3-4881-A51B-37D8687AAE35}.Debug|Any CPU.Build.0 = Debug|x64 - {E1AC9797-ABE3-4881-A51B-37D8687AAE35}.Debug|x64.ActiveCfg = Debug|x64 - {E1AC9797-ABE3-4881-A51B-37D8687AAE35}.Debug|x64.Build.0 = Debug|x64 - {E1AC9797-ABE3-4881-A51B-37D8687AAE35}.Release|Any CPU.ActiveCfg = Release|x64 - {E1AC9797-ABE3-4881-A51B-37D8687AAE35}.Release|Any CPU.Build.0 = Release|x64 - {E1AC9797-ABE3-4881-A51B-37D8687AAE35}.Release|x64.ActiveCfg = Release|x64 - {E1AC9797-ABE3-4881-A51B-37D8687AAE35}.Release|x64.Build.0 = Release|x64 {10FD4868-BB16-442B-B0AC-18AE278D9C60}.Debug|Any CPU.ActiveCfg = Debug|x64 {10FD4868-BB16-442B-B0AC-18AE278D9C60}.Debug|Any CPU.Build.0 = Debug|x64 {10FD4868-BB16-442B-B0AC-18AE278D9C60}.Debug|x64.ActiveCfg = Debug|x64 @@ -107,14 +71,6 @@ Global {10FD4868-BB16-442B-B0AC-18AE278D9C60}.Release|Any CPU.Build.0 = Release|x64 {10FD4868-BB16-442B-B0AC-18AE278D9C60}.Release|x64.ActiveCfg = Release|x64 {10FD4868-BB16-442B-B0AC-18AE278D9C60}.Release|x64.Build.0 = Release|x64 - {2D5F23F7-3184-43EC-A7F1-C924F7FEF786}.Debug|Any CPU.ActiveCfg = Debug|x64 - {2D5F23F7-3184-43EC-A7F1-C924F7FEF786}.Debug|Any CPU.Build.0 = Debug|x64 - {2D5F23F7-3184-43EC-A7F1-C924F7FEF786}.Debug|x64.ActiveCfg = Debug|x64 - {2D5F23F7-3184-43EC-A7F1-C924F7FEF786}.Debug|x64.Build.0 = Debug|x64 - {2D5F23F7-3184-43EC-A7F1-C924F7FEF786}.Release|Any CPU.ActiveCfg = Release|x64 - {2D5F23F7-3184-43EC-A7F1-C924F7FEF786}.Release|Any CPU.Build.0 = Release|x64 - {2D5F23F7-3184-43EC-A7F1-C924F7FEF786}.Release|x64.ActiveCfg = Release|x64 - {2D5F23F7-3184-43EC-A7F1-C924F7FEF786}.Release|x64.Build.0 = Release|x64 {F947BC6A-2943-4AC7-ACA7-F17351E25FE7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {F947BC6A-2943-4AC7-ACA7-F17351E25FE7}.Debug|Any CPU.Build.0 = Debug|Any CPU {F947BC6A-2943-4AC7-ACA7-F17351E25FE7}.Debug|x64.ActiveCfg = Debug|x64 @@ -123,14 +79,38 @@ Global {F947BC6A-2943-4AC7-ACA7-F17351E25FE7}.Release|Any CPU.Build.0 = Release|Any CPU {F947BC6A-2943-4AC7-ACA7-F17351E25FE7}.Release|x64.ActiveCfg = Release|x64 {F947BC6A-2943-4AC7-ACA7-F17351E25FE7}.Release|x64.Build.0 = Release|x64 - {C9391533-1F31-47F6-BE08-9642C95401A8}.Debug|Any CPU.ActiveCfg = Debug|x64 - {C9391533-1F31-47F6-BE08-9642C95401A8}.Debug|Any CPU.Build.0 = Debug|x64 - {C9391533-1F31-47F6-BE08-9642C95401A8}.Debug|x64.ActiveCfg = Debug|x64 - {C9391533-1F31-47F6-BE08-9642C95401A8}.Debug|x64.Build.0 = Debug|x64 - {C9391533-1F31-47F6-BE08-9642C95401A8}.Release|Any CPU.ActiveCfg = Release|x64 - {C9391533-1F31-47F6-BE08-9642C95401A8}.Release|Any CPU.Build.0 = Release|x64 - {C9391533-1F31-47F6-BE08-9642C95401A8}.Release|x64.ActiveCfg = Release|x64 - {C9391533-1F31-47F6-BE08-9642C95401A8}.Release|x64.Build.0 = Release|x64 + {494703CF-C1C9-4800-B994-EF3974EB051D}.Debug|Any CPU.ActiveCfg = Debug|x64 + {494703CF-C1C9-4800-B994-EF3974EB051D}.Debug|Any CPU.Build.0 = Debug|x64 + 
{494703CF-C1C9-4800-B994-EF3974EB051D}.Debug|x64.ActiveCfg = Debug|x64 + {494703CF-C1C9-4800-B994-EF3974EB051D}.Debug|x64.Build.0 = Debug|x64 + {494703CF-C1C9-4800-B994-EF3974EB051D}.Release|Any CPU.ActiveCfg = Release|x64 + {494703CF-C1C9-4800-B994-EF3974EB051D}.Release|Any CPU.Build.0 = Release|x64 + {494703CF-C1C9-4800-B994-EF3974EB051D}.Release|x64.ActiveCfg = Release|x64 + {494703CF-C1C9-4800-B994-EF3974EB051D}.Release|x64.Build.0 = Release|x64 + {D938612F-4B99-409E-953E-28A3A027B0E3}.Debug|Any CPU.ActiveCfg = Debug|x64 + {D938612F-4B99-409E-953E-28A3A027B0E3}.Debug|Any CPU.Build.0 = Debug|x64 + {D938612F-4B99-409E-953E-28A3A027B0E3}.Debug|x64.ActiveCfg = Debug|x64 + {D938612F-4B99-409E-953E-28A3A027B0E3}.Debug|x64.Build.0 = Debug|x64 + {D938612F-4B99-409E-953E-28A3A027B0E3}.Release|Any CPU.ActiveCfg = Release|x64 + {D938612F-4B99-409E-953E-28A3A027B0E3}.Release|Any CPU.Build.0 = Release|x64 + {D938612F-4B99-409E-953E-28A3A027B0E3}.Release|x64.ActiveCfg = Release|x64 + {D938612F-4B99-409E-953E-28A3A027B0E3}.Release|x64.Build.0 = Release|x64 + {18B6FB88-202F-4DAD-A582-17E1CEB873EC}.Debug|Any CPU.ActiveCfg = Debug|x64 + {18B6FB88-202F-4DAD-A582-17E1CEB873EC}.Debug|Any CPU.Build.0 = Debug|x64 + {18B6FB88-202F-4DAD-A582-17E1CEB873EC}.Debug|x64.ActiveCfg = Debug|x64 + {18B6FB88-202F-4DAD-A582-17E1CEB873EC}.Debug|x64.Build.0 = Debug|x64 + {18B6FB88-202F-4DAD-A582-17E1CEB873EC}.Release|Any CPU.ActiveCfg = Release|x64 + {18B6FB88-202F-4DAD-A582-17E1CEB873EC}.Release|Any CPU.Build.0 = Release|x64 + {18B6FB88-202F-4DAD-A582-17E1CEB873EC}.Release|x64.ActiveCfg = Release|x64 + {18B6FB88-202F-4DAD-A582-17E1CEB873EC}.Release|x64.Build.0 = Release|x64 + {8B9F682D-145C-4085-AD8A-845255597F5D}.Debug|Any CPU.ActiveCfg = Debug|x64 + {8B9F682D-145C-4085-AD8A-845255597F5D}.Debug|Any CPU.Build.0 = Debug|x64 + {8B9F682D-145C-4085-AD8A-845255597F5D}.Debug|x64.ActiveCfg = Debug|x64 + {8B9F682D-145C-4085-AD8A-845255597F5D}.Debug|x64.Build.0 = Debug|x64 + {8B9F682D-145C-4085-AD8A-845255597F5D}.Release|Any CPU.ActiveCfg = Release|x64 + {8B9F682D-145C-4085-AD8A-845255597F5D}.Release|Any CPU.Build.0 = Release|x64 + {8B9F682D-145C-4085-AD8A-845255597F5D}.Release|x64.ActiveCfg = Release|x64 + {8B9F682D-145C-4085-AD8A-845255597F5D}.Release|x64.Build.0 = Release|x64 EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -138,16 +118,14 @@ Global GlobalSection(NestedProjects) = preSolution {33A732D1-2B58-4FEE-9696-B9483496229F} = {CA6AB459-A31A-4C15-B1A6-A82C349B54B4} {0DC7F5A2-E963-4E7F-BD37-6F7864B726F2} = {81B3B5D1-70F6-4979-AC76-003F9A6B316B} - {17BDD0A5-98E5-464A-8A00-050D9FF4C562} = {E6026D6A-01C5-4582-B2C1-64751490DABE} - {7DB87633-9CAB-4AE4-9ED0-AA6E77448486} = {E6026D6A-01C5-4582-B2C1-64751490DABE} - {3E571C7C-59B5-485C-AC78-3F34D3511CD2} = {E6026D6A-01C5-4582-B2C1-64751490DABE} {05D61B37-9714-4234-9961-384A63F7175E} = {E6026D6A-01C5-4582-B2C1-64751490DABE} - {E1AC9797-ABE3-4881-A51B-37D8687AAE35} = {E6026D6A-01C5-4582-B2C1-64751490DABE} {10FD4868-BB16-442B-B0AC-18AE278D9C60} = {E6026D6A-01C5-4582-B2C1-64751490DABE} - {2D5F23F7-3184-43EC-A7F1-C924F7FEF786} = {E6026D6A-01C5-4582-B2C1-64751490DABE} {9531E3D2-217B-4446-98E8-E48F0FDD1452} = {28800357-C8CE-4CD0-A2AD-D4A910ABB496} {F947BC6A-2943-4AC7-ACA7-F17351E25FE7} = {9531E3D2-217B-4446-98E8-E48F0FDD1452} - {C9391533-1F31-47F6-BE08-9642C95401A8} = {E6026D6A-01C5-4582-B2C1-64751490DABE} + {494703CF-C1C9-4800-B994-EF3974EB051D} = {E6026D6A-01C5-4582-B2C1-64751490DABE} + {D938612F-4B99-409E-953E-28A3A027B0E3} = 
{E6026D6A-01C5-4582-B2C1-64751490DABE} + {18B6FB88-202F-4DAD-A582-17E1CEB873EC} = {E6026D6A-01C5-4582-B2C1-64751490DABE} + {8B9F682D-145C-4085-AD8A-845255597F5D} = {E6026D6A-01C5-4582-B2C1-64751490DABE} EndGlobalSection GlobalSection(ExtensibilityGlobals) = postSolution SolutionGuid = {A0750637-2CCB-4139-B25E-F2CE740DCFAC} diff --git a/cs/benchmark/ConcurrentDictionaryBenchmark.cs b/cs/benchmark/ConcurrentDictionaryBenchmark.cs index 29016d137..d2d3bd6dd 100644 --- a/cs/benchmark/ConcurrentDictionaryBenchmark.cs +++ b/cs/benchmark/ConcurrentDictionaryBenchmark.cs @@ -4,7 +4,6 @@ #pragma warning disable 0162 //#define DASHBOARD -//#define USE_CODEGEN using FASTER.core; using System; @@ -55,7 +54,6 @@ public enum Op : ulong Key[] init_keys_; Key[] txn_keys_; - Key* txn_keys_ptr; long idx_ = 0; @@ -135,9 +133,7 @@ private void RunYcsb(int thread_idx) chunk_idx = Interlocked.Add(ref idx_, kChunkSize) - kChunkSize; } - var local_txn_keys_ptr = txn_keys_ptr + chunk_idx; - - for (long idx = chunk_idx; idx < chunk_idx + kChunkSize && !done; ++idx, ++local_txn_keys_ptr) + for (long idx = chunk_idx; idx < chunk_idx + kChunkSize && !done; ++idx) { Op op; int r = (int)rng.Generate(100); @@ -152,13 +148,13 @@ private void RunYcsb(int thread_idx) { case Op.Upsert: { - store[*local_txn_keys_ptr] = value; + store[txn_keys_[idx]] = value; ++writes_done; break; } case Op.Read: { - if (store.TryGetValue(*local_txn_keys_ptr, out value)) + if (store.TryGetValue(txn_keys_[idx], out value)) { ++reads_done; } @@ -166,7 +162,7 @@ private void RunYcsb(int thread_idx) } case Op.ReadModifyWrite: { - store.AddOrUpdate(*local_txn_keys_ptr, *(Value*)(input_ptr + (idx & 0x7)), (k, v) => new Value { value = v.value + (input_ptr + (idx & 0x7))->value }); + store.AddOrUpdate(txn_keys_[idx], *(Value*)(input_ptr + (idx & 0x7)), (k, v) => new Value { value = v.value + (input_ptr + (idx & 0x7))->value }); ++writes_done; break; } @@ -434,9 +430,9 @@ private void LoadDataFromFile(string filePath) { stream.Position = offset; int size = stream.Read(chunk, 0, kFileChunkSize); - for (int idx = 0; idx < size; idx += Key.kSizeInBytes) + for (int idx = 0; idx < size; idx += 8) { - init_keys_[count] = *((Key*)(chunk_ptr + idx)); + init_keys_[count].value = *(long*)(chunk_ptr + idx); ++count; } if (size == kFileChunkSize) @@ -466,8 +462,6 @@ private void LoadDataFromFile(string filePath) Console.WriteLine("loading txns from " + txn_filename + " into memory..."); txn_keys_ = new Key[kTxnCount]; - GCHandle handle2 = GCHandle.Alloc(txn_keys_, GCHandleType.Pinned); - txn_keys_ptr = (Key*)handle2.AddrOfPinnedObject(); count = 0; long offset = 0; @@ -476,7 +470,7 @@ private void LoadDataFromFile(string filePath) { stream.Position = offset; int size = stream.Read(chunk, 0, kFileChunkSize); - for (int idx = 0; idx < size; idx += Key.kSizeInBytes) + for (int idx = 0; idx < size; idx += 8) { txn_keys_[count] = *((Key*)(chunk_ptr + idx)); ++count; @@ -545,8 +539,6 @@ private void LoadSyntheticData() RandomGenerator generator = new RandomGenerator(); txn_keys_ = new Key[kTxnCount]; - GCHandle handle2 = GCHandle.Alloc(txn_keys_, GCHandleType.Pinned); - txn_keys_ptr = (Key*)handle2.AddrOfPinnedObject(); for (int idx = 0; idx < kTxnCount; idx++) { diff --git a/cs/benchmark/FasterYcsbBenchmark.cs b/cs/benchmark/FasterYcsbBenchmark.cs index 2824d182f..14eceba93 100644 --- a/cs/benchmark/FasterYcsbBenchmark.cs +++ b/cs/benchmark/FasterYcsbBenchmark.cs @@ -4,7 +4,6 @@ #pragma warning disable 0162 //#define DASHBOARD -//#define USE_CODEGEN using FASTER.core; 
using System; @@ -16,7 +15,7 @@ namespace FASTER.benchmark { - public unsafe class FASTER_YcsbBenchmark + public class FASTER_YcsbBenchmark { public enum Op : ulong { @@ -25,7 +24,7 @@ public enum Op : ulong ReadModifyWrite = 2 } - const bool kUseSyntheticData = true; + const bool kUseSyntheticData = false; const bool kUseSmallData = false; const long kInitCount = kUseSmallData ? 2500480 : 250000000; const long kTxnCount = kUseSmallData ? 10000000 : 1000000000; @@ -37,20 +36,13 @@ public enum Op : ulong Key[] init_keys_; Key[] txn_keys_; - Key* txn_keys_ptr; long idx_ = 0; Input[] input_; - Input* input_ptr; readonly IDevice device; -#if USE_CODEGEN - IFasterKV -#else - FasterKV -#endif - store; + FasterKV store; long total_ops_done = 0; @@ -86,14 +78,10 @@ public FASTER_YcsbBenchmark(int threadCount_, int numaStyle_, string distributio freq = Stopwatch.Frequency; #endif - device = FasterFactory.CreateLogDevice("C:\\data\\hlog"); + device = Devices.CreateLogDevice("C:\\data\\hlog"); -#if USE_CODEGEN - store = FasterFactory.Create -#else - store = new FasterKV -#endif - (kMaxKey / 2, new LogSettings { LogDevice = device }); + store = new FasterKV + (kMaxKey / 2, new Functions(), new LogSettings { LogDevice = device }); } private void RunYcsb(int thread_idx) @@ -108,7 +96,11 @@ private void RunYcsb(int thread_idx) Stopwatch sw = new Stopwatch(); sw.Start(); + Value value = default(Value); + Input input = default(Input); + Output output = default(Output); + long reads_done = 0; long writes_done = 0; @@ -131,9 +123,7 @@ private void RunYcsb(int thread_idx) chunk_idx = Interlocked.Add(ref idx_, kChunkSize) - kChunkSize; } - var local_txn_keys_ptr = txn_keys_ptr + chunk_idx; - - for (long idx = chunk_idx; idx < chunk_idx + kChunkSize && !done; ++idx, ++local_txn_keys_ptr) + for (long idx = chunk_idx; idx < chunk_idx + kChunkSize && !done; ++idx) { Op op; int r = (int)rng.Generate(100); @@ -158,13 +148,13 @@ private void RunYcsb(int thread_idx) { case Op.Upsert: { - store.Upsert(local_txn_keys_ptr, &value, null, 1); + store.Upsert(ref txn_keys_[idx], ref value, Empty.Default, 1); ++writes_done; break; } case Op.Read: { - Status result = store.Read(local_txn_keys_ptr, null, (Output*)&value, null, 1); + Status result = store.Read(ref txn_keys_[idx], ref input, ref output, Empty.Default, 1); if (result == Status.OK) { ++reads_done; @@ -173,7 +163,7 @@ private void RunYcsb(int thread_idx) } case Op.ReadModifyWrite: { - Status result = store.RMW(local_txn_keys_ptr, input_ptr + (idx & 0x7), null, 1); + Status result = store.RMW(ref txn_keys_[idx], ref input_[idx & 0x7], Empty.Default, 1); if (result == Status.OK) { ++writes_done; @@ -213,17 +203,15 @@ private void RunYcsb(int thread_idx) public unsafe void Run() { + Native32.AffinitizeThreadShardedNuma((uint)0, 2); + RandomGenerator rng = new RandomGenerator(); LoadData(); input_ = new Input[8]; for (int i = 0; i < 8; i++) - { input_[i].value = i; - } - GCHandle handle = GCHandle.Alloc(input_, GCHandleType.Pinned); - input_ptr = (Input*)handle.AddrOfPinnedObject(); #if DASHBOARD var dash = new Thread(() => DoContinuousMeasurements()); @@ -351,8 +339,7 @@ private void SetupYcsb(int thread_idx) } } - Key key = init_keys_[idx]; - store.Upsert(&key, &value, null, 1); + store.Upsert(ref init_keys_[idx], ref value, Empty.Default, 1); } #if DASHBOARD count += (int)kChunkSize; @@ -448,7 +435,7 @@ void DoContinuousMeasurements() #region Load Data - private void LoadDataFromFile(string filePath) + private unsafe void LoadDataFromFile(string filePath) { string 
init_filename = filePath + "\\load_" + distribution + "_250M_raw.dat"; string txn_filename = filePath + "\\run_" + distribution + "_250M_1000M_raw.dat"; @@ -470,9 +457,9 @@ private void LoadDataFromFile(string filePath) { stream.Position = offset; int size = stream.Read(chunk, 0, kFileChunkSize); - for (int idx = 0; idx < size; idx += Key.kSizeInBytes) + for (int idx = 0; idx < size; idx += 8) { - init_keys_[count] = *((Key*)(chunk_ptr + idx)); + init_keys_[count].value = *(long*)(chunk_ptr + idx); ++count; } if (size == kFileChunkSize) @@ -502,8 +489,6 @@ private void LoadDataFromFile(string filePath) Console.WriteLine("loading txns from " + txn_filename + " into memory..."); txn_keys_ = new Key[kTxnCount]; - GCHandle handle2 = GCHandle.Alloc(txn_keys_, GCHandleType.Pinned); - txn_keys_ptr = (Key*)handle2.AddrOfPinnedObject(); count = 0; long offset = 0; @@ -512,9 +497,9 @@ private void LoadDataFromFile(string filePath) { stream.Position = offset; int size = stream.Read(chunk, 0, kFileChunkSize); - for (int idx = 0; idx < size; idx += Key.kSizeInBytes) + for (int idx = 0; idx < size; idx += 8) { - txn_keys_[count] = *((Key*)(chunk_ptr + idx)); + txn_keys_[count].value = *(long*)(chunk_ptr + idx); ++count; } if (size == kFileChunkSize) @@ -581,8 +566,6 @@ private void LoadSyntheticData() RandomGenerator generator = new RandomGenerator(); txn_keys_ = new Key[kTxnCount]; - GCHandle handle2 = GCHandle.Alloc(txn_keys_, GCHandleType.Pinned); - txn_keys_ptr = (Key*)handle2.AddrOfPinnedObject(); for (int idx = 0; idx < kTxnCount; idx++) { diff --git a/cs/benchmark/Functions.cs b/cs/benchmark/Functions.cs new file mode 100644 index 000000000..65fc6e1b2 --- /dev/null +++ b/cs/benchmark/Functions.cs @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
+ +#pragma warning disable 1591 + +using System; +using System.Runtime.CompilerServices; +using System.Diagnostics; +using FASTER.core; + +namespace FASTER.benchmark +{ + public struct Functions : IFunctions + { + public void RMWCompletionCallback(ref Key key, ref Input input, Empty ctx, Status status) + { + } + + public void ReadCompletionCallback(ref Key key, ref Input input, ref Output output, Empty ctx, Status status) + { + } + + public void UpsertCompletionCallback(ref Key key, ref Value value, Empty ctx) + { + } + + public void CheckpointCompletionCallback(Guid sessionId, long serialNum) + { + Debug.WriteLine("Session {0} reports persistence until {1}", sessionId, serialNum); + } + + // Read functions + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void SingleReader(ref Key key, ref Input input, ref Value value, ref Output dst) + { + dst.value = value; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void ConcurrentReader(ref Key key, ref Input input, ref Value value, ref Output dst) + { + dst.value = value; + } + + // Upsert functions + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void SingleWriter(ref Key key, ref Value src, ref Value dst) + { + dst = src; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void ConcurrentWriter(ref Key key, ref Value src, ref Value dst) + { + dst = src; + } + + // RMW functions + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void InitialUpdater(ref Key key, ref Input input, ref Value value) + { + value.value = input.value; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void InPlaceUpdater(ref Key key, ref Input input, ref Value value) + { + value.value += input.value; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void CopyUpdater(ref Key key, ref Input input, ref Value oldValue, ref Value newValue) + { + newValue.value = input.value + oldValue.value; + } + } +} diff --git a/cs/src/core/Index/UserCode/Input.cs b/cs/benchmark/Input.cs similarity index 57% rename from cs/src/core/Index/UserCode/Input.cs rename to cs/benchmark/Input.cs index b7be877f0..0c87ba36a 100644 --- a/cs/src/core/Index/UserCode/Input.cs +++ b/cs/benchmark/Input.cs @@ -5,15 +5,10 @@ using System.Runtime.CompilerServices; -namespace FASTER.core +namespace FASTER.benchmark { - public unsafe struct Input + public struct Input { public long value; - - public static Input* MoveToContext(Input* input) - { - return input; - } } } diff --git a/cs/benchmark/Key.cs b/cs/benchmark/Key.cs new file mode 100644 index 000000000..6f1ac727f --- /dev/null +++ b/cs/benchmark/Key.cs @@ -0,0 +1,39 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
+ +#pragma warning disable 1591 + +using System; +using System.IO; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using FASTER.core; + +namespace FASTER.benchmark +{ + [StructLayout(LayoutKind.Explicit, Size = 8)] + public struct Key : IFasterEqualityComparer + { + [FieldOffset(0)] + public long value; + + + public override string ToString() + { + return "{ " + value + " }"; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public long GetHashCode64(ref Key k) + { + return Utility.GetHashCode(k.value); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public bool Equals(ref Key k1, ref Key k2) + { + return k1.value == k2.value; + } + + } +} diff --git a/cs/src/core/Index/UserCode/Output.cs b/cs/benchmark/Output.cs similarity index 68% rename from cs/src/core/Index/UserCode/Output.cs rename to cs/benchmark/Output.cs index 479516be6..5ba8d8a4f 100644 --- a/cs/src/core/Index/UserCode/Output.cs +++ b/cs/benchmark/Output.cs @@ -8,18 +8,12 @@ using System.Runtime.InteropServices; using System.Threading; -namespace FASTER.core +namespace FASTER.benchmark { [StructLayout(LayoutKind.Explicit)] - public unsafe struct Output + public struct Output { [FieldOffset(0)] public Value value; - - public static Output* MoveToContext(Output* value) - { - return value; - } - } } diff --git a/cs/benchmark/Value.cs b/cs/benchmark/Value.cs new file mode 100644 index 000000000..be373764d --- /dev/null +++ b/cs/benchmark/Value.cs @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +#pragma warning disable 1591 + +#define EIGHT_BYTE_VALUE +//#define FIXED_SIZE_VALUE +//#define FIXED_SIZE_VALUE_WITH_LOCK + +using System; +using System.IO; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using FASTER.core; + +namespace FASTER.benchmark +{ + [StructLayout(LayoutKind.Explicit, Size = 8)] + public struct Value + { + [FieldOffset(0)] + public long value; + } +} diff --git a/cs/playground/ClassCache/Program.cs b/cs/playground/ClassCache/Program.cs index 90b5bd813..8c032102b 100644 --- a/cs/playground/ClassCache/Program.cs +++ b/cs/playground/ClassCache/Program.cs @@ -16,11 +16,15 @@ class Program { static void Main(string[] args) { - var log = FasterFactory.CreateLogDevice(Path.GetTempPath() + "hybridlog"); - var objlog = FasterFactory.CreateObjectLogDevice(Path.GetTempPath() + "hybridlog"); - var h = FasterFactory.Create - - (1L << 20, new CacheFunctions(), new LogSettings { LogDevice = log, ObjectLogDevice = objlog }); + var context = default(CacheContext); + + var log = Devices.CreateLogDevice(Path.GetTempPath() + "hybridlog"); + var objlog = Devices.CreateObjectLogDevice(Path.GetTempPath() + "hybridlog"); + var h = new FasterKV + ( + 1L << 20, new CacheFunctions(), + new LogSettings { LogDevice = log, ObjectLogDevice = objlog } + ); h.StartSession(); @@ -39,8 +43,9 @@ static void Main(string[] args) Console.WriteLine($"{i}: {workingSet / 1048576}M"); } } - - h.Upsert(new CacheKey(i), new CacheValue(i), default(CacheContext), 0); + var key = new CacheKey(i); + var value = new CacheValue(i); + h.Upsert(ref key, ref value, context, 0); } sw.Stop(); Console.WriteLine("Total time to upsert {0} elements: {1:0.000} secs ({2:0.00} inserts/sec)", max, sw.ElapsedMilliseconds/1000.0, max / (sw.ElapsedMilliseconds / 1000.0)); @@ -51,14 +56,15 @@ static void Main(string[] args) var rnd = new Random(); int statusPending = 0; - var o = new CacheOutput(); - + var output = new 
CacheOutput(); + var input = default(CacheInput); sw.Restart(); for (int i = 0; i < max; i++) { - long key = rnd.Next(max); + long k = rnd.Next(max); - var status = h.Read(new CacheKey(key), default(CacheInput), ref o, default(CacheContext), 0); + var key = new CacheKey(k); + var status = h.Read(ref key, ref input, ref output, context, 0); switch (status) { @@ -68,7 +74,7 @@ static void Main(string[] args) case Status.ERROR: throw new Exception("Error!"); } - if (o.value.value != key) + if (output.value.value != key.key) throw new Exception("Read error!"); } sw.Stop(); diff --git a/cs/playground/ClassCache/Types.cs b/cs/playground/ClassCache/Types.cs index daef18210..d8024bbaa 100644 --- a/cs/playground/ClassCache/Types.cs +++ b/cs/playground/ClassCache/Types.cs @@ -11,7 +11,7 @@ namespace ClassCache { - public class CacheKey : IFasterKey + public class CacheKey : IFasterEqualityComparer { public long key; @@ -22,33 +22,30 @@ public CacheKey(long first) key = first; } - public bool Equals(CacheKey other) + public long GetHashCode64(ref CacheKey key) { - return key == other.key; + return Utility.GetHashCode(key.key); } - - public long GetHashCode64() + public bool Equals(ref CacheKey k1, ref CacheKey k2) { - return Utility.GetHashCode(key); - } - - public CacheKey Clone() - { - return this; + return k1.key == k2.key; } + } - public void Deserialize(Stream fromStream) + public class CacheKeySerializer : BinaryObjectSerializer + { + public override void Deserialize(ref CacheKey obj) { - throw new NotImplementedException(); + obj.key = reader.ReadInt64(); } - public void Serialize(Stream toStream) + public override void Serialize(ref CacheKey obj) { - throw new NotImplementedException(); + writer.Write(obj.key); } } - public class CacheValue : IFasterValue + public class CacheValue { public long value; @@ -58,20 +55,18 @@ public CacheValue(long first) { value = first; } + } - public CacheValue Clone() - { - return this; - } - - public void Deserialize(Stream fromStream) + public class CacheValueSerializer : BinaryObjectSerializer + { + public override void Deserialize(ref CacheValue obj) { - throw new NotImplementedException(); + obj.value = reader.ReadInt64(); } - public void Serialize(Stream toStream) + public override void Serialize(ref CacheValue obj) { - throw new NotImplementedException(); + writer.Write(obj.value); } } @@ -88,35 +83,61 @@ public struct CacheContext { } - public class CacheFunctions : IUserFunctions + public class CacheFunctions : IFunctions { - public void CopyUpdater(CacheKey key, CacheInput input, CacheValue oldValue, ref CacheValue newValue) + public void ConcurrentReader(ref CacheKey key, ref CacheInput input, ref CacheValue value, ref CacheOutput dst) { + dst.value = value; } - public void InitialUpdater(CacheKey key, CacheInput input, ref CacheValue value) + public void ConcurrentWriter(ref CacheKey key, ref CacheValue src, ref CacheValue dst) { + dst = src; } - public void InPlaceUpdater(CacheKey key, CacheInput input, ref CacheValue value) + public void CopyUpdater(ref CacheKey key, ref CacheInput input, ref CacheValue oldValue, ref CacheValue newValue) { + throw new NotImplementedException(); } - public void ReadCompletionCallback(CacheContext ctx, CacheOutput output, Status status) + public void InitialUpdater(ref CacheKey key, ref CacheInput input, ref CacheValue value) { + throw new NotImplementedException(); } - public void Reader(CacheKey key, CacheInput input, CacheValue value, ref CacheOutput dst) + public void InPlaceUpdater(ref CacheKey key, ref 
CacheInput input, ref CacheValue value) + { + throw new NotImplementedException(); + } + + public void CheckpointCompletionCallback(Guid sessionId, long serialNum) + { + throw new NotImplementedException(); + } + + public void ReadCompletionCallback(ref CacheKey key, ref CacheInput input, ref CacheOutput output, CacheContext ctx, Status status) + { + throw new NotImplementedException(); + } + + public void RMWCompletionCallback(ref CacheKey key, ref CacheInput input, CacheContext ctx, Status status) + { + throw new NotImplementedException(); + } + + public void SingleReader(ref CacheKey key, ref CacheInput input, ref CacheValue value, ref CacheOutput dst) { dst.value = value; } - public void RMWCompletionCallback(CacheContext ctx, Status status) + public void SingleWriter(ref CacheKey key, ref CacheValue src, ref CacheValue dst) { + dst = src; } - public void UpsertCompletionCallback(CacheContext ctx) + public void UpsertCompletionCallback(ref CacheKey key, ref CacheValue value, CacheContext ctx) { + throw new NotImplementedException(); } } } diff --git a/cs/playground/ManagedSample3/App.config b/cs/playground/ClassSample/App.config similarity index 100% rename from cs/playground/ManagedSample3/App.config rename to cs/playground/ClassSample/App.config diff --git a/cs/playground/ManagedSample3/ManagedSample3.csproj b/cs/playground/ClassSample/ClassSample.csproj similarity index 96% rename from cs/playground/ManagedSample3/ManagedSample3.csproj rename to cs/playground/ClassSample/ClassSample.csproj index 93573b261..8294ee20a 100644 --- a/cs/playground/ManagedSample3/ManagedSample3.csproj +++ b/cs/playground/ClassSample/ClassSample.csproj @@ -9,7 +9,7 @@ Exe true - ManagedSample3 + ClassSample prompt PackageReference true diff --git a/cs/playground/ClassSample/Program.cs b/cs/playground/ClassSample/Program.cs new file mode 100644 index 000000000..f2972fcc9 --- /dev/null +++ b/cs/playground/ClassSample/Program.cs @@ -0,0 +1,175 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
+ +using FASTER.core; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace ClassSample +{ + public class MyKey : IFasterEqualityComparer + { + public int key; + + public long GetHashCode64(ref MyKey key) + { + return Utility.GetHashCode(key.key); + } + + public bool Equals(ref MyKey key1, ref MyKey key2) + { + return key1.key == key2.key; + } + } + + public class MyKeySerializer : BinaryObjectSerializer + { + public override void Serialize(ref MyKey key) + { + writer.Write(key.key); + } + + public override void Deserialize(ref MyKey key) + { + key.key = reader.ReadInt32(); + } + } + + + public class MyValue + { + public int value; + } + + public class MyValueSerializer : BinaryObjectSerializer + { + public override void Serialize(ref MyValue value) + { + writer.Write(value.value); + } + + public override void Deserialize(ref MyValue value) + { + value.value = reader.ReadInt32(); + } + } + + public class MyInput + { + } + + public class MyOutput + { + public MyValue value; + } + + + public class MyContext + { + } + + public class MyFunctions : IFunctions + { + public void ConcurrentReader(ref MyKey key, ref MyInput input, ref MyValue value, ref MyOutput dst) + { + throw new NotImplementedException(); + } + + public void ConcurrentWriter(ref MyKey key, ref MyValue src, ref MyValue dst) + { + throw new NotImplementedException(); + } + + public void CopyUpdater(ref MyKey key, ref MyInput input, ref MyValue oldValue, ref MyValue newValue) + { + throw new NotImplementedException(); + } + + public void InitialUpdater(ref MyKey key, ref MyInput input, ref MyValue value) + { + throw new NotImplementedException(); + } + + public void InPlaceUpdater(ref MyKey key, ref MyInput input, ref MyValue value) + { + throw new NotImplementedException(); + } + + public void CheckpointCompletionCallback(Guid sessionId, long serialNum) + { + throw new NotImplementedException(); + } + + public void ReadCompletionCallback(ref MyKey key, ref MyInput input, ref MyOutput output, MyContext ctx, Status status) + { + throw new NotImplementedException(); + } + + public void RMWCompletionCallback(ref MyKey key, ref MyInput input, MyContext ctx, Status status) + { + throw new NotImplementedException(); + } + + public void SingleReader(ref MyKey key, ref MyInput input, ref MyValue value, ref MyOutput dst) + { + throw new NotImplementedException(); + } + + public void SingleWriter(ref MyKey key, ref MyValue src, ref MyValue dst) + { + throw new NotImplementedException(); + } + + public void UpsertCompletionCallback(ref MyKey key, ref MyValue value, MyContext ctx) + { + throw new NotImplementedException(); + } + } + + class Program + { + static void Main(string[] args) + { + var log = Devices.CreateLogDevice(Path.GetTempPath() + "hybridlog"); + var objlog = Devices.CreateObjectLogDevice(Path.GetTempPath() + "hybridlog"); + + var h = new FasterKV + + (128, new MyFunctions(), + new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MemorySizeBits = 29 }, + null, + new SerializerSettings { keySerializer = () => new MyKeySerializer(), valueSerializer = () => new MyValueSerializer() } + ); + + var context = default(MyContext); + + h.StartSession(); + + for (int i = 0; i < 20000; i++) + { + var _key = new MyKey { key = i }; + var value = new MyValue { value = i }; + h.Upsert(ref _key, ref value, context, 0); + if (i % 32 == 0) h.Refresh(); + } + var key = new MyKey { key = 23 }; + var input = default(MyInput); + MyOutput g1 = new 
MyOutput(); + h.Read(ref key, ref input, ref g1, context, 0); + + h.CompletePending(true); + + MyOutput g2 = new MyOutput(); + key = new MyKey { key = 46 }; + h.Read(ref key, ref input, ref g2, context, 0); + h.CompletePending(true); + + Console.WriteLine("Success!"); + Console.ReadLine(); + } + } +} diff --git a/cs/playground/ManagedSample3/Properties/AssemblyInfo.cs b/cs/playground/ClassSample/Properties/AssemblyInfo.cs similarity index 100% rename from cs/playground/ManagedSample3/Properties/AssemblyInfo.cs rename to cs/playground/ClassSample/Properties/AssemblyInfo.cs diff --git a/cs/playground/ManagedSample1/Functions.cs b/cs/playground/ManagedSample1/Functions.cs deleted file mode 100644 index cd2566d0a..000000000 --- a/cs/playground/ManagedSample1/Functions.cs +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. - -using FASTER.core; -using System.Diagnostics; -using System.Runtime.CompilerServices; - -namespace ManagedSampleCore -{ - /// - /// Callback functions for FASTER operations customized to user types - /// See \cs\src\core\Index\UserCode\Functions.cs for template details - /// - public unsafe class Functions - { - public static void RMWCompletionCallback(KeyStruct* key, InputStruct* output, Empty* ctx, Status status) - { - } - - public static void ReadCompletionCallback(KeyStruct* key, InputStruct* input, OutputStruct* output, Empty* ctx, Status status) - { - } - - public static void UpsertCompletionCallback(KeyStruct* key, ValueStruct* output, Empty* ctx) - { - } - - public static void PersistenceCallback(long thread_id, long serial_num) - { - Debug.WriteLine("Thread {0} repors persistence until {1}", thread_id, serial_num); - } - - // Read functions - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void SingleReader(KeyStruct* key, InputStruct* input, ValueStruct* value, OutputStruct* dst) - { - ValueStruct.Copy(value, (ValueStruct*)dst); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void ConcurrentReader(KeyStruct* key, InputStruct* input, ValueStruct* value, OutputStruct* dst) - { - ValueStruct.AcquireReadLock(value); - ValueStruct.Copy(value, (ValueStruct*)dst); - ValueStruct.ReleaseReadLock(value); - } - - // Upsert functions - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void SingleWriter(KeyStruct* key, ValueStruct* src, ValueStruct* dst) - { - ValueStruct.Copy(src, dst); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void ConcurrentWriter(KeyStruct* key, ValueStruct* src, ValueStruct* dst) - { - ValueStruct.AcquireWriteLock(dst); - ValueStruct.Copy(src, dst); - ValueStruct.ReleaseWriteLock(dst); - } - - // RMW functions - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static int InitialValueLength(KeyStruct* key, InputStruct* input) - { - return ValueStruct.GetLength(default(ValueStruct*)); - } - - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void InitialUpdater(KeyStruct* key, InputStruct* input, ValueStruct* value) - { - ValueStruct.Copy((ValueStruct*)input, value); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void InPlaceUpdater(KeyStruct* key, InputStruct* input, ValueStruct* value) - { - ValueStruct.AcquireWriteLock(value); - value->vfield1 += input->ifield1; - value->vfield2 += input->ifield2; - ValueStruct.ReleaseWriteLock(value); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void 
CopyUpdater(KeyStruct* key, InputStruct* input, ValueStruct* oldValue, ValueStruct* newValue) - { - newValue->vfield1 = oldValue->vfield1 + input->ifield1; - newValue->vfield2 = oldValue->vfield2 + input->ifield2; - } - } -} diff --git a/cs/playground/ManagedSample1/ICustomFaster.cs b/cs/playground/ManagedSample1/ICustomFaster.cs deleted file mode 100644 index decbef86a..000000000 --- a/cs/playground/ManagedSample1/ICustomFaster.cs +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. - -using FASTER.core; -using System; - -namespace ManagedSampleCore -{ - /// - /// Custom interface of FASTER for user-specified types - /// See cs\src\core\Index\FASTER\IFASTER.cs for template - /// - /// Interface to FASTER key-value store - /// (customized for sample types Key, Value, Input, Output, Context) - /// Since there are pointers in the API, we cannot automatically create a - /// generic version covering arbitrary blittable types. Instead, the - /// user defines the customized interface and provides it to FASTER - /// so it can return a (generated) instance for that interface. - /// - public unsafe interface ICustomFasterKv - { - /* Thread-related operations */ - - /// - /// Start a session with FASTER. FASTER sessions correspond to threads issuing - /// operations to FASTER. - /// - /// Session identifier - Guid StartSession(); - - /// - /// Continue a session after recovery. Provide FASTER with the identifier of the - /// session that is being continued. - /// - /// - /// Sequence number for resuming operations - long ContinueSession(Guid guid); - - /// - /// Stop a session and de-register the thread from FASTER. - /// - void StopSession(); - - /// - /// Refresh the session epoch. The caller is required to invoke Refresh periodically - /// in order to guarantee system liveness. 
- /// - void Refresh(); - - /* Store Interface */ - - /// - /// Read operation - /// - /// Key of read - /// Input argument used by Reader to select what part of value to read - /// Reader stores the read result in output - /// User context to identify operation in asynchronous callback - /// Increasing sequence number of operation (used for recovery) - /// Status of operation - Status Read(KeyStruct* key, InputStruct* input, OutputStruct* output, Empty* context, long lsn); - - /// - /// (Blind) upsert operation - /// - /// Key of read - /// Value being upserted - /// User context to identify operation in asynchronous callback - /// Increasing sequence number of operation (used for recovery) - /// Status of operation - Status Upsert(KeyStruct* key, ValueStruct* value, Empty* context, long lsn); - - /// - /// Atomic read-modify-write operation - /// - /// Key of read - /// Input argument used by RMW callback to perform operation - /// User context to identify operation in asynchronous callback - /// Increasing sequence number of operation (used for recovery) - /// Status of operation - Status RMW(KeyStruct* key, InputStruct* input, Empty* context, long lsn); - - /// - /// Complete all pending operations issued by this session - /// - /// Whether we spin-wait for pending operations to complete - /// Whether all pending operations have completed - bool CompletePending(bool wait); - - /// - /// Truncate the log until, but not including, untilAddress - /// - /// Address to shift until - bool ShiftBeginAddress(long untilAddress); - - - /* Recovery */ - - /// - /// Take full checkpoint of FASTER - /// - /// Token describing checkpoint - /// Whether checkpoint was initiated - bool TakeFullCheckpoint(out Guid token); - - /// - /// Take checkpoint of FASTER index only (not log) - /// - /// Token describing checkpoin - /// Whether checkpoint was initiated - bool TakeIndexCheckpoint(out Guid token); - - /// - /// Take checkpoint of FASTER log only (not index) - /// - /// Token describing checkpoin - /// Whether checkpoint was initiated - bool TakeHybridLogCheckpoint(out Guid token); - - /// - /// Recover using full checkpoint token - /// - /// - void Recover(Guid fullcheckpointToken); - - /// - /// Recover using a separate index and log checkpoint token - /// - /// - /// - void Recover(Guid indexToken, Guid hybridLogToken); - - /// - /// Complete ongoing checkpoint (spin-wait) - /// - /// - /// Whether checkpoint has completed - bool CompleteCheckpoint(bool wait); - - /* Statistics */ - /// - /// Get size of FASTER - /// - long LogTailAddress { get; } - - /// - /// Get (safe) read-only address of FASTER - /// - long LogReadOnlyAddress { get; } - - /// - /// Dump distribution of #entries in hash table, to console - /// - void DumpDistribution(); - } -} - diff --git a/cs/playground/ManagedSample1/KeyStruct.cs b/cs/playground/ManagedSample1/KeyStruct.cs deleted file mode 100644 index c0a4e2f07..000000000 --- a/cs/playground/ManagedSample1/KeyStruct.cs +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. 
- -using FASTER.core; -using System; -using System.IO; -using System.Runtime.CompilerServices; - -namespace ManagedSampleCore -{ - public unsafe struct KeyStruct - { - public const int physicalSize = sizeof(long) + sizeof(long); - public long kfield1; - public long kfield2; - - public static long GetHashCode(KeyStruct* key) - { - return Utility.GetHashCode(*((long*)key)); - } - public static bool Equals(KeyStruct* k1, KeyStruct* k2) - { - return k1->kfield1 == k2->kfield1 && k1->kfield2 == k2->kfield2; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static int GetLength(KeyStruct* key) - { - return physicalSize; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void Copy(KeyStruct* src, KeyStruct* dst) - { - dst->kfield1 = src->kfield1; - dst->kfield2 = src->kfield2; - } - - #region Serialization - public static bool HasObjectsToSerialize() - { - return false; - } - - public static void Serialize(KeyStruct* key, Stream toStream) - { - throw new InvalidOperationException(); - } - - public static void Deserialize(KeyStruct* key, Stream fromStream) - { - throw new InvalidOperationException(); - } - - public static void Free(KeyStruct* key) - { - throw new InvalidOperationException(); - } - #endregion - - public static KeyStruct* MoveToContext(KeyStruct* key) - { - return key; - } - } -} diff --git a/cs/playground/ManagedSample1/OutputStruct.cs b/cs/playground/ManagedSample1/OutputStruct.cs deleted file mode 100644 index 4a1690309..000000000 --- a/cs/playground/ManagedSample1/OutputStruct.cs +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. - -namespace ManagedSampleCore -{ - public unsafe struct OutputStruct - { - public ValueStruct value; - - public static OutputStruct* MoveToContext(OutputStruct* output) - { - return output; - } - - } -} diff --git a/cs/playground/ManagedSample1/ValueStruct.cs b/cs/playground/ManagedSample1/ValueStruct.cs deleted file mode 100644 index 6c26dc0cb..000000000 --- a/cs/playground/ManagedSample1/ValueStruct.cs +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. 
- -using System; -using System.IO; -using System.Runtime.CompilerServices; - -namespace ManagedSampleCore -{ - public unsafe struct ValueStruct - { - public const int physicalSize = sizeof(long) + sizeof(long); - public long vfield1; - public long vfield2; - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static int GetLength(ValueStruct* input) - { - return physicalSize; - } - - public static void Copy(ValueStruct* src, ValueStruct* dst) - { - dst->vfield1 = src->vfield1; - dst->vfield2 = src->vfield2; - } - - // Shared read/write capabilities on value - public static void AcquireReadLock(ValueStruct* value) - { - } - - public static void ReleaseReadLock(ValueStruct* value) - { - } - - public static void AcquireWriteLock(ValueStruct* value) - { - } - - public static void ReleaseWriteLock(ValueStruct* value) - { - } - - #region Serialization - public static bool HasObjectsToSerialize() - { - return false; - } - - public static void Serialize(ValueStruct* key, Stream toStream) - { - throw new InvalidOperationException(); - } - - public static void Deserialize(ValueStruct* key, Stream fromStream) - { - throw new InvalidOperationException(); - } - - public static void Free(ValueStruct* key) - { - throw new InvalidOperationException(); - } - #endregion - - public static ValueStruct* MoveToContext(ValueStruct* value) - { - return value; - } - } -} diff --git a/cs/playground/ManagedSample2/Program.cs b/cs/playground/ManagedSample2/Program.cs index 10731ae4f..14db84d56 100644 --- a/cs/playground/ManagedSample2/Program.cs +++ b/cs/playground/ManagedSample2/Program.cs @@ -16,9 +16,8 @@ static void Main(string[] args) { // This sample uses structs, but via the safe API (no pointers) - var fht = FasterFactory.Create - + var fht = new FasterKV + (128, new CustomFunctions()); fht.StartSession(); diff --git a/cs/playground/ManagedSample3/Program.cs b/cs/playground/ManagedSample3/Program.cs deleted file mode 100644 index 25c3f4789..000000000 --- a/cs/playground/ManagedSample3/Program.cs +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. 
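// Illustrative sketch (not part of the patch): the ManagedSample2 hunk above replaces the
// codegen-based FasterFactory.Create<...> call with direct construction of the generic store,
// the pattern used throughout this change. Angle-bracketed generic argument lists are elided
// in this rendering of the patch; spelled out (using the KeyStruct/ValueStruct sample types
// defined later in this diff, and assuming the parameter order shown in those samples),
// construction looks like:
var log = Devices.CreateLogDevice(Path.GetTempPath() + "hybridlog.log");   // illustrative path
var fht = new FasterKV<KeyStruct, ValueStruct, InputStruct, OutputStruct, Empty, Functions>(
    128,                                    // hash index size, as in the samples
    new Functions(),                        // user-defined callback functions (IFunctions)
    new LogSettings { LogDevice = log });   // hybrid log configuration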
- -using FASTER.core; -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Text; -using System.Threading.Tasks; - -namespace ManagedSample3 -{ - public class MyKey - { - public int key; - public MyKey Clone() - { - return this; - } - - public long GetHashCode64() - { - return Utility.GetHashCode(key); - } - - public bool Equals(MyKey otherKey) - { - return key == otherKey.key; - } - public void Serialize(Stream toStream) - { - new BinaryWriter(toStream).Write(key); - } - - public void Deserialize(Stream fromStream) - { - key = new BinaryReader(fromStream).ReadInt32(); - } - } - - public class MyValue - { - public int value; - public MyValue Clone() - { - return this; - } - - public void Serialize(Stream toStream) - { - new BinaryWriter(toStream).Write(value); - } - - public void Deserialize(Stream fromStream) - { - value = new BinaryReader(fromStream).ReadInt32(); - } - } - - public class MyInput - { - } - - public class MyOutput - { - public MyValue value; - } - - - public class MyContext - { - } - - public class MyFunctions : IUserFunctions - { - public void RMWCompletionCallback(MyContext ctx, Status status) - { - } - - public void ReadCompletionCallback(MyContext ctx, MyOutput output, Status status) - { - } - - public void UpsertCompletionCallback(MyContext ctx) - { - } - - public void CopyUpdater(MyKey key, MyInput input, MyValue oldValue, ref MyValue newValue) - { - } - - public int InitialValueLength(MyKey key, MyInput input) - { - return sizeof(int) + sizeof(int); - } - - public void InitialUpdater(MyKey key, MyInput input, ref MyValue value) - { - } - - public void InPlaceUpdater(MyKey key, MyInput input, ref MyValue value) - { - } - - public void Reader(MyKey key, MyInput input, MyValue value, ref MyOutput dst) - { - dst.value = value; - } - } - - class Program - { - static void Main(string[] args) - { - var log = FasterFactory.CreateLogDevice(Path.GetTempPath() + "hybridlog"); - var objlog = FasterFactory.CreateObjectLogDevice(Path.GetTempPath() + "hybridlog"); - - var h = FasterFactory.Create - - (128, new MyFunctions(), - new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MemorySizeBits = 29 } - ); - - h.StartSession(); - - for (int i = 0; i < 20000; i++) - { - h.Upsert(new MyKey { key = i }, new MyValue { value = i }, default(MyContext), 0); - if (i % 32 == 0) h.Refresh(); - } - MyOutput g1 = new MyOutput(); - h.Read(new MyKey { key = 23 }, new MyInput(), ref g1, new MyContext(), 0); - - h.CompletePending(true); - - MyOutput g2 = new MyOutput(); - h.Read(new MyKey { key = 46 }, new MyInput(), ref g2, new MyContext(), 0); - h.CompletePending(true); - - Console.WriteLine("Success!"); - Console.ReadLine(); - } - } -} diff --git a/cs/playground/ManagedSample4/Program.cs b/cs/playground/ManagedSample4/Program.cs deleted file mode 100644 index a831bae00..000000000 --- a/cs/playground/ManagedSample4/Program.cs +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. 
- -using FASTER.core; -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Text; -using System.Threading.Tasks; - -namespace ManagedSample4 -{ - - public struct Wrap - { - public T field; - - public new long GetHashCode() - { - return Utility.GetHashCode(field.GetHashCode()); - } - } - - public class MyKey - { - public int key; - public MyKey Clone() - { - return this; - } - - public long GetHashCode64() - { - return Utility.GetHashCode(key); - } - - public bool Equals(MyKey otherKey) - { - return key == otherKey.key; - } - public void Serialize(Stream toStream) - { - new BinaryWriter(toStream).Write(key); - } - - public void Deserialize(Stream fromStream) - { - key = new BinaryReader(fromStream).ReadInt32(); - } - } - - public class MyValue - { - public int value; - public MyValue Clone() - { - return this; - } - - public void Serialize(Stream toStream) - { - new BinaryWriter(toStream).Write(value); - } - - public void Deserialize(Stream fromStream) - { - value = new BinaryReader(fromStream).ReadInt32(); - } - } - - public class MyInput - { - } - - public class MyOutput - { - public MyValue value; - } - - - public class MyContext - { - } - - public class MyFunctions : IUserFunctions, Wrap, Wrap, Wrap, MyContext> - { - public void RMWCompletionCallback(MyContext ctx, Status status) - { - } - - public void ReadCompletionCallback(MyContext ctx, Wrap output, Status status) - { - } - public void UpsertCompletionCallback(MyContext ctx) - { - } - - public void CopyUpdater(Wrap key, Wrap input, Wrap oldValue, ref Wrap newValue) - { - newValue.field = oldValue.field + input.field; - } - - public int InitialValueLength(Wrap key, Wrap input) - { - return sizeof(int) + sizeof(int); - } - - public void InitialUpdater(Wrap key, Wrap input, ref Wrap value) - { - value.field = input.field; - } - - public void InPlaceUpdater(Wrap key, Wrap input, ref Wrap value) - { - value.field += input.field; - } - - public void Reader(Wrap key, Wrap input, Wrap value, ref Wrap dst) - { - dst.field = value.field; - } - } - - class Program - { - static void Main(string[] args) - { - var log = FasterFactory.CreateLogDevice(Path.GetTempPath() + "hybridlog.log"); - var h = FasterFactory.Create - , Wrap, Wrap, Wrap, MyContext, MyFunctions> - (128, new MyFunctions(), - new LogSettings { LogDevice = log, MemorySizeBits = 29 } - ); - - h.StartSession(); - - for (int i = 0; i <20000; i++) - { - h.RMW(new Wrap { field = i }, new Wrap { field = i }, default(MyContext), 0); - h.RMW(new Wrap { field = i }, new Wrap { field = i }, default(MyContext), 0); - if (i % 32 == 0) h.Refresh(); - } - Wrap g1 = new Wrap(); - h.Read(new Wrap { field = 19999 }, new Wrap(), ref g1, new MyContext(), 0); - - h.CompletePending(true); - - Wrap g2 = new Wrap(); - h.Read(new Wrap { field = 46 }, new Wrap(), ref g2, new MyContext(), 0); - h.CompletePending(true); - - Console.WriteLine("Success!"); - Console.ReadLine(); - } - } -} diff --git a/cs/playground/ManagedSampleCore/Functions.cs b/cs/playground/ManagedSampleCore/Functions.cs index f7de480f0..58979e7a3 100644 --- a/cs/playground/ManagedSampleCore/Functions.cs +++ b/cs/playground/ManagedSampleCore/Functions.cs @@ -5,7 +5,7 @@ using System.Diagnostics; using System.Runtime.CompilerServices; -namespace ManagedSampleCore +namespace StructSample { public unsafe class Functions { diff --git a/cs/playground/ManagedSampleCore/ICustomFaster.cs b/cs/playground/ManagedSampleCore/ICustomFaster.cs index decbef86a..1f30472d2 100644 --- 
a/cs/playground/ManagedSampleCore/ICustomFaster.cs +++ b/cs/playground/ManagedSampleCore/ICustomFaster.cs @@ -4,7 +4,7 @@ using FASTER.core; using System; -namespace ManagedSampleCore +namespace StructSample { /// /// Custom interface of FASTER for user-specified types diff --git a/cs/playground/ManagedSampleCore/InputStruct.cs b/cs/playground/ManagedSampleCore/InputStruct.cs index 593ec4cb9..cd6f49ce6 100644 --- a/cs/playground/ManagedSampleCore/InputStruct.cs +++ b/cs/playground/ManagedSampleCore/InputStruct.cs @@ -3,7 +3,7 @@ using System.Runtime.CompilerServices; -namespace ManagedSampleCore +namespace StructSample { public unsafe struct InputStruct { diff --git a/cs/playground/ManagedSampleCore/KeyStruct.cs b/cs/playground/ManagedSampleCore/KeyStruct.cs index c0a4e2f07..d0898d61d 100644 --- a/cs/playground/ManagedSampleCore/KeyStruct.cs +++ b/cs/playground/ManagedSampleCore/KeyStruct.cs @@ -6,7 +6,7 @@ using System.IO; using System.Runtime.CompilerServices; -namespace ManagedSampleCore +namespace StructSample { public unsafe struct KeyStruct { diff --git a/cs/playground/ManagedSampleCore/OutputStruct.cs b/cs/playground/ManagedSampleCore/OutputStruct.cs index 4a1690309..49eb3baaf 100644 --- a/cs/playground/ManagedSampleCore/OutputStruct.cs +++ b/cs/playground/ManagedSampleCore/OutputStruct.cs @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. -namespace ManagedSampleCore +namespace StructSample { public unsafe struct OutputStruct { diff --git a/cs/playground/ManagedSampleCore/Program.cs b/cs/playground/ManagedSampleCore/Program.cs index 4c2a8dc6f..505784f35 100644 --- a/cs/playground/ManagedSampleCore/Program.cs +++ b/cs/playground/ManagedSampleCore/Program.cs @@ -8,7 +8,7 @@ using System.Text; using System.Threading.Tasks; -namespace ManagedSampleCore +namespace StructSample { public class Program { diff --git a/cs/playground/ManagedSampleCore/ValueStruct.cs b/cs/playground/ManagedSampleCore/ValueStruct.cs index 6c26dc0cb..2b73a5234 100644 --- a/cs/playground/ManagedSampleCore/ValueStruct.cs +++ b/cs/playground/ManagedSampleCore/ValueStruct.cs @@ -5,7 +5,7 @@ using System.IO; using System.Runtime.CompilerServices; -namespace ManagedSampleCore +namespace StructSample { public unsafe struct ValueStruct { diff --git a/cs/playground/ManagedSample4/App.config b/cs/playground/MixedSample/App.config similarity index 100% rename from cs/playground/ManagedSample4/App.config rename to cs/playground/MixedSample/App.config diff --git a/cs/playground/ManagedSample4/ManagedSample4.csproj b/cs/playground/MixedSample/MixedSample.csproj similarity index 95% rename from cs/playground/ManagedSample4/ManagedSample4.csproj rename to cs/playground/MixedSample/MixedSample.csproj index 7b4c4e34f..1b65ffc1c 100644 --- a/cs/playground/ManagedSample4/ManagedSample4.csproj +++ b/cs/playground/MixedSample/MixedSample.csproj @@ -8,7 +8,7 @@ Exe - ManagedSample4 + MixedSample prompt PackageReference true diff --git a/cs/playground/MixedSample/Program.cs b/cs/playground/MixedSample/Program.cs new file mode 100644 index 000000000..c7aadd1b2 --- /dev/null +++ b/cs/playground/MixedSample/Program.cs @@ -0,0 +1,173 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
+ +using FASTER.core; +using System; +using System.Collections.Generic; +using System.IO; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace MixedSample +{ + public class MyKey : IFasterEqualityComparer + { + public int key; + + public long GetHashCode64(ref MyKey key) + { + return Utility.GetHashCode(key.key); + } + + public bool Equals(ref MyKey k1, ref MyKey k2) + { + return k1.key == k2.key; + } + } + + public class MyKeySerializer : BinaryObjectSerializer + { + public override void Serialize(ref MyKey key) + { + writer.Write(key.key); + } + + public override void Deserialize(ref MyKey key) + { + key.key = reader.ReadInt32(); + } + } + + + public class MyValue + { + public int value; + } + + public class MyValueSerializer : BinaryObjectSerializer + { + public override void Serialize(ref MyValue value) + { + writer.Write(value.value); + } + + public override void Deserialize(ref MyValue value) + { + value.value = reader.ReadInt32(); + } + } + + public class MyInput + { + } + + public class MyOutput + { + public MyValue value; + } + + + public class MyContext + { + } + + public class MyFunctions : IFunctions + { + public void ConcurrentReader(ref MyKey key, ref MyInput input, ref MyValue value, ref MyOutput dst) + { + throw new NotImplementedException(); + } + + public void ConcurrentWriter(ref MyKey key, ref MyValue src, ref MyValue dst) + { + throw new NotImplementedException(); + } + + public void CopyUpdater(ref MyKey key, ref MyInput input, ref MyValue oldValue, ref MyValue newValue) + { + throw new NotImplementedException(); + } + + public void InitialUpdater(ref MyKey key, ref MyInput input, ref MyValue value) + { + throw new NotImplementedException(); + } + + public void InPlaceUpdater(ref MyKey key, ref MyInput input, ref MyValue value) + { + throw new NotImplementedException(); + } + + public void CheckpointCompletionCallback(Guid sessionId, long serialNum) + { + throw new NotImplementedException(); + } + + public void ReadCompletionCallback(ref MyKey key, ref MyInput input, ref MyOutput output, MyContext ctx, Status status) + { + throw new NotImplementedException(); + } + + public void RMWCompletionCallback(ref MyKey key, ref MyInput input, MyContext ctx, Status status) + { + throw new NotImplementedException(); + } + + public void SingleReader(ref MyKey key, ref MyInput input, ref MyValue value, ref MyOutput dst) + { + throw new NotImplementedException(); + } + + public void SingleWriter(ref MyKey key, ref MyValue src, ref MyValue dst) + { + throw new NotImplementedException(); + } + + public void UpsertCompletionCallback(ref MyKey key, ref MyValue value, MyContext ctx) + { + throw new NotImplementedException(); + } + } + + class Program + { + static void Main(string[] args) + { + var log = Devices.CreateLogDevice(Path.GetTempPath() + "hybridlog"); + var objlog = Devices.CreateObjectLogDevice(Path.GetTempPath() + "hybridlog"); + + var h = new FasterKV + + (128, new MyFunctions(), + new LogSettings { LogDevice = log, ObjectLogDevice = objlog, MemorySizeBits = 29 } + ); + + var context = default(MyContext); + + h.StartSession(); + + for (int i = 0; i < 20000; i++) + { + var _key = new MyKey { key = i }; + var value = new MyValue { value = i }; + h.Upsert(ref _key, ref value, context, 0); + if (i % 32 == 0) h.Refresh(); + } + var key = new MyKey { key = 23 }; + var input = default(MyInput); + MyOutput g1 = new MyOutput(); + h.Read(ref key, ref input, ref g1, context, 0); + + h.CompletePending(true); + + MyOutput g2 = new MyOutput(); + key = new 
MyKey { key = 46 }; + h.Read(ref key, ref input, ref g2, context, 0); + h.CompletePending(true); + + Console.WriteLine("Success!"); + Console.ReadLine(); + } + } +} diff --git a/cs/playground/ManagedSample1/App.config b/cs/playground/StructSample/App.config similarity index 100% rename from cs/playground/ManagedSample1/App.config rename to cs/playground/StructSample/App.config diff --git a/cs/playground/StructSample/Functions.cs b/cs/playground/StructSample/Functions.cs new file mode 100644 index 000000000..59fd99910 --- /dev/null +++ b/cs/playground/StructSample/Functions.cs @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using FASTER.core; +using System; +using System.Diagnostics; +using System.Runtime.CompilerServices; + +namespace StructSampleCore +{ + /// + /// Callback functions for FASTER operations + /// + public class Functions : IFunctions + { + public void RMWCompletionCallback(ref Key key, ref Input output, Empty ctx, Status status) + { + } + + public void ReadCompletionCallback(ref Key key, ref Input input, ref Output output, Empty ctx, Status status) + { + } + + public void UpsertCompletionCallback(ref Key key, ref Value output, Empty ctx) + { + } + + public void CheckpointCompletionCallback(Guid sessionId, long serialNum) + { + Debug.WriteLine("Session {0} reports persistence until {1}", sessionId, serialNum); + } + + // Read functions + public void SingleReader(ref Key key, ref Input input, ref Value value, ref Output dst) + { + dst.value = value; + } + + public void ConcurrentReader(ref Key key, ref Input input, ref Value value, ref Output dst) + { + dst.value = value; + } + + // Upsert functions + public void SingleWriter(ref Key key, ref Value src, ref Value dst) + { + dst = src; + } + + public void ConcurrentWriter(ref Key key, ref Value src, ref Value dst) + { + dst = src; + } + + // RMW functions + public void InitialUpdater(ref Key key, ref Input input, ref Value value) + { + value.vfield1 = input.ifield1; + value.vfield2 = input.ifield2; + } + + public void InPlaceUpdater(ref Key key, ref Input input, ref Value value) + { + value.vfield1 += input.ifield1; + value.vfield2 += input.ifield2; + } + + public void CopyUpdater(ref Key key, ref Input input, ref Value oldValue, ref Value newValue) + { + newValue.vfield1 = oldValue.vfield1 + input.ifield1; + newValue.vfield2 = oldValue.vfield2 + input.ifield2; + } + } +} diff --git a/cs/playground/StructSample/Program.cs b/cs/playground/StructSample/Program.cs new file mode 100644 index 000000000..daa7a2ac5 --- /dev/null +++ b/cs/playground/StructSample/Program.cs @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using FASTER.core; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace StructSampleCore +{ + public class Program + { + static void Main(string[] args) + { + // This sample uses "blittable" key and value types, which enables the + // "high speed" mode for FASTER. You can override the default key equality + // comparer in two ways; + // (1) Make Key implement IFasterEqualityComparer interface + // (2) Provide IFasterEqualityComparer instance as param to constructor + // Serializers are not required for blittable key and value types. 
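// Illustrative sketch (not part of the patch): option (2) above supplies the comparer as a
// separate object instead of implementing IFasterEqualityComparer<Key> on the key struct
// itself (option (1), shown in Types.cs below). The interface shape matches the other
// implementations in this diff; exactly how the instance is handed to the FasterKV
// constructor is not shown in this patch, so that step is only indicated.
public class KeyComparer : IFasterEqualityComparer<Key>
{
    public long GetHashCode64(ref Key key) => Utility.GetHashCode(key.kfield1);

    public bool Equals(ref Key k1, ref Key k2)
        => k1.kfield1 == k2.kfield1 && k1.kfield2 == k2.kfield2;
}
// e.g.: new FasterKV<...>(128, new Functions(), logSettings, comparer: new KeyComparer())
//       -- the exact constructor parameter is an assumption, not shown in this patch.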
+ + var fht = + new FasterKV + (128, new Functions(), new LogSettings { LogDevice = Devices.CreateLogDevice(""), MutableFraction = 0.5 }); + + fht.StartSession(); + + Input input = default(Input); + Output output = default(Output); + + var key1 = new Key { kfield1 = 13, kfield2 = 14 }; + var value = new Value { vfield1 = 23, vfield2 = 24 }; + + // Upsert item into store, and read it back + fht.Upsert(ref key1, ref value, Empty.Default, 0); + fht.Read(ref key1, ref input, ref output, Empty.Default, 0); + + if ((output.value.vfield1 != value.vfield1) || (output.value.vfield2 != value.vfield2)) + Console.WriteLine("Error!"); + else + Console.WriteLine("Success!"); + + var key2 = new Key { kfield1 = 15, kfield2 = 16 }; + input = new Input { ifield1 = 25, ifield2 = 26 }; + + // Two read-modify-write (RMW) operations (sum aggregator) + // Followed by read of result + fht.RMW(ref key2, ref input, Empty.Default, 0); + fht.RMW(ref key2, ref input, Empty.Default, 0); + fht.Read(ref key2, ref input, ref output, Empty.Default, 0); + + if ((output.value.vfield1 != input.ifield1*2) || (output.value.vfield2 != input.ifield2*2)) + Console.WriteLine("Error!"); + else + Console.WriteLine("Success!"); + + fht.StopSession(); + + Console.ReadLine(); + } + } +} diff --git a/cs/playground/ManagedSample1/Properties/AssemblyInfo.cs b/cs/playground/StructSample/Properties/AssemblyInfo.cs similarity index 100% rename from cs/playground/ManagedSample1/Properties/AssemblyInfo.cs rename to cs/playground/StructSample/Properties/AssemblyInfo.cs diff --git a/cs/playground/ManagedSample1/ManagedSample1.csproj b/cs/playground/StructSample/StructSample.csproj similarity index 91% rename from cs/playground/ManagedSample1/ManagedSample1.csproj rename to cs/playground/StructSample/StructSample.csproj index 0559ed2db..8f102730a 100644 --- a/cs/playground/ManagedSample1/ManagedSample1.csproj +++ b/cs/playground/StructSample/StructSample.csproj @@ -8,8 +8,8 @@ Exe - true - ManagedSample1 + false + StructSample prompt PackageReference true diff --git a/cs/playground/StructSample/Types.cs b/cs/playground/StructSample/Types.cs new file mode 100644 index 000000000..d46ef1d73 --- /dev/null +++ b/cs/playground/StructSample/Types.cs @@ -0,0 +1,42 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using FASTER.core; +using System; +using System.IO; +using System.Runtime.CompilerServices; + +namespace StructSampleCore +{ + public struct Key : IFasterEqualityComparer + { + public long kfield1; + public long kfield2; + + public long GetHashCode64(ref Key key) + { + return Utility.GetHashCode(key.kfield1); + } + public bool Equals(ref Key k1, ref Key k2) + { + return k1.kfield1 == k2.kfield1 && k1.kfield2 == k2.kfield2; + } + } + + public struct Value + { + public long vfield1; + public long vfield2; + } + + public struct Input + { + public long ifield1; + public long ifield2; + } + + public struct Output + { + public Value value; + } +} diff --git a/cs/playground/StructSampleCore/App.config b/cs/playground/StructSampleCore/App.config new file mode 100644 index 000000000..d69a9b153 --- /dev/null +++ b/cs/playground/StructSampleCore/App.config @@ -0,0 +1,6 @@ + + + + + + diff --git a/cs/playground/StructSampleCore/Functions.cs b/cs/playground/StructSampleCore/Functions.cs new file mode 100644 index 000000000..7205be6e3 --- /dev/null +++ b/cs/playground/StructSampleCore/Functions.cs @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT license. + +using FASTER.core; +using System; +using System.Diagnostics; +using System.Runtime.CompilerServices; + +namespace StructSampleCore +{ + /// + /// Callback functions for FASTER operations + /// + public class Functions : IFunctions + { + public void RMWCompletionCallback(ref KeyStruct key, ref InputStruct output, Empty ctx, Status status) + { + } + + public void ReadCompletionCallback(ref KeyStruct key, ref InputStruct input, ref OutputStruct output, Empty ctx, Status status) + { + } + + public void UpsertCompletionCallback(ref KeyStruct key, ref ValueStruct output, Empty ctx) + { + } + + public void CheckpointCompletionCallback(Guid sessionId, long serialNum) + { + Debug.WriteLine("Session {0} reports persistence until {1}", sessionId, serialNum); + } + + // Read functions + public void SingleReader(ref KeyStruct key, ref InputStruct input, ref ValueStruct value, ref OutputStruct dst) + { + dst.value = value; + } + + public void ConcurrentReader(ref KeyStruct key, ref InputStruct input, ref ValueStruct value, ref OutputStruct dst) + { + dst.value = value; + } + + // Upsert functions + public void SingleWriter(ref KeyStruct key, ref ValueStruct src, ref ValueStruct dst) + { + dst = src; + } + + public void ConcurrentWriter(ref KeyStruct key, ref ValueStruct src, ref ValueStruct dst) + { + dst = src; + } + + // RMW functions + public void InitialUpdater(ref KeyStruct key, ref InputStruct input, ref ValueStruct value) + { + value.vfield1 = input.ifield1; + value.vfield2 = input.ifield2; + } + + public void InPlaceUpdater(ref KeyStruct key, ref InputStruct input, ref ValueStruct value) + { + value.vfield1 += input.ifield1; + value.vfield2 += input.ifield2; + } + + public void CopyUpdater(ref KeyStruct key, ref InputStruct input, ref ValueStruct oldValue, ref ValueStruct newValue) + { + newValue.vfield1 = oldValue.vfield1 + input.ifield1; + newValue.vfield2 = oldValue.vfield2 + input.ifield2; + } + } +} diff --git a/cs/playground/ManagedSample1/InputStruct.cs b/cs/playground/StructSampleCore/InputStruct.cs similarity index 54% rename from cs/playground/ManagedSample1/InputStruct.cs rename to cs/playground/StructSampleCore/InputStruct.cs index 593ec4cb9..b0349e000 100644 --- a/cs/playground/ManagedSample1/InputStruct.cs +++ b/cs/playground/StructSampleCore/InputStruct.cs @@ -1,18 +1,14 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. +using FASTER.core; using System.Runtime.CompilerServices; -namespace ManagedSampleCore +namespace StructSampleCore { - public unsafe struct InputStruct + public struct InputStruct { public long ifield1; public long ifield2; - - public static InputStruct* MoveToContext(InputStruct* input) - { - return input; - } } } diff --git a/cs/playground/StructSampleCore/KeyStruct.cs b/cs/playground/StructSampleCore/KeyStruct.cs new file mode 100644 index 000000000..7e120541b --- /dev/null +++ b/cs/playground/StructSampleCore/KeyStruct.cs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
+ +using FASTER.core; +using System; +using System.IO; +using System.Runtime.CompilerServices; + +namespace StructSampleCore +{ + public struct KeyStruct : IFasterEqualityComparer + { + public long kfield1; + public long kfield2; + + public long GetHashCode64(ref KeyStruct key) + { + return Utility.GetHashCode(key.kfield1); + } + public bool Equals(ref KeyStruct k1, ref KeyStruct k2) + { + return k1.kfield1 == k2.kfield1 && k1.kfield2 == k2.kfield2; + } + } +} diff --git a/cs/playground/StructSampleCore/OutputStruct.cs b/cs/playground/StructSampleCore/OutputStruct.cs new file mode 100644 index 000000000..057bfb144 --- /dev/null +++ b/cs/playground/StructSampleCore/OutputStruct.cs @@ -0,0 +1,12 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using FASTER.core; + +namespace StructSampleCore +{ + public struct OutputStruct + { + public ValueStruct value; + } +} diff --git a/cs/playground/ManagedSample1/Program.cs b/cs/playground/StructSampleCore/Program.cs similarity index 64% rename from cs/playground/ManagedSample1/Program.cs rename to cs/playground/StructSampleCore/Program.cs index 8a53ca796..d912e42f6 100644 --- a/cs/playground/ManagedSample1/Program.cs +++ b/cs/playground/StructSampleCore/Program.cs @@ -8,32 +8,33 @@ using System.Text; using System.Threading.Tasks; -namespace ManagedSampleCore +namespace StructSampleCore { public class Program { - static unsafe void Main(string[] args) + static void Main(string[] args) { - // This sample uses the unsafe API of FASTER, and works only for blittable struct types + // This sample uses blittable structs the unsafe API of FASTER, and works only for blittable struct types // Your structs have to implement certain static methods (see the structs for details) // You also define the interface (ICustomFaster) that will be returned by the factory // This sample represents the highest performance level for FASTER, at the expense of // supporting limited types. 
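// Note (not part of the patch): the comment lines kept above still describe the old unsafe,
// pointer-based sample; the hunk below performs the actual migration. The call-shape change is:
//
//   old (codegen, unsafe):   fht.Upsert(&key1, &value, null, 0);
//                            fht.Read(&key1, null, &output, null, 0);
//   new (generic, safe):     fht.Upsert(ref key1, ref value, Empty.Default, 0);
//                            fht.Read(ref key1, ref input, ref output, Empty.Default, 0);
//
// Raw pointers become ref parameters, the null context becomes Empty.Default, and an explicit
// default(InputStruct) input is passed where the old API accepted null.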
- var fht = FasterFactory.Create - - (128, new LogSettings { LogDevice = FasterFactory.CreateLogDevice(""), MutableFraction = 0.5 }); + var fht = + new FasterKV + (128, new Functions(), new LogSettings { LogDevice = Devices.CreateLogDevice(""), MutableFraction = 0.5 }); fht.StartSession(); + InputStruct input = default(InputStruct); OutputStruct output = default(OutputStruct); var key1 = new KeyStruct { kfield1 = 13, kfield2 = 14 }; var value = new ValueStruct { vfield1 = 23, vfield2 = 24 }; // Upsert item into store, and read it back - fht.Upsert(&key1, &value, null, 0); - fht.Read(&key1, null, &output, null, 0); + fht.Upsert(ref key1, ref value, Empty.Default, 0); + fht.Read(ref key1, ref input, ref output, Empty.Default, 0); if ((output.value.vfield1 != value.vfield1) || (output.value.vfield2 != value.vfield2)) Console.WriteLine("Error!"); @@ -41,13 +42,13 @@ static unsafe void Main(string[] args) Console.WriteLine("Success!"); var key2 = new KeyStruct { kfield1 = 15, kfield2 = 16 }; - var input = new InputStruct { ifield1 = 25, ifield2 = 26 }; + input = new InputStruct { ifield1 = 25, ifield2 = 26 }; // Two read-modify-write (RMW) operations (sum aggregator) // Followed by read of result - fht.RMW(&key2, &input, null, 0); - fht.RMW(&key2, &input, null, 0); - fht.Read(&key2, null, &output, null, 0); + fht.RMW(ref key2, ref input, Empty.Default, 0); + fht.RMW(ref key2, ref input, Empty.Default, 0); + fht.Read(ref key2, ref input, ref output, Empty.Default, 0); if ((output.value.vfield1 != input.ifield1*2) || (output.value.vfield2 != input.ifield2*2)) Console.WriteLine("Error!"); diff --git a/cs/playground/StructSampleCore/Properties/AssemblyInfo.cs b/cs/playground/StructSampleCore/Properties/AssemblyInfo.cs new file mode 100644 index 000000000..5e08438c2 --- /dev/null +++ b/cs/playground/StructSampleCore/Properties/AssemblyInfo.cs @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using System.Reflection; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; + +// General Information about an assembly is controlled through the following +// set of attributes. Change these attribute values to modify the information +// associated with an assembly. +[assembly: AssemblyDescription("")] +[assembly: AssemblyCopyright("Copyright © 2017")] +[assembly: AssemblyTrademark("")] +[assembly: AssemblyCulture("")] + +// Setting ComVisible to false makes the types in this assembly not visible +// to COM components. If you need to access a type in this assembly from +// COM, set the ComVisible attribute to true on that type. 
+[assembly: ComVisible(false)] + +// The following GUID is for the ID of the typelib if this project is exposed to COM +[assembly: Guid("17bdd0a5-98e5-464a-8a00-050d9ff4c562")] diff --git a/cs/playground/StructSampleCore/StructSampleCore.csproj b/cs/playground/StructSampleCore/StructSampleCore.csproj new file mode 100644 index 000000000..06d9ad194 --- /dev/null +++ b/cs/playground/StructSampleCore/StructSampleCore.csproj @@ -0,0 +1,39 @@ + + + + netcoreapp2.0 + x64 + win7-x64;linux-x64 + true + + + + Exe + false + StructSampleCore + prompt + PackageReference + true + + + + TRACE;DEBUG + full + true + bin\x64\Debug\ + + + TRACE + pdbonly + true + bin\x64\Release\ + + + + + + + + + + \ No newline at end of file diff --git a/cs/playground/StructSampleCore/ValueStruct.cs b/cs/playground/StructSampleCore/ValueStruct.cs new file mode 100644 index 000000000..8bc301057 --- /dev/null +++ b/cs/playground/StructSampleCore/ValueStruct.cs @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using FASTER.core; +using System; +using System.IO; +using System.Runtime.CompilerServices; + +namespace StructSampleCore +{ + public struct ValueStruct + { + public long vfield1; + public long vfield2; + } +} diff --git a/cs/playground/SumStore/AdId.cs b/cs/playground/SumStore/AdId.cs deleted file mode 100644 index 15cd389a4..000000000 --- a/cs/playground/SumStore/AdId.cs +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. - -using FASTER.core; -using System; -using System.IO; -using System.Runtime.CompilerServices; - -namespace SumStore -{ - public unsafe struct AdId - { - public const int physicalSize = sizeof(long); - public long adId; - - public static long GetHashCode(AdId* key) - { - return Utility.GetHashCode(*((long*)key)); - } - public static bool Equals(AdId* k1, AdId* k2) - { - return k1->adId == k2->adId; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static int GetLength(AdId* key) - { - return physicalSize; - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void Copy(AdId* src, AdId* dst) - { - dst->adId = src->adId; - } - - public static AdId* MoveToContext(AdId* value) - { - return value; - } - #region Serialization - public static bool HasObjectsToSerialize() - { - return false; - } - - public static void Serialize(AdId* key, Stream toStream) - { - throw new InvalidOperationException(); - } - - public static void Deserialize(AdId* key, Stream fromStream) - { - throw new InvalidOperationException(); - } - public static void Free(AdId* key) - { - throw new InvalidOperationException(); - } - #endregion - } -} diff --git a/cs/playground/SumStore/ConcurrentRecoveryTest.cs b/cs/playground/SumStore/ConcurrentRecoveryTest.cs index 7b205e640..537da1734 100644 --- a/cs/playground/SumStore/ConcurrentRecoveryTest.cs +++ b/cs/playground/SumStore/ConcurrentRecoveryTest.cs @@ -24,7 +24,7 @@ public class ConcurrentRecoveryTest : IFasterRecoveryTest const long checkpointInterval = (1 << 22); int threadCount; int numActiveThreads; - ICustomFasterKv fht; + FasterKV fht; BlockingCollection inputArrays; List tokens; public ConcurrentRecoveryTest(int threadCount) @@ -32,12 +32,14 @@ public ConcurrentRecoveryTest(int threadCount) this.threadCount = threadCount; tokens = new List(); - var log = FasterFactory.CreateLogDevice("logs\\hlog"); + var log = Devices.CreateLogDevice("logs\\hlog"); // Create FASTER index - fht = 
FasterFactory.Create - - (keySpace, new LogSettings { LogDevice = log }, new CheckpointSettings { CheckpointDir = "logs" }); + fht = new FasterKV + + (keySpace, new Functions(), + new LogSettings { LogDevice = log }, + new CheckpointSettings { CheckpointDir = "logs" }); numActiveThreads = 0; inputArrays = new BlockingCollection(); @@ -45,7 +47,7 @@ public ConcurrentRecoveryTest(int threadCount) Prepare(); } - public unsafe void Prepare() + public void Prepare() { Console.WriteLine("Creating Input Arrays"); @@ -70,7 +72,7 @@ public unsafe void Prepare() } } - private unsafe void CreateInputArrays(int threadId) + private void CreateInputArrays(int threadId) { var inputArray = new Input[numOps]; for (int i = 0; i < numOps; i++) @@ -83,7 +85,7 @@ private unsafe void CreateInputArrays(int threadId) } - public unsafe void Populate() + public void Populate() { Thread[] workers = new Thread[threadCount]; for (int idx = 0; idx < threadCount; ++idx) @@ -114,12 +116,10 @@ public unsafe void Populate() } } - private unsafe void PopulateWorker(int threadId) + private void PopulateWorker(int threadId) { Native32.AffinitizeThreadRoundRobin((uint)threadId); - Empty context; - var success = inputArrays.TryTake(out Input[] inputArray); if(!success) { @@ -133,29 +133,25 @@ private unsafe void PopulateWorker(int threadId) Interlocked.Increment(ref numActiveThreads); // Process the batch of input data - fixed (Input* input = inputArray) + for (long i = 0; i < numOps; i++) { - for (long i = 0; i < numOps; i++) - { - fht.RMW(&((input + i)->adId), input + i, &context, i); + fht.RMW(ref inputArray[i].adId, ref inputArray[i], Empty.Default, i); - - if ((i+1) % checkpointInterval == 0 && numActiveThreads == threadCount) + if ((i+1) % checkpointInterval == 0 && numActiveThreads == threadCount) + { + if(fht.TakeFullCheckpoint(out Guid token)) { - if(fht.TakeFullCheckpoint(out Guid token)) - { - tokens.Add(token); - } + tokens.Add(token); } + } - if (i % completePendingInterval == 0) - { - fht.CompletePending(false); - } - else if (i % refreshInterval == 0) - { - fht.Refresh(); - } + if (i % completePendingInterval == 0) + { + fht.CompletePending(false); + } + else if (i % refreshInterval == 0) + { + fht.Refresh(); } } @@ -171,7 +167,7 @@ private unsafe void PopulateWorker(int threadId) } - public unsafe void Continue() + public void Continue() { Console.WriteLine("Ready to Run. version to recover? 
[Enter]"); var line = Console.ReadLine(); @@ -209,12 +205,10 @@ public unsafe void Continue() } } - private unsafe void ContinueWorker(int threadId, Guid guid) + private void ContinueWorker(int threadId, Guid guid) { Native32.AffinitizeThreadRoundRobin((uint)threadId); - Empty context; - var success = inputArrays.TryTake(out Input[] inputArray); if (!success) { @@ -230,28 +224,25 @@ private unsafe void ContinueWorker(int threadId, Guid guid) Console.WriteLine("Thread {0} starting from {1}", threadId, startNum + 1); // Prpcess the batch of input data - fixed (Input* input = inputArray) + for (long i = startNum + 1; i < numOps; i++) { - for (long i = startNum + 1; i < numOps; i++) - { - fht.RMW(&((input + i)->adId), input + i, &context, i); + fht.RMW(ref inputArray[i].adId, ref inputArray[i], Empty.Default, i); - if ((i+1) % checkpointInterval == 0 && numActiveThreads == threadCount) + if ((i+1) % checkpointInterval == 0 && numActiveThreads == threadCount) + { + if (fht.TakeFullCheckpoint(out Guid token)) { - if (fht.TakeFullCheckpoint(out Guid token)) - { - Console.WriteLine("Calling TakeCheckpoint"); - } + Console.WriteLine("Calling TakeCheckpoint"); } + } - if (i % completePendingInterval == 0) - { - fht.CompletePending(false); - } - else if (i % refreshInterval == 0) - { - fht.Refresh(); - } + if (i % completePendingInterval == 0) + { + fht.CompletePending(false); + } + else if (i % refreshInterval == 0) + { + fht.Refresh(); } } @@ -266,13 +257,12 @@ private unsafe void ContinueWorker(int threadId, Guid guid) Console.WriteLine("Populate successful on thread {0}", threadId); } - public unsafe void RecoverAndTest(Guid indexToken, Guid hybridLogToken) + public void RecoverAndTest(Guid indexToken, Guid hybridLogToken) { // Recover fht.Recover(indexToken, hybridLogToken); // Create array for reading - Empty context; var inputArray = new Input[numUniqueKeys]; for (int i = 0; i < numUniqueKeys; i++) { @@ -282,14 +272,14 @@ public unsafe void RecoverAndTest(Guid indexToken, Guid hybridLogToken) // Register with thread fht.StartSession(); + Input input = default(Input); + Output output = default(Output); // Issue read requests - fixed (Input* input = inputArray) + for (var i = 0; i < numUniqueKeys; i++) { - for (var i = 0; i < numUniqueKeys; i++) - { - fht.Read(&((input + i)->adId), null, (Output*)&((input + i)->numClicks), &context, i); - } + var status = fht.Read(ref inputArray[i].adId, ref input, ref output, Empty.Default, i); + inputArray[i].numClicks = output.value; } // Complete all pending requests diff --git a/cs/playground/SumStore/ConcurrentTest.cs b/cs/playground/SumStore/ConcurrentTest.cs index 56f3bfd40..3a5be7049 100644 --- a/cs/playground/SumStore/ConcurrentTest.cs +++ b/cs/playground/SumStore/ConcurrentTest.cs @@ -24,7 +24,7 @@ public class ConcurrentTest: IFasterRecoveryTest const long checkpointInterval = (1 << 22); readonly int threadCount; int numActiveThreads; - ICustomFasterKv fht; + FasterKV fht; BlockingCollection inputArrays; readonly long[] threadNumOps; @@ -33,10 +33,12 @@ public ConcurrentTest(int threadCount) this.threadCount = threadCount; // Create FASTER index - var log = FasterFactory.CreateLogDevice("logs\\hlog"); - fht = FasterFactory.Create - - (keySpace, new LogSettings { LogDevice = log }, new CheckpointSettings { CheckpointDir = "logs" }); + var log = Devices.CreateLogDevice("logs\\hlog"); + fht = new FasterKV + + (keySpace, new Functions(), + new LogSettings { LogDevice = log }, + new CheckpointSettings { CheckpointDir = "logs" }); numActiveThreads = 0; 
inputArrays = new BlockingCollection(); @@ -44,7 +46,7 @@ public ConcurrentTest(int threadCount) Prepare(); } - public unsafe void Prepare() + public void Prepare() { Console.WriteLine("Creating Input Arrays"); @@ -69,7 +71,7 @@ public unsafe void Prepare() } } - private unsafe void CreateInputArrays(int threadId) + private void CreateInputArrays(int threadId) { var inputArray = new Input[numOps]; for (int i = 0; i < numOps; i++) @@ -82,7 +84,7 @@ private unsafe void CreateInputArrays(int threadId) } - public unsafe void Populate() + public void Populate() { Thread[] workers = new Thread[threadCount]; for (int idx = 0; idx < threadCount; ++idx) @@ -109,12 +111,10 @@ public unsafe void Populate() Test(); } - private unsafe void PopulateWorker(int threadId) + private void PopulateWorker(int threadId) { Native32.AffinitizeThreadRoundRobin((uint)threadId); - Empty context; - var success = inputArrays.TryTake(out Input[] inputArray); if(!success) { @@ -131,20 +131,17 @@ private unsafe void PopulateWorker(int threadId) var random = new Random(threadId + 1); threadNumOps[threadId] = (numOps / 2) + random.Next() % (numOps / 4); - fixed (Input* input = inputArray) + for (long i = 0; i < threadNumOps[threadId]; i++) { - for (long i = 0; i < threadNumOps[threadId]; i++) - { - fht.RMW(&((input + i)->adId), input + i, &context, i); + fht.RMW(ref inputArray[i].adId, ref inputArray[i], Empty.Default, i); - if (i % completePendingInterval == 0) - { - fht.CompletePending(false); - } - else if (i % refreshInterval == 0) - { - fht.Refresh(); - } + if (i % completePendingInterval == 0) + { + fht.CompletePending(false); + } + else if (i % refreshInterval == 0) + { + fht.Refresh(); } } @@ -159,11 +156,9 @@ private unsafe void PopulateWorker(int threadId) Console.WriteLine("Populate successful on thread {0}", threadId); } - public unsafe void Test() + public void Test() { - // Create array for reading - Empty context; var inputArray = new Input[numUniqueKeys]; for (int i = 0; i < numUniqueKeys; i++) { @@ -173,14 +168,14 @@ public unsafe void Test() // Register with thread fht.StartSession(); + Input input = default(Input); + Output output = default(Output); // Issue read requests - fixed (Input* input = inputArray) + for (var i = 0; i < numUniqueKeys; i++) { - for (var i = 0; i < numUniqueKeys; i++) - { - fht.Read(&((input + i)->adId), null, (Output*)&((input + i)->numClicks), &context, i); - } + var status = fht.Read(ref inputArray[i].adId, ref input, ref output, Empty.Default, i); + inputArray[i].numClicks = output.value; } // Complete all pending requests diff --git a/cs/playground/SumStore/Functions.cs b/cs/playground/SumStore/Functions.cs deleted file mode 100644 index ee7a4c5c7..000000000 --- a/cs/playground/SumStore/Functions.cs +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. 
- -using FASTER.core; -using System; -using System.Diagnostics; -using System.Runtime.CompilerServices; -using System.Threading; - -namespace SumStore -{ - public unsafe class Functions - { - public static void RMWCompletionCallback(AdId* key, Input* input, Empty* ctx, Status status) - { - } - - public static void ReadCompletionCallback(AdId* key, Input* input, Output* output, Empty* ctx, Status status) - { - } - - public static void UpsertCompletionCallback(AdId* key, NumClicks* input, Empty* ctx) - { - } - - public static void PersistenceCallback(long thread_id, long serial_num) - { - Console.WriteLine("Thread {0} reports persistence until {1}", thread_id, serial_num); - } - - // Read functions - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void SingleReader(AdId* key, Input* input, NumClicks* value, Output* dst) - { - NumClicks.Copy(value, (NumClicks*)dst); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void ConcurrentReader(AdId* key, Input* input, NumClicks* value, Output* dst) - { - NumClicks.AcquireReadLock(value); - NumClicks.Copy(value, (NumClicks*)dst); - NumClicks.ReleaseReadLock(value); - } - - // Upsert functions - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void SingleWriter(AdId* key, NumClicks* src, NumClicks* dst) - { - NumClicks.Copy(src, dst); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void ConcurrentWriter(AdId* key, NumClicks* src, NumClicks* dst) - { - NumClicks.AcquireWriteLock(dst); - NumClicks.Copy(src, dst); - NumClicks.ReleaseWriteLock(dst); - } - - // RMW functions - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static int InitialValueLength(AdId* key, Input* input) - { - return NumClicks.GetLength(default(NumClicks*)); - } - - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void InitialUpdater(AdId* key, Input* input, NumClicks* value) - { - NumClicks.Copy(&input->numClicks, value); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void InPlaceUpdater(AdId* key, Input* input, NumClicks* value) - { - Interlocked.Add(ref value->numClicks, input->numClicks.numClicks); - } - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static void CopyUpdater(AdId* key, Input* input, NumClicks* oldValue, NumClicks* newValue) - { - newValue->numClicks += oldValue->numClicks + input->numClicks.numClicks; - } - } -} diff --git a/cs/playground/SumStore/ICustomFaster.cs b/cs/playground/SumStore/ICustomFaster.cs deleted file mode 100644 index 03e6d119f..000000000 --- a/cs/playground/SumStore/ICustomFaster.cs +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. 
- -using FASTER.core; -using System; - -namespace SumStore -{ - /// - /// Custom interface of FASTER for user-specified types - /// See cs\src\core\Index\FASTER\IFASTER.cs for template - /// - public unsafe interface ICustomFasterKv - { - /* Thread-related operations */ - Guid StartSession(); - long ContinueSession(Guid guid); - void StopSession(); - void Refresh(); - - /* Store Interface */ - Status Read(AdId* key, Input* input, Output* output, Empty* context, long lsn); - Status Upsert(AdId* key, NumClicks* value, Empty* context, long lsn); - Status RMW(AdId* key, Input* input, Empty* context, long lsn); - bool CompletePending(bool wait); - bool ShiftBeginAddress(long untilAddress); - - /* Recovery */ - bool TakeFullCheckpoint(out Guid token); - bool TakeIndexCheckpoint(out Guid token); - bool TakeHybridLogCheckpoint(out Guid token); - void Recover(Guid fullcheckpointToken); - void Recover(Guid indexToken, Guid hybridLogToken); - - /* Statistics */ - long LogTailAddress { get; } - long LogReadOnlyAddress { get; } - void DumpDistribution(); - } -} - diff --git a/cs/playground/SumStore/Input.cs b/cs/playground/SumStore/Input.cs deleted file mode 100644 index ee19fc9c8..000000000 --- a/cs/playground/SumStore/Input.cs +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. - -using System.Runtime.CompilerServices; - -namespace SumStore -{ - public unsafe struct Input - { - public AdId adId; - public NumClicks numClicks; - - public static Input* MoveToContext(Input* value) - { - return value; - } - - } -} diff --git a/cs/playground/SumStore/NumClicks.cs b/cs/playground/SumStore/NumClicks.cs deleted file mode 100644 index a6b2d4191..000000000 --- a/cs/playground/SumStore/NumClicks.cs +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. - -using System; -using System.IO; -using System.Runtime.CompilerServices; - -namespace SumStore -{ - public unsafe struct NumClicks - { - public const int physicalSize = sizeof(long); - public long numClicks; - - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public static int GetLength(NumClicks* input) - { - return physicalSize; - } - - public static void Copy(NumClicks* src, NumClicks* dst) - { - dst->numClicks = src->numClicks; - } - - // Shared read/write capabilities on value - public static void AcquireReadLock(NumClicks* value) - { - } - - public static void ReleaseReadLock(NumClicks* value) - { - } - - public static void AcquireWriteLock(NumClicks* value) - { - } - - public static void ReleaseWriteLock(NumClicks* value) - { - } - - public static NumClicks* MoveToContext(NumClicks* value) - { - return value; - } - - #region Serialization - public static bool HasObjectsToSerialize() - { - return false; - } - - public static void Serialize(NumClicks* key, Stream toStream) - { - throw new InvalidOperationException(); - } - - public static void Deserialize(NumClicks* key, Stream fromStream) - { - throw new InvalidOperationException(); - } - public static void Free(NumClicks* key) - { - throw new InvalidOperationException(); - } - #endregion - } -} diff --git a/cs/playground/SumStore/Output.cs b/cs/playground/SumStore/Output.cs deleted file mode 100644 index 48e719a18..000000000 --- a/cs/playground/SumStore/Output.cs +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. 
- -namespace SumStore -{ - public unsafe struct Output - { - public NumClicks value; - - public static Output* MoveToContext(Output* value) - { - return value; - } - - } -} diff --git a/cs/playground/SumStore/Program.cs b/cs/playground/SumStore/Program.cs index 4a1e664d7..31729d70a 100644 --- a/cs/playground/SumStore/Program.cs +++ b/cs/playground/SumStore/Program.cs @@ -20,7 +20,7 @@ public interface IFasterRecoveryTest } public class Program { - static unsafe void Main(string[] args) + static void Main(string[] args) { if (args.Length == 0) { diff --git a/cs/playground/SumStore/SingleThreadedRecoveryTest.cs b/cs/playground/SumStore/SingleThreadedRecoveryTest.cs index d0de67783..6128c5004 100644 --- a/cs/playground/SumStore/SingleThreadedRecoveryTest.cs +++ b/cs/playground/SumStore/SingleThreadedRecoveryTest.cs @@ -20,15 +20,17 @@ public class SingleThreadedRecoveryTest : IFasterRecoveryTest const long refreshInterval = (1 << 8); const long completePendingInterval = (1 << 12); const long checkpointInterval = (1 << 20); - ICustomFasterKv fht; + FasterKV fht; public SingleThreadedRecoveryTest() { // Create FASTER index - var log = FasterFactory.CreateLogDevice("logs\\hlog"); - fht = FasterFactory.Create - - (keySpace, new LogSettings { LogDevice = log }, new CheckpointSettings { CheckpointDir = "logs" }); + var log = Devices.CreateLogDevice("logs\\hlog"); + fht = new FasterKV + + (keySpace, new Functions(), + new LogSettings { LogDevice = log }, + new CheckpointSettings { CheckpointDir = "logs" }); } public void Continue() @@ -36,12 +38,10 @@ public void Continue() throw new NotImplementedException(); } - public unsafe void Populate() + public void Populate() { List tokens = new List(); - Empty context; - // Prepare the dataset var inputArray = new Input[numOps]; for (int i = 0; i < numOps; i++) @@ -54,28 +54,25 @@ public unsafe void Populate() fht.StartSession(); // Prpcess the batch of input data - fixed (Input* input = inputArray) + for (int i = 0; i < numOps; i++) { - for (int i = 0; i < numOps; i++) - { - fht.RMW(&((input + i)->adId), input + i, &context, i); + fht.RMW(ref inputArray[i].adId, ref inputArray[i], Empty.Default, i); - if (i % checkpointInterval == 0) + if (i % checkpointInterval == 0) + { + if(fht.TakeFullCheckpoint(out Guid token)) { - if(fht.TakeFullCheckpoint(out Guid token)) - { - tokens.Add(token); - } + tokens.Add(token); } + } - if (i % completePendingInterval == 0) - { - fht.CompletePending(false); - } - else if (i % refreshInterval == 0) - { - fht.Refresh(); - } + if (i % completePendingInterval == 0) + { + fht.CompletePending(false); + } + else if (i % refreshInterval == 0) + { + fht.Refresh(); } } @@ -93,13 +90,12 @@ public unsafe void Populate() Console.ReadLine(); } - public unsafe void RecoverAndTest(Guid indexToken, Guid hybridLogToken) + public void RecoverAndTest(Guid indexToken, Guid hybridLogToken) { // Recover fht.Recover(indexToken, hybridLogToken); // Create array for reading - Empty context; var inputArray = new Input[numUniqueKeys]; for (int i = 0; i < numUniqueKeys; i++) { @@ -109,14 +105,14 @@ public unsafe void RecoverAndTest(Guid indexToken, Guid hybridLogToken) // Register with thread fht.StartSession(); + Input input = default(Input); + Output output = default(Output); // Issue read requests - fixed (Input* input = inputArray) + for (var i = 0; i < numUniqueKeys; i++) { - for (var i = 0; i < numUniqueKeys; i++) - { - fht.Read(&((input + i)->adId), null, (Output*)&((input + i)->numClicks), &context, i); - } + var status = fht.Read(ref 
inputArray[i].adId, ref input, ref output, Empty.Default, i); + inputArray[i].numClicks = output.value; } // Complete all pending requests diff --git a/cs/playground/SumStore/SumStoreTypes.cs b/cs/playground/SumStore/SumStoreTypes.cs new file mode 100644 index 000000000..c0b95b814 --- /dev/null +++ b/cs/playground/SumStore/SumStoreTypes.cs @@ -0,0 +1,104 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using System; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using System.Collections.Generic; +using System.Linq; +using FASTER.core; +using System.Runtime.CompilerServices; +using System.IO; +using System.Diagnostics; + +namespace SumStore +{ + public struct AdId : IFasterEqualityComparer + { + public long adId; + + public long GetHashCode64(ref AdId key) + { + return Utility.GetHashCode(key.adId); + } + public bool Equals(ref AdId k1, ref AdId k2) + { + return k1.adId == k2.adId; + } + } + + public struct Input + { + public AdId adId; + public NumClicks numClicks; + } + + public struct NumClicks + { + public long numClicks; + } + + public struct Output + { + public NumClicks value; + } + + public class Functions : IFunctions + { + public void RMWCompletionCallback(ref AdId key, ref Input input, Empty ctx, Status status) + { + } + + public void ReadCompletionCallback(ref AdId key, ref Input input, ref Output output, Empty ctx, Status status) + { + } + + public void UpsertCompletionCallback(ref AdId key, ref NumClicks input, Empty ctx) + { + } + + public void CheckpointCompletionCallback(Guid sessionId, long serialNum) + { + Console.WriteLine("Session {0} reports persistence until {1}", sessionId, serialNum); + } + + // Read functions + public void SingleReader(ref AdId key, ref Input input, ref NumClicks value, ref Output dst) + { + dst.value = value; + } + + public void ConcurrentReader(ref AdId key, ref Input input, ref NumClicks value, ref Output dst) + { + dst.value = value; + } + + // Upsert functions + public void SingleWriter(ref AdId key, ref NumClicks src, ref NumClicks dst) + { + dst = src; + } + + public void ConcurrentWriter(ref AdId key, ref NumClicks src, ref NumClicks dst) + { + dst = src; + } + + // RMW functions + public void InitialUpdater(ref AdId key, ref Input input, ref NumClicks value) + { + value = input.numClicks; + } + + public void InPlaceUpdater(ref AdId key, ref Input input, ref NumClicks value) + { + Interlocked.Add(ref value.numClicks, input.numClicks.numClicks); + } + + public void CopyUpdater(ref AdId key, ref Input input, ref NumClicks oldValue, ref NumClicks newValue) + { + newValue.numClicks += oldValue.numClicks + input.numClicks.numClicks; + } + } +} diff --git a/cs/src/core/Allocator/AllocatorBase.cs b/cs/src/core/Allocator/AllocatorBase.cs new file mode 100644 index 000000000..26e0afa0f --- /dev/null +++ b/cs/src/core/Allocator/AllocatorBase.cs @@ -0,0 +1,1350 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
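// Illustrative sketch (not part of the patch): how the SumStore Functions defined above are
// driven by a session. The pattern mirrors the populate loops in ConcurrentTest and
// SingleThreadedRecoveryTest earlier in this diff: the first RMW for a key runs
// InitialUpdater, later RMWs run InPlaceUpdater (or CopyUpdater when the record can no longer
// be updated in place), and the session must periodically Refresh and drain pending
// operations. Interval constants below are illustrative; generic argument order is assumed
// as in the samples.
static void PopulateSketch(FasterKV<AdId, NumClicks, Input, Output, Empty, Functions> fht,
                           Input[] inputArray)
{
    fht.StartSession();
    for (long i = 0; i < inputArray.Length; i++)
    {
        fht.RMW(ref inputArray[i].adId, ref inputArray[i], Empty.Default, i);

        if (i % (1 << 12) == 0)
            fht.CompletePending(false);   // opportunistically drain pending operations
        else if (i % (1 << 8) == 0)
            fht.Refresh();                // keep the epoch current (liveness requirement)
    }
    fht.CompletePending(true);            // spin-wait for all outstanding operations
    fht.StopSession();
}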
+ +using System; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Runtime.InteropServices; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Linq.Expressions; +using System.IO; +using System.Diagnostics; + +namespace FASTER.core +{ + internal enum PMMFlushStatus : int { Flushed, InProgress }; + + internal enum PMMCloseStatus : int { Closed, Open }; + + internal struct FullPageStatus + { + public long LastFlushedUntilAddress; + public FlushCloseStatus PageFlushCloseStatus; + } + + [StructLayout(LayoutKind.Explicit)] + internal struct FlushCloseStatus + { + [FieldOffset(0)] + public PMMFlushStatus PageFlushStatus; + [FieldOffset(4)] + public PMMCloseStatus PageCloseStatus; + [FieldOffset(0)] + public long value; + } + + [StructLayout(LayoutKind.Explicit)] + internal struct PageOffset + { + [FieldOffset(0)] + public int Offset; + [FieldOffset(4)] + public int Page; + [FieldOffset(0)] + public long PageAndOffset; + } + + /// + /// Base class for hybrid log memory allocator + /// + /// + /// + public unsafe abstract class AllocatorBase : IDisposable + where Key : new() + where Value : new() + { + /// + /// Epoch information + /// + protected LightEpoch epoch; + + /// + /// Comparer + /// + protected readonly IFasterEqualityComparer comparer; + + #region Protected size definitions + /// + /// Buffer size + /// + protected readonly int BufferSize; + /// + /// Log page size + /// + protected readonly int LogPageSizeBits; + + /// + /// Page size + /// + protected readonly int PageSize; + /// + /// Page size mask + /// + protected readonly int PageSizeMask; + /// + /// Buffer size mask + /// + protected readonly int BufferSizeMask; + /// + /// Aligned page size in bytes + /// + protected readonly int AlignedPageSizeBytes; + + /// + /// Total hybrid log size (bits) + /// + protected readonly int LogTotalSizeBits; + /// + /// Total hybrid log size (bytes) + /// + protected readonly long LogTotalSizeBytes; + + /// + /// Segment size in bits + /// + protected readonly int LogSegmentSizeBits; + /// + /// Segment size + /// + protected readonly long SegmentSize; + /// + /// Segment buffer size + /// + protected readonly int SegmentBufferSize; + + /// + /// HeadOffset lag (from tail) + /// + protected const int HeadOffsetLagNumPages = 4; + /// + /// HeadOffset lag size + /// + protected readonly int HeadOffsetLagSize; + /// + /// HeadOFfset lag address + /// + protected readonly long HeadOffsetLagAddress; + + /// + /// Log mutable fraction + /// + protected readonly double LogMutableFraction; + /// + /// ReadOnlyOffset lag (from tail) + /// + protected readonly long ReadOnlyLagAddress; + + #endregion + + #region Public addresses + /// + /// Read-only address + /// + public long ReadOnlyAddress; + + /// + /// Safe read-only address + /// + public long SafeReadOnlyAddress; + + /// + /// Head address + /// + public long HeadAddress; + + /// + /// Safe head address + /// + public long SafeHeadAddress; + + /// + /// Flushed until address + /// + public long FlushedUntilAddress; + + /// + /// Begin address + /// + public long BeginAddress; + + #endregion + + #region Protected device info + /// + /// Device + /// + protected readonly IDevice device; + /// + /// Sector size + /// + protected readonly int sectorSize; + #endregion + + #region Private page metadata + /// + /// Index in circular buffer, of the current tail page + /// + private volatile int TailPageIndex; + + // Array that indicates the status of each buffer page + internal readonly 
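The FlushCloseStatus and PageOffset structs above use explicit layout to overlay two 32-bit fields on a single 64-bit field, so a page's flush/close state, or the tail's page and offset pair, can be read and updated with one atomic operation on the long view, exactly as Allocate() and the flush callbacks do later in this file. A minimal standalone sketch of the same trick; the names are hypothetical, and the low/high mapping assumes a little-endian platform.

// Illustrative only: the [FieldOffset] union trick behind PageOffset and FlushCloseStatus.
using System;
using System.Runtime.InteropServices;
using System.Threading;

[StructLayout(LayoutKind.Explicit)]
struct PageAndOffsetDemo
{
    [FieldOffset(0)] public int Offset;     // low 32 bits on little-endian
    [FieldOffset(4)] public int Page;       // high 32 bits
    [FieldOffset(0)] public long PageAndOffset;
}

static class UnionDemo
{
    static PageAndOffsetDemo tail;

    static void Main()
    {
        tail.Page = 3;
        tail.Offset = 100;

        // Reserve 64 bytes with one atomic add on the long view; the page half is
        // untouched as long as the offset does not overflow 32 bits.
        long snapshot = Interlocked.Add(ref tail.PageAndOffset, 64);

        var local = new PageAndOffsetDemo { PageAndOffset = snapshot };
        Console.WriteLine($"page={local.Page}, new offset={local.Offset}, reserved at {local.Offset - 64}");
    }
}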
FullPageStatus[] PageStatusIndicator; + + /// + /// Global address of the current tail (next element to be allocated from the circular buffer) + /// + private PageOffset TailPageOffset; + + /// + /// Number of pending reads + /// + private static int numPendingReads = 0; + #endregion + + /// + /// Read buffer pool + /// + protected SectorAlignedBufferPool readBufferPool; + + #region Abstract methods + /// + /// Initialize + /// + public abstract void Initialize(); + /// + /// Get start logical address + /// + /// + /// + public abstract long GetStartLogicalAddress(long page); + /// + /// Get physical address + /// + /// + /// + public abstract long GetPhysicalAddress(long newLogicalAddress); + /// + /// Get address info + /// + /// + /// + public abstract ref RecordInfo GetInfo(long physicalAddress); + + /// + /// Get info from byte pointer + /// + /// + /// + public abstract ref RecordInfo GetInfoFromBytePointer(byte* ptr); + + /// + /// Get key + /// + /// + /// + public abstract ref Key GetKey(long physicalAddress); + /// + /// Get value + /// + /// + /// + public abstract ref Value GetValue(long physicalAddress); + /// + /// Get address info for key + /// + /// + /// + public abstract AddressInfo* GetKeyAddressInfo(long physicalAddress); + /// + /// Get address info for value + /// + /// + /// + public abstract AddressInfo* GetValueAddressInfo(long physicalAddress); + + /// + /// Get record size + /// + /// + /// + public abstract int GetRecordSize(long physicalAddress); + /// + /// Get average record size + /// + /// + public abstract int GetAverageRecordSize(); + /// + /// Get initial record size + /// + /// + /// + /// + /// + public abstract int GetInitialRecordSize(ref Key key, ref Input input); + /// + /// Get record size + /// + /// + /// + /// + public abstract int GetRecordSize(ref Key key, ref Value value); + + /// + /// Allocate page + /// + /// + protected abstract void AllocatePage(int index); + /// + /// Whether page is allocated + /// + /// + /// + protected abstract bool IsAllocated(int pageIndex); + /// + /// Populate page + /// + /// + /// + /// + internal abstract void PopulatePage(byte* src, int required_bytes, long destinationPage); + /// + /// Write async to device + /// + /// + /// + /// + /// + /// + /// + /// + protected abstract void WriteAsyncToDevice(long startPage, long flushPage, IOCompletionCallback callback, PageAsyncFlushResult result, IDevice device, IDevice objectLogDevice); + /// + /// Read objects to memory (async) + /// + /// + /// + /// + /// + /// + protected abstract void AsyncReadRecordObjectsToMemory(long fromLogical, int numBytes, IOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default(SectorAlignedMemory)); + /// + /// Read page (async) + /// + /// + /// + /// + /// + /// + /// + /// + /// + protected abstract void ReadAsync(ulong alignedSourceAddress, int destinationPageIndex, uint aligned_read_length, IOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice); + /// + /// Clear page + /// + /// + /// + protected abstract void ClearPage(int page, bool pageZero); + /// + /// Write page (async) + /// + /// + /// + /// + /// + protected abstract void WriteAsync(long flushPage, IOCompletionCallback callback, PageAsyncFlushResult asyncResult); + /// + /// Retrieve full record + /// + /// + /// + /// + protected abstract bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx); + /// + /// Whether key has objects + /// + /// + public abstract bool 
KeyHasObjects(); + + /// + /// Whether value has objects + /// + /// + public abstract bool ValueHasObjects(); + + /// + /// Get segment offsets + /// + /// + public abstract long[] GetSegmentOffsets(); + #endregion + + /// + /// Instantiate base allocator + /// + /// + /// + public AllocatorBase(LogSettings settings, IFasterEqualityComparer comparer) + { + this.comparer = comparer; + settings.LogDevice.Initialize(1L << settings.SegmentSizeBits); + settings.ObjectLogDevice?.Initialize(1L << settings.SegmentSizeBits); + + // Page size + LogPageSizeBits = settings.PageSizeBits; + PageSize = 1 << LogPageSizeBits; + PageSizeMask = PageSize - 1; + + // Total HLOG size + LogTotalSizeBits = settings.MemorySizeBits; + LogTotalSizeBytes = 1L << LogTotalSizeBits; + BufferSize = (int)(LogTotalSizeBytes / (1L << LogPageSizeBits)); + BufferSizeMask = BufferSize - 1; + + // HeadOffset lag (from tail) + HeadOffsetLagSize = BufferSize - HeadOffsetLagNumPages; + HeadOffsetLagAddress = (long)HeadOffsetLagSize << LogPageSizeBits; + + // ReadOnlyOffset lag (from tail) + LogMutableFraction = settings.MutableFraction; + ReadOnlyLagAddress = (long)(LogMutableFraction * BufferSize) << LogPageSizeBits; + + // Segment size + LogSegmentSizeBits = settings.SegmentSizeBits; + SegmentSize = 1 << LogSegmentSizeBits; + SegmentBufferSize = 1 + (LogTotalSizeBytes / SegmentSize < 1 ? 1 : (int)(LogTotalSizeBytes / SegmentSize)); + + if (BufferSize < 16) + { + throw new Exception("HLOG buffer must be at least 16 pages"); + } + + PageStatusIndicator = new FullPageStatus[BufferSize]; + + device = settings.LogDevice; + sectorSize = (int)device.SectorSize; + AlignedPageSizeBytes = ((PageSize + (sectorSize - 1)) & ~(sectorSize - 1)); + } + + /// + /// Initialize allocator + /// + /// + protected void Initialize(long firstValidAddress) + { + readBufferPool = SectorAlignedBufferPool.GetPool(1, sectorSize); + + long tailPage = firstValidAddress >> LogPageSizeBits; + int tailPageIndex = (int)(tailPage % BufferSize); + Debug.Assert(tailPageIndex == 0); + AllocatePage(tailPageIndex); + + // Allocate next page as well + if (firstValidAddress > 0) + AllocatePage(tailPageIndex + 1); + + SafeReadOnlyAddress = firstValidAddress; + ReadOnlyAddress = firstValidAddress; + SafeHeadAddress = firstValidAddress; + HeadAddress = firstValidAddress; + FlushedUntilAddress = firstValidAddress; + BeginAddress = firstValidAddress; + + TailPageOffset.Page = (int)(firstValidAddress >> LogPageSizeBits); + TailPageOffset.Offset = (int)(firstValidAddress & PageSizeMask); + + TailPageIndex = 0; + } + + /// + /// Dispose allocator + /// + public virtual void Dispose() + { + for (int i=0; i + /// Segment size + /// + /// + public long GetSegmentSize() + { + return SegmentSize; + } + + /// + /// Get tail address + /// + /// + public long GetTailAddress() + { + var local = TailPageOffset; + return ((long)local.Page << LogPageSizeBits) | (uint)local.Offset; + } + + /// + /// Get page + /// + /// + /// + public long GetPage(long logicalAddress) + { + return (logicalAddress >> LogPageSizeBits); + } + + /// + /// Get page index for page + /// + /// + /// + public int GetPageIndexForPage(long page) + { + return (int)(page % BufferSize); + } + + /// + /// Get page index for address + /// + /// + /// + public int GetPageIndexForAddress(long address) + { + return (int)((address >> LogPageSizeBits) % BufferSize); + } + + /// + /// Get capacity (number of pages) + /// + /// + public int GetCapacityNumPages() + { + return BufferSize; + } + + + /// + /// Get page size + /// + 
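The constructor above derives every size from the bit counts in LogSettings. As a worked example (the settings below are hypothetical, not defaults shipped with this change), a 512 MB in-memory log with 32 MB pages yields a 16-page circular buffer, a 12-page head lag, and a 14-page mutable region.

// Illustrative only: the constructor's size arithmetic for one hypothetical configuration.
using System;

static class SizeMath
{
    static void Main()
    {
        int pageSizeBits = 25, memorySizeBits = 29, segmentSizeBits = 30;
        double mutableFraction = 0.9;

        int pageSize = 1 << pageSizeBits;                        // 32 MB pages
        long totalBytes = 1L << memorySizeBits;                  // 512 MB in-memory log
        int bufferSize = (int)(totalBytes / pageSize);           // 16 pages in the circular buffer
        int headLagSize = bufferSize - 4;                        // HeadOffsetLagNumPages = 4
        long headLagAddress = (long)headLagSize << pageSizeBits;                        // head trails tail by 12 pages
        long readOnlyLagAddress = (long)(mutableFraction * bufferSize) << pageSizeBits; // 14 pages stay mutable
        int segmentBufferSize = 1 + (totalBytes / (1L << segmentSizeBits) < 1
            ? 1 : (int)(totalBytes / (1L << segmentSizeBits)));

        Console.WriteLine($"pages={bufferSize}, headLag={headLagAddress >> pageSizeBits} pages, " +
                          $"readOnlyLag={readOnlyLagAddress >> pageSizeBits} pages, segBuf={segmentBufferSize}");
    }
}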
/// + public long GetPageSize() + { + return PageSize; + } + + /// + /// Get offset in page + /// + /// + /// + public long GetOffsetInPage(long address) + { + return address & PageSizeMask; + } + + /// + /// Get offset lag in pages + /// + /// + public long GetHeadOffsetLagInPages() + { + return HeadOffsetLagSize; + } + + /// + /// Key function used to allocate memory for a specified number of items + /// + /// + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public long Allocate(int numSlots = 1) + { + PageOffset localTailPageOffset = default(PageOffset); + + // Determine insertion index. + // ReSharper disable once CSharpWarnings::CS0420 +#pragma warning disable 420 + localTailPageOffset.PageAndOffset = Interlocked.Add(ref TailPageOffset.PageAndOffset, numSlots); +#pragma warning restore 420 + + int page = localTailPageOffset.Page; + int offset = localTailPageOffset.Offset - numSlots; + + #region HANDLE PAGE OVERFLOW + /* To prove correctness of the following modifications + * done to TailPageOffset and the allocation itself, + * we should use the fact that only one thread will have any + * of the following cases since it is a counter and we spin-wait + * until the tail is folded onto next page accordingly. + */ + if (localTailPageOffset.Offset >= PageSize) + { + if (offset >= PageSize) + { + //The tail offset value was more than page size before atomic add + //We consider that a failed attempt and retry again + var spin = new SpinWait(); + do + { + //Just to give some more time to the thread + // that is handling this overflow + while (TailPageOffset.Offset >= PageSize) + { + spin.SpinOnce(); + } + + // ReSharper disable once CSharpWarnings::CS0420 +#pragma warning disable 420 + localTailPageOffset.PageAndOffset = Interlocked.Add(ref TailPageOffset.PageAndOffset, numSlots); +#pragma warning restore 420 + + page = localTailPageOffset.Page; + offset = localTailPageOffset.Offset - numSlots; + } while (offset >= PageSize); + } + + + if (localTailPageOffset.Offset == PageSize) + { + //Folding over at page boundary + localTailPageOffset.Page++; + localTailPageOffset.Offset = 0; + TailPageOffset = localTailPageOffset; + } + else if (localTailPageOffset.Offset >= PageSize) + { + //Overflows not allowed. We allot same space in next page. + localTailPageOffset.Page++; + localTailPageOffset.Offset = numSlots; + TailPageOffset = localTailPageOffset; + + page = localTailPageOffset.Page; + offset = 0; + } + } + #endregion + + long address = (((long)page) << LogPageSizeBits) | ((long)offset); + + // Check if TailPageIndex is appropriate and allocated! + int pageIndex = page % BufferSize; + + if (TailPageIndex == pageIndex) + { + return (address); + } + + //Invert the address if either the previous page is not flushed or if it is null + if ((PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageFlushStatus != PMMFlushStatus.Flushed) || + (PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageCloseStatus != PMMCloseStatus.Closed) || + (!IsAllocated(pageIndex))) + { + address = -address; + } + + // Update the read-only so that we can get more space for the tail + if (offset == 0) + { + if (address >= 0) + { + TailPageIndex = pageIndex; + Interlocked.MemoryBarrier(); + } + + long newPage = page + 1; + int newPageIndex = (int)((page + 1) % BufferSize); + + long tailAddress = (address < 0 ? 
-address : address); + PageAlignedShiftReadOnlyAddress(tailAddress); + PageAlignedShiftHeadAddress(tailAddress); + + if ((!IsAllocated(newPageIndex))) + { + AllocatePage(newPageIndex); + } + } + + return (address); + } + + /// + /// If allocator cannot allocate new memory as the head has not shifted or the previous page + /// is not yet closed, it allocates but returns the negative address. + /// This function is invoked to check if the address previously allocated has become valid to be used + /// + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public void CheckForAllocateComplete(ref long address) + { + if (address >= 0) + { + throw new Exception("Address already allocated!"); + } + + PageOffset p = default(PageOffset); + p.Page = (int)((-address) >> LogPageSizeBits); + p.Offset = (int)((-address) & PageSizeMask); + + //Check write cache + int pageIndex = p.Page % BufferSize; + if (TailPageIndex == pageIndex) + { + address = -address; + return; + } + + //Check if we can move the head offset + long currentTailAddress = GetTailAddress(); + PageAlignedShiftHeadAddress(currentTailAddress); + + //Check if I can allocate pageIndex at all + if ((PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageFlushStatus != PMMFlushStatus.Flushed) || + (PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageCloseStatus != PMMCloseStatus.Closed) || + (!IsAllocated(pageIndex))) + { + return; + } + + //correct values and set write cache + address = -address; + if (p.Offset == 0) + { + TailPageIndex = pageIndex; + } + return; + } + + /// + /// Used by applications to make the current state of the database immutable quickly + /// + /// + public void ShiftReadOnlyToTail(out long tailAddress) + { + tailAddress = GetTailAddress(); + long localTailAddress = tailAddress; + long currentReadOnlyOffset = ReadOnlyAddress; + if (MonotonicUpdate(ref ReadOnlyAddress, tailAddress, out long oldReadOnlyOffset)) + { + epoch.BumpCurrentEpoch(() => OnPagesMarkedReadOnly(localTailAddress, false)); + } + } + + /// + /// Shift begin address + /// + /// + /// + public void ShiftBeginAddress(long oldBeginAddress, long newBeginAddress) + { + epoch.BumpCurrentEpoch(() + => DeleteAddressRange(oldBeginAddress, newBeginAddress)); + } + + /// + /// Delete address range + /// + /// + /// + protected virtual void DeleteAddressRange(long fromAddress, long toAddress) + { + device.DeleteAddressRange(fromAddress, toAddress); + } + + /// + /// Seal: make sure there are no longer any threads writing to the page + /// Flush: send page to secondary store + /// + /// + /// + public void OnPagesMarkedReadOnly(long newSafeReadOnlyAddress, bool waitForPendingFlushComplete = false) + { + if (MonotonicUpdate(ref SafeReadOnlyAddress, newSafeReadOnlyAddress, out long oldSafeReadOnlyAddress)) + { + Debug.WriteLine("SafeReadOnly shifted from {0:X} to {1:X}", oldSafeReadOnlyAddress, newSafeReadOnlyAddress); + long startPage = oldSafeReadOnlyAddress >> LogPageSizeBits; + + long endPage = (newSafeReadOnlyAddress >> LogPageSizeBits); + int numPages = (int)(endPage - startPage); + if (numPages > 10) + { + new Thread( + () => AsyncFlushPages(startPage, newSafeReadOnlyAddress)).Start(); + } + else + { + AsyncFlushPages(startPage, newSafeReadOnlyAddress); + } + } + } + + /// + /// Action to be performed for when all threads have + /// agreed that a page range is closed. 
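CheckForAllocateComplete above documents the contract behind the negative addresses returned by Allocate(): the slot is reserved, but its page is not yet safe to use until the earlier page occupying that buffer slot has been flushed and closed, so the caller spins, letting epoch actions drain, until the address flips positive. A hedged sketch of that calling pattern; the AllocateBlocking wrapper and its signature are not part of this patch, and it assumes LightEpoch.ProtectAndDrain is accessible to the caller.

// Illustrative only: honoring the negative-address protocol of Allocate().
using System.Threading;
using FASTER.core;

static class AllocateRetry
{
    // Reserve 'recordSize' bytes on the hybrid log, spinning until the slot's page is usable.
    public static long AllocateBlocking<Key, Value>(AllocatorBase<Key, Value> allocator, LightEpoch epoch, int recordSize)
        where Key : new() where Value : new()
    {
        long logicalAddress = allocator.Allocate(recordSize);
        if (logicalAddress >= 0) return logicalAddress;

        var spin = new SpinWait();
        while (logicalAddress < 0)
        {
            spin.SpinOnce();
            epoch.ProtectAndDrain();   // let pending flush/close actions make progress
            allocator.CheckForAllocateComplete(ref logicalAddress);
        }
        return logicalAddress;
    }
}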
+ /// + /// + public void OnPagesClosed(long newSafeHeadAddress) + { + if (MonotonicUpdate(ref SafeHeadAddress, newSafeHeadAddress, out long oldSafeHeadAddress)) + { + Debug.WriteLine("SafeHeadOffset shifted from {0:X} to {1:X}", oldSafeHeadAddress, newSafeHeadAddress); + + for (long closePageAddress = oldSafeHeadAddress; closePageAddress < newSafeHeadAddress; closePageAddress += PageSize) + { + int closePage = (int)((closePageAddress >> LogPageSizeBits) % BufferSize); + + if (!IsAllocated(closePage)) + { + AllocatePage(closePage); + } + + while (true) + { + var oldStatus = PageStatusIndicator[closePage].PageFlushCloseStatus; + if (oldStatus.PageFlushStatus == PMMFlushStatus.Flushed) + { + ClearPage(closePage, (closePageAddress >> LogPageSizeBits) == 0); + } + else + { + throw new Exception("Impossible"); + } + + var newStatus = oldStatus; + newStatus.PageCloseStatus = PMMCloseStatus.Closed; + if (oldStatus.value == Interlocked.CompareExchange(ref PageStatusIndicator[closePage].PageFlushCloseStatus.value, newStatus.value, oldStatus.value)) + { + break; + } + } + + // Necessary to propagate this change to other threads + Interlocked.MemoryBarrier(); + } + } + } + + /// + /// Called every time a new tail page is allocated. Here the read-only is + /// shifted only to page boundaries unlike ShiftReadOnlyToTail where shifting + /// can happen to any fine-grained address. + /// + /// + private void PageAlignedShiftReadOnlyAddress(long currentTailAddress) + { + long currentReadOnlyAddress = ReadOnlyAddress; + long pageAlignedTailAddress = currentTailAddress & ~PageSizeMask; + long desiredReadOnlyAddress = (pageAlignedTailAddress - ReadOnlyLagAddress); + if (MonotonicUpdate(ref ReadOnlyAddress, desiredReadOnlyAddress, out long oldReadOnlyAddress)) + { + Debug.WriteLine("Allocate: Moving read-only offset from {0:X} to {1:X}", oldReadOnlyAddress, desiredReadOnlyAddress); + epoch.BumpCurrentEpoch(() => OnPagesMarkedReadOnly(desiredReadOnlyAddress)); + } + } + + /// + /// Called whenever a new tail page is allocated or when the user is checking for a failed memory allocation + /// Tries to shift head address based on the head offset lag size. + /// + /// + private void PageAlignedShiftHeadAddress(long currentTailAddress) + { + //obtain local values of variables that can change + long currentHeadAddress = HeadAddress; + long currentFlushedUntilAddress = FlushedUntilAddress; + long pageAlignedTailAddress = currentTailAddress & ~PageSizeMask; + long desiredHeadAddress = (pageAlignedTailAddress - HeadOffsetLagAddress); + + long newHeadAddress = desiredHeadAddress; + if (currentFlushedUntilAddress < newHeadAddress) + { + newHeadAddress = currentFlushedUntilAddress; + } + newHeadAddress = newHeadAddress & ~PageSizeMask; + + if (MonotonicUpdate(ref HeadAddress, newHeadAddress, out long oldHeadAddress)) + { + Debug.WriteLine("Allocate: Moving head offset from {0:X} to {1:X}", oldHeadAddress, newHeadAddress); + epoch.BumpCurrentEpoch(() => OnPagesClosed(newHeadAddress)); + } + } + + /// + /// Every async flush callback tries to update the flushed until address to the latest value possible + /// Is there a better way to do this with enabling fine-grained addresses (not necessarily at page boundaries)? 
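To make the two page-aligned shift routines above concrete with the hypothetical 32 MB pages and 16-page buffer used earlier: if the tail sits in page 20, the page-aligned tail is 640 MB, so PageAlignedShiftReadOnlyAddress proposes ReadOnlyAddress = 640 MB minus 14 pages = 192 MB, while PageAlignedShiftHeadAddress proposes 640 MB minus 12 pages = 256 MB. Because the head is additionally clamped to FlushedUntilAddress, which cannot overtake the read-only boundary, the head in practice advances only to the flushed frontier (say 192 MB), and OnPagesClosed for that range runs as an epoch-bump action once every thread has refreshed past the current epoch.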
+ /// + protected void ShiftFlushedUntilAddress() + { + long currentFlushedUntilAddress = FlushedUntilAddress; + long page = GetPage(currentFlushedUntilAddress); + + bool update = false; + long pageLastFlushedAddress = PageStatusIndicator[(int)(page % BufferSize)].LastFlushedUntilAddress; + while (pageLastFlushedAddress >= currentFlushedUntilAddress) + { + currentFlushedUntilAddress = pageLastFlushedAddress; + update = true; + page++; + pageLastFlushedAddress = PageStatusIndicator[(int)(page % BufferSize)].LastFlushedUntilAddress; + } + + if (update) + { + MonotonicUpdate(ref FlushedUntilAddress, currentFlushedUntilAddress, out long oldFlushedUntilAddress); + } + } + + + + /// + /// Used by several functions to update the variable to newValue. Ignores if newValue is smaller or + /// than the current value. + /// + /// + /// + /// + /// + private bool MonotonicUpdate(ref long variable, long newValue, out long oldValue) + { + oldValue = variable; + while (oldValue < newValue) + { + var foundValue = Interlocked.CompareExchange(ref variable, newValue, oldValue); + if (foundValue == oldValue) + { + return true; + } + oldValue = foundValue; + } + return false; + } + + /// + /// Reset for recovery + /// + /// + /// + public void RecoveryReset(long tailAddress, long headAddress) + { + long tailPage = GetPage(tailAddress); + long offsetInPage = GetOffsetInPage(tailAddress); + TailPageOffset.Page = (int)tailPage; + TailPageOffset.Offset = (int)offsetInPage; + TailPageIndex = GetPageIndexForPage(TailPageOffset.Page); + + // issue read request to all pages until head lag + HeadAddress = headAddress; + SafeHeadAddress = headAddress; + FlushedUntilAddress = headAddress; + ReadOnlyAddress = tailAddress; + SafeReadOnlyAddress = tailAddress; + + for (var addr = headAddress; addr < tailAddress; addr += PageSize) + { + var pageIndex = GetPageIndexForAddress(addr); + PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageCloseStatus = PMMCloseStatus.Open; + } + } + + /// + /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read + /// the record efficiently into memory. 
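MonotonicUpdate above is the workhorse for all of these watermark advances: the shared variable only ever moves forward, and under contention exactly one racer wins for any given value. A self-contained sketch of the same idiom; the demo harness around it is hypothetical.

// Illustrative only: the compare-and-swap idiom behind MonotonicUpdate.
using System;
using System.Threading;
using System.Threading.Tasks;

static class MonotonicDemo
{
    static long flushedUntil = 0;

    static bool MonotonicUpdate(ref long variable, long newValue, out long oldValue)
    {
        oldValue = variable;
        while (oldValue < newValue)
        {
            var found = Interlocked.CompareExchange(ref variable, newValue, oldValue);
            if (found == oldValue) return true;    // we advanced the watermark
            oldValue = found;                       // someone else moved it; re-check
        }
        return false;                               // already at or beyond newValue
    }

    static void Main()
    {
        Parallel.For(0, 1000, i => MonotonicUpdate(ref flushedUntil, i * 512, out _));
        Console.WriteLine(flushedUntil);            // always 999 * 512, regardless of interleaving
    }
}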
+ /// + /// + /// + /// + /// + /// + internal void AsyncReadRecordToMemory(long fromLogical, int numBytes, IOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default(SectorAlignedMemory)) + { + ulong fileOffset = (ulong)(AlignedPageSizeBytes * (fromLogical >> LogPageSizeBits) + (fromLogical & PageSizeMask)); + ulong alignedFileOffset = (ulong)(((long)fileOffset / sectorSize) * sectorSize); + + uint alignedReadLength = (uint)((long)fileOffset + numBytes - (long)alignedFileOffset); + alignedReadLength = (uint)((alignedReadLength + (sectorSize - 1)) & ~(sectorSize - 1)); + + var record = readBufferPool.Get((int)alignedReadLength); + record.valid_offset = (int)(fileOffset - alignedFileOffset); + record.available_bytes = (int)(alignedReadLength - (fileOffset - alignedFileOffset)); + record.required_bytes = numBytes; + + var asyncResult = default(AsyncGetFromDiskResult>); + asyncResult.context = context; + asyncResult.context.record = record; + device.ReadAsync(alignedFileOffset, + (IntPtr)asyncResult.context.record.aligned_pointer, + alignedReadLength, + callback, + asyncResult); + } + + /// + /// Read pages from specified device + /// + /// + /// + /// + /// + /// + /// + /// + /// + public void AsyncReadPagesFromDevice( + long readPageStart, + int numPages, + IOCompletionCallback callback, + TContext context, + long devicePageOffset = 0, + IDevice logDevice = null, IDevice objectLogDevice = null) + { + AsyncReadPagesFromDevice(readPageStart, numPages, callback, context, + out CountdownEvent completed, devicePageOffset, logDevice, objectLogDevice); + } + + /// + /// Read pages from specified device + /// + /// + /// + /// + /// + /// + /// + /// + /// + /// + private void AsyncReadPagesFromDevice( + long readPageStart, + int numPages, + IOCompletionCallback callback, + TContext context, + out CountdownEvent completed, + long devicePageOffset = 0, + IDevice device = null, IDevice objectLogDevice = null) + { + var usedDevice = device; + IDevice usedObjlogDevice = objectLogDevice; + + if (device == null) + { + usedDevice = this.device; + } + + completed = new CountdownEvent(numPages); + for (long readPage = readPageStart; readPage < (readPageStart + numPages); readPage++) + { + int pageIndex = (int)(readPage % BufferSize); + if (!IsAllocated(pageIndex)) + { + // Allocate a new page + AllocatePage(pageIndex); + } + else + { + ClearPage(pageIndex, readPage == 0); + } + var asyncResult = new PageAsyncReadResult() + { + page = readPage, + context = context, + handle = completed, + count = 1 + }; + + ulong offsetInFile = (ulong)(AlignedPageSizeBytes * readPage); + + if (device != null) + offsetInFile = (ulong)(AlignedPageSizeBytes * (readPage - devicePageOffset)); + + ReadAsync(offsetInFile, pageIndex, (uint)PageSize, callback, asyncResult, usedDevice, usedObjlogDevice); + } + } + + /// + /// Flush page range to disk + /// Called when all threads have agreed that a page range is sealed. + /// + /// + /// + public void AsyncFlushPages(long startPage, long untilAddress) + { + long endPage = (untilAddress >> LogPageSizeBits); + int numPages = (int)(endPage - startPage); + long offsetInEndPage = GetOffsetInPage(untilAddress); + if (offsetInEndPage > 0) + { + numPages++; + } + + + /* Request asynchronous writes to the device. If waitForPendingFlushComplete + * is set, then a CountDownEvent is set in the callback handle. 
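AsyncReadRecordToMemory above rounds every request out to device sector boundaries before issuing the read; valid_offset and available_bytes then let the caller locate the record inside the over-read buffer. A worked example of that arithmetic; the sector size and offsets are hypothetical.

// Illustrative only: sector-alignment arithmetic for a single record read.
using System;

static class AlignmentMath
{
    static void Main()
    {
        int sectorSize = 512;
        long fileOffset = 12_345;      // where the record starts in the log file
        int numBytes = 100;            // bytes the caller actually needs

        long alignedFileOffset = (fileOffset / sectorSize) * sectorSize;              // 12_288
        long readLength = fileOffset + numBytes - alignedFileOffset;                  // 157
        long alignedReadLength = (readLength + (sectorSize - 1)) & ~(sectorSize - 1); // 512

        // The device reads 512 bytes starting at offset 12_288; the record begins at
        // valid_offset = 57 within that sector-aligned buffer.
        Console.WriteLine($"read {alignedReadLength} bytes at {alignedFileOffset}, valid_offset = {fileOffset - alignedFileOffset}");
    }
}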
+ */ + for (long flushPage = startPage; flushPage < (startPage + numPages); flushPage++) + { + long pageStartAddress = flushPage << LogPageSizeBits; + long pageEndAddress = (flushPage + 1) << LogPageSizeBits; + + var asyncResult = new PageAsyncFlushResult + { + page = flushPage, + count = 1 + }; + if (pageEndAddress > untilAddress) + { + asyncResult.partial = true; + asyncResult.untilAddress = untilAddress; + } + else + { + asyncResult.partial = false; + asyncResult.untilAddress = pageEndAddress; + + // Set status to in-progress + PageStatusIndicator[flushPage % BufferSize].PageFlushCloseStatus + = new FlushCloseStatus { PageFlushStatus = PMMFlushStatus.InProgress, PageCloseStatus = PMMCloseStatus.Open }; + } + + PageStatusIndicator[flushPage % BufferSize].LastFlushedUntilAddress = -1; + + WriteAsync(flushPage, AsyncFlushPageCallback, asyncResult); + } + } + + /// + /// Flush pages asynchronously + /// + /// + /// + /// + /// + /// + public void AsyncFlushPages( + long flushPageStart, + int numPages, + IOCompletionCallback callback, + TContext context) + { + for (long flushPage = flushPageStart; flushPage < (flushPageStart + numPages); flushPage++) + { + int pageIndex = GetPageIndexForPage(flushPage); + var asyncResult = new PageAsyncFlushResult() + { + page = flushPage, + context = context, + count = 1, + partial = false, + untilAddress = (flushPage + 1) << LogPageSizeBits + }; + + WriteAsync(flushPage, callback, asyncResult); + } + } + + /// + /// Flush pages from startPage (inclusive) to endPage (exclusive) + /// to specified log device and obj device + /// + /// + /// + /// + /// + /// + public void AsyncFlushPagesToDevice(long startPage, long endPage, IDevice device, IDevice objectLogDevice, out CountdownEvent completed) + { + int totalNumPages = (int)(endPage - startPage); + completed = new CountdownEvent(totalNumPages); + + for (long flushPage = startPage; flushPage < endPage; flushPage++) + { + var asyncResult = new PageAsyncFlushResult + { + handle = completed, + count = 1 + }; + + long pageStartAddress = flushPage << LogPageSizeBits; + long pageEndAddress = (flushPage + 1) << LogPageSizeBits; + + // Intended destination is flushPage + WriteAsyncToDevice(startPage, flushPage, AsyncFlushPageToDeviceCallback, asyncResult, device, objectLogDevice); + } + } + + /// + /// Async get from disk + /// + /// + /// + /// + /// + public void AsyncGetFromDisk(long fromLogical, + int numBytes, + AsyncIOContext context, + SectorAlignedMemory result = default(SectorAlignedMemory)) + { + while (numPendingReads > 120) + { + Thread.SpinWait(100); + + // Do not protect if we are not already protected + // E.g., we are in an IO thread + if (epoch.IsProtected()) + epoch.ProtectAndDrain(); + } + Interlocked.Increment(ref numPendingReads); + + if (result.buffer == null) + AsyncReadRecordToMemory(fromLogical, numBytes, AsyncGetFromDiskCallback, context, result); + else + AsyncReadRecordObjectsToMemory(fromLogical, numBytes, AsyncGetFromDiskCallback, context, result); + } + + private void AsyncGetFromDiskCallback(uint errorCode, uint numBytes, NativeOverlapped* overlap) + { + if (errorCode != 0) + { + Trace.TraceError("OverlappedStream GetQueuedCompletionStatus error: {0}", errorCode); + } + + var result = (AsyncGetFromDiskResult>)Overlapped.Unpack(overlap).AsyncResult; + Interlocked.Decrement(ref numPendingReads); + + var ctx = result.context; + + var record = ctx.record.GetValidPointer(); + int requiredBytes = GetRecordSize((long)record); + if (ctx.record.available_bytes >= requiredBytes) + { + // We 
have the complete record. + if (RetrievedFullRecord(record, ref ctx)) + { + if (comparer.Equals(ref ctx.request_key, ref ctx.key)) + { + // The keys are same, so I/O is complete + // ctx.record = result.record; + ctx.callbackQueue.Add(ctx); + } + else + { + var oldAddress = ctx.logicalAddress; + + //keys are not same. I/O is not complete + ctx.logicalAddress = GetInfoFromBytePointer(record).PreviousAddress; + if (ctx.logicalAddress != Constants.kInvalidAddress) + { + ctx.record.Return(); + ctx.record = ctx.objBuffer = default(SectorAlignedMemory); + AsyncGetFromDisk(ctx.logicalAddress, requiredBytes, ctx); + } + else + { + ctx.callbackQueue.Add(ctx); + } + } + } + } + else + { + ctx.record.Return(); + AsyncGetFromDisk(ctx.logicalAddress, requiredBytes, ctx); + } + + Overlapped.Free(overlap); + } + + /// + /// IOCompletion callback for page flush + /// + /// + /// + /// + private void AsyncFlushPageCallback(uint errorCode, uint numBytes, NativeOverlapped* overlap) + { + if (errorCode != 0) + { + Trace.TraceError("OverlappedStream GetQueuedCompletionStatus error: {0}", errorCode); + } + + // Set the page status to flushed + PageAsyncFlushResult result = (PageAsyncFlushResult)Overlapped.Unpack(overlap).AsyncResult; + + if (Interlocked.Decrement(ref result.count) == 0) + { + PageStatusIndicator[result.page % BufferSize].LastFlushedUntilAddress = result.untilAddress; + + if (!result.partial) + { + while (true) + { + var oldStatus = PageStatusIndicator[result.page % BufferSize].PageFlushCloseStatus; + if (oldStatus.PageCloseStatus == PMMCloseStatus.Closed) + { + ClearPage((int)(result.page % BufferSize), result.page == 0); + } + var newStatus = oldStatus; + newStatus.PageFlushStatus = PMMFlushStatus.Flushed; + if (oldStatus.value == Interlocked.CompareExchange(ref PageStatusIndicator[result.page % BufferSize].PageFlushCloseStatus.value, newStatus.value, oldStatus.value)) + { + break; + } + } + } + ShiftFlushedUntilAddress(); + result.Free(); + } + + Overlapped.Free(overlap); + } + + /// + /// IOCompletion callback for page flush + /// + /// + /// + /// + private void AsyncFlushPageToDeviceCallback(uint errorCode, uint numBytes, NativeOverlapped* overlap) + { + if (errorCode != 0) + { + Trace.TraceError("OverlappedStream GetQueuedCompletionStatus error: {0}", errorCode); + } + + PageAsyncFlushResult result = (PageAsyncFlushResult)Overlapped.Unpack(overlap).AsyncResult; + + if (Interlocked.Decrement(ref result.count) == 0) + { + result.Free(); + } + Overlapped.Free(overlap); + } + + /// + /// Shallow copy + /// + /// + /// + public virtual void ShallowCopy(ref Key src, ref Key dst) + { + dst = src; + } + + /// + /// Shallow copy + /// + /// + /// + public virtual void ShallowCopy(ref Value src, ref Value dst) + { + dst = src; + } + } +} diff --git a/cs/src/core/Allocator/AsyncIOContext.cs b/cs/src/core/Allocator/AsyncIOContext.cs index 4e9d9d9ff..3c17a9e51 100644 --- a/cs/src/core/Allocator/AsyncIOContext.cs +++ b/cs/src/core/Allocator/AsyncIOContext.cs @@ -9,7 +9,7 @@ namespace FASTER.core /// /// Async IO context for PMM /// - public unsafe struct AsyncIOContext + public unsafe struct AsyncIOContext { /// /// Id @@ -17,9 +17,19 @@ public unsafe struct AsyncIOContext public long id; /// - /// Key pointer + /// Key /// - public IntPtr key; + public Key request_key; + + /// + /// Retrieved key + /// + public Key key; + + /// + /// Retrieved value + /// + public Value value; /// /// Logical address @@ -39,6 +49,6 @@ public unsafe struct AsyncIOContext /// /// Callback queue /// - public 
BlockingCollection callbackQueue; + public BlockingCollection> callbackQueue; } } diff --git a/cs/src/core/Allocator/BlittableAllocator.cs b/cs/src/core/Allocator/BlittableAllocator.cs new file mode 100644 index 000000000..53e89b76b --- /dev/null +++ b/cs/src/core/Allocator/BlittableAllocator.cs @@ -0,0 +1,270 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using System; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Runtime.InteropServices; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Linq.Expressions; +using System.IO; +using System.Diagnostics; + +#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member + +namespace FASTER.core +{ + public unsafe sealed class BlittableAllocator : AllocatorBase + where Key : new() + where Value : new() + { + // Circular buffer definition + private byte[][] values; + private GCHandle[] handles; + private long[] pointers; + private readonly GCHandle ptrHandle; + private readonly long* nativePointers; + + // Record sizes + private static readonly int recordSize = Utility.GetSize(default(Record)); + private static readonly int keySize = Utility.GetSize(default(Key)); + private static readonly int valueSize = Utility.GetSize(default(Value)); + + public BlittableAllocator(LogSettings settings, IFasterEqualityComparer comparer) + : base(settings, comparer) + { + values = new byte[BufferSize][]; + handles = new GCHandle[BufferSize]; + pointers = new long[BufferSize]; + + epoch = LightEpoch.Instance; + + ptrHandle = GCHandle.Alloc(pointers, GCHandleType.Pinned); + nativePointers = (long*)ptrHandle.AddrOfPinnedObject(); + } + + public override void Initialize() + { + Initialize(Constants.kFirstValidAddress); + } + + public override ref RecordInfo GetInfo(long physicalAddress) + { + return ref Unsafe.AsRef((void*)physicalAddress); + } + + public override ref RecordInfo GetInfoFromBytePointer(byte* ptr) + { + return ref Unsafe.AsRef(ptr); + } + + public override ref Key GetKey(long physicalAddress) + { + return ref Unsafe.AsRef((byte*)physicalAddress + RecordInfo.GetLength()); + } + + public override ref Value GetValue(long physicalAddress) + { + return ref Unsafe.AsRef((byte*)physicalAddress + RecordInfo.GetLength() + keySize); + } + + public override int GetRecordSize(long physicalAddress) + { + return recordSize; + } + + public override int GetAverageRecordSize() + { + return recordSize; + } + + public override int GetInitialRecordSize(ref Key key, ref Input input) + { + return recordSize; + } + + public override int GetRecordSize(ref Key key, ref Value value) + { + return recordSize; + } + + /// + /// Dispose memory allocator + /// + public override void Dispose() + { + for (int i = 0; i < values.Length; i++) + { + if (handles[i].IsAllocated) + handles[i].Free(); + values[i] = null; + } + handles = null; + pointers = null; + values = null; + base.Dispose(); + } + + public override AddressInfo* GetKeyAddressInfo(long physicalAddress) + { + return (AddressInfo*)((byte*)physicalAddress + RecordInfo.GetLength()); + } + + public override AddressInfo* GetValueAddressInfo(long physicalAddress) + { + return (AddressInfo*)((byte*)physicalAddress + RecordInfo.GetLength() + keySize); + } + + /// + /// Allocate memory page, pinned in memory, and in sector aligned form, if possible + /// + /// + protected override void AllocatePage(int index) + { + var adjustedSize = PageSize + 2 * sectorSize; + byte[] tmp = new 
byte[adjustedSize]; + Array.Clear(tmp, 0, adjustedSize); + + handles[index] = GCHandle.Alloc(tmp, GCHandleType.Pinned); + long p = (long)handles[index].AddrOfPinnedObject(); + pointers[index] = (p + (sectorSize - 1)) & ~(sectorSize - 1); + values[index] = tmp; + + PageStatusIndicator[index].PageFlushCloseStatus.PageFlushStatus = PMMFlushStatus.Flushed; + PageStatusIndicator[index].PageFlushCloseStatus.PageCloseStatus = PMMCloseStatus.Closed; + Interlocked.MemoryBarrier(); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public override long GetPhysicalAddress(long logicalAddress) + { + // Offset within page + int offset = (int)(logicalAddress & ((1L << LogPageSizeBits) - 1)); + + // Index of page within the circular buffer + int pageIndex = (int)((logicalAddress >> LogPageSizeBits) & (BufferSize - 1)); + return *(nativePointers + pageIndex) + offset; + } + + protected override bool IsAllocated(int pageIndex) + { + return values[pageIndex] != null; + } + + protected override void DeleteAddressRange(long fromAddress, long toAddress) + { + base.DeleteAddressRange(fromAddress, toAddress); + } + + protected override void WriteAsync(long flushPage, IOCompletionCallback callback, PageAsyncFlushResult asyncResult) + { + WriteAsync((IntPtr)pointers[flushPage % BufferSize], + (ulong)(AlignedPageSizeBytes * flushPage), + (uint)PageSize, + callback, + asyncResult, device); + } + + protected override void WriteAsyncToDevice + (long startPage, long flushPage, IOCompletionCallback callback, + PageAsyncFlushResult asyncResult, IDevice device, IDevice objectLogDevice) + { + WriteAsync((IntPtr)pointers[flushPage % BufferSize], + (ulong)(AlignedPageSizeBytes * (flushPage - startPage)), + (uint)PageSize, callback, asyncResult, + device); + } + + /// + /// Get start logical address + /// + /// + /// + public override long GetStartLogicalAddress(long page) + { + if (page == 0) + return (page << LogPageSizeBits) + Constants.kFirstValidAddress; + + return page << LogPageSizeBits; + } + + + protected override void ClearPage(int page, bool pageZero) + { + Array.Clear(values[page], 0, values[page].Length); + } + + private void WriteAsync(IntPtr alignedSourceAddress, ulong alignedDestinationAddress, uint numBytesToWrite, + IOCompletionCallback callback, PageAsyncFlushResult asyncResult, + IDevice device) + { + device.WriteAsync(alignedSourceAddress, alignedDestinationAddress, + numBytesToWrite, callback, asyncResult); + } + + protected override void ReadAsync( + ulong alignedSourceAddress, int destinationPageIndex, uint aligned_read_length, + IOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice) + { + device.ReadAsync(alignedSourceAddress, (IntPtr)pointers[destinationPageIndex], + aligned_read_length, callback, asyncResult); + } + + /// + /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read + /// the record efficiently into memory. 
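GetPhysicalAddress above is the core of the blittable design: pages are pinned byte arrays, so a logical address is simply a circular-buffer slot plus a byte offset into that slot's pinned block, with no per-record object header or indirection. A small sketch of the address split, reusing the hypothetical sizes from the earlier example.

// Illustrative only: splitting a logical address into buffer slot and page offset.
using System;

static class AddressMath
{
    static void Main()
    {
        int logPageSizeBits = 25;                   // 32 MB pages (hypothetical)
        int bufferSize = 16;                        // power of two, so the mask below works
        long logicalAddress = (20L << logPageSizeBits) + 0x1234;   // page 20, offset 0x1234

        int offset = (int)(logicalAddress & ((1L << logPageSizeBits) - 1));            // 0x1234
        int pageIndex = (int)((logicalAddress >> logPageSizeBits) & (bufferSize - 1)); // 20 % 16 = 4

        // The physical address is the pinned base pointer of circular-buffer slot 4 plus 0x1234.
        Console.WriteLine($"slot {pageIndex}, offset 0x{offset:X}");
    }
}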
+ /// + /// + /// + /// + /// + /// + protected override void AsyncReadRecordObjectsToMemory(long fromLogical, int numBytes, IOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default(SectorAlignedMemory)) + { + throw new InvalidOperationException("AsyncReadRecordObjectsToMemory invalid for BlittableAllocator"); + } + + /// + /// Retrieve objects from object log + /// + /// + /// + /// + protected override bool RetrievedFullRecord(byte* record, ref AsyncIOContext ctx) + { + ShallowCopy(ref GetKey((long)record), ref ctx.key); + ShallowCopy(ref GetValue((long)record), ref ctx.value); + return true; + } + + /// + /// Whether KVS has keys to serialize/deserialize + /// + /// + public override bool KeyHasObjects() + { + return false; + } + + /// + /// Whether KVS has values to serialize/deserialize + /// + /// + public override bool ValueHasObjects() + { + return false; + } + + public override long[] GetSegmentOffsets() + { + return null; + } + + internal override void PopulatePage(byte* src, int required_bytes, long destinationPage) + { + throw new Exception("BlittableAllocator memory pages are sector aligned - use direct copy"); + // Buffer.MemoryCopy(src, (void*)pointers[destinationPage % BufferSize], required_bytes, required_bytes); + } + } +} diff --git a/cs/src/core/Allocator/GenericAllocator.cs b/cs/src/core/Allocator/GenericAllocator.cs new file mode 100644 index 000000000..9e328eb22 --- /dev/null +++ b/cs/src/core/Allocator/GenericAllocator.cs @@ -0,0 +1,837 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +using System; +using System.Runtime.CompilerServices; +using System.Threading; +using System.Collections.Generic; +using System.IO; +using System.Diagnostics; +using System.Runtime.InteropServices; + +#pragma warning disable CS1591 // Missing XML comment for publicly visible type or member + +namespace FASTER.core +{ + [StructLayout(LayoutKind.Sequential, Pack=1)] + public struct Record + { + public RecordInfo info; + public Key key; + public Value value; + } + + + public unsafe sealed class GenericAllocator : AllocatorBase + where Key : new() + where Value : new() + { + // Circular buffer definition + private Record[][] values; + + // Object log related variables + private readonly IDevice objectLogDevice; + // Size of object chunks beign written to storage + private const int ObjectBlockSize = 100 * (1 << 20); + // Tail offsets per segment, in object log + public readonly long[] segmentOffsets; + // Buffer pool for object log related work + SectorAlignedBufferPool ioBufferPool; + // Record sizes + private static readonly int recordSize = Utility.GetSize(default(Record)); + private readonly SerializerSettings SerializerSettings; + + public GenericAllocator(LogSettings settings, SerializerSettings serializerSettings, IFasterEqualityComparer comparer) + : base(settings, comparer) + { + SerializerSettings = serializerSettings; + + if (default(Key) == null && ((SerializerSettings == null) || (SerializerSettings.keySerializer == null))) + { + throw new Exception("Key is a class, but no serializer specified via SerializerSettings"); + } + + if (default(Value) == null && ((SerializerSettings == null) || (SerializerSettings.valueSerializer == null))) + { + throw new Exception("Value is a class, but no serializer specified via SerializerSettings"); + } + + values = new Record[BufferSize][]; + segmentOffsets = new long[SegmentBufferSize]; + + objectLogDevice = settings.ObjectLogDevice; + + if 
(KeyHasObjects() || ValueHasObjects()) + { + if (objectLogDevice == null) + throw new Exception("Objects in key/value, but object log not provided during creation of FASTER instance"); + } + + epoch = LightEpoch.Instance; + ioBufferPool = SectorAlignedBufferPool.GetPool(1, sectorSize); + } + + public override void Initialize() + { + Initialize(recordSize); + } + + /// + /// Get start logical address + /// + /// + /// + public override long GetStartLogicalAddress(long page) + { + if (page == 0) + return (page << LogPageSizeBits) + recordSize; + + return page << LogPageSizeBits; + } + + public override ref RecordInfo GetInfo(long physicalAddress) + { + // Offset within page + int offset = (int)(physicalAddress & PageSizeMask); + + // Index of page within the circular buffer + int pageIndex = (int)((physicalAddress >> LogPageSizeBits) & BufferSizeMask); + + return ref values[pageIndex][offset/recordSize].info; + } + + public override ref RecordInfo GetInfoFromBytePointer(byte* ptr) + { + return ref Unsafe.AsRef>(ptr).info; + } + + + public override ref Key GetKey(long physicalAddress) + { + // Offset within page + int offset = (int)(physicalAddress & PageSizeMask); + + // Index of page within the circular buffer + int pageIndex = (int)((physicalAddress >> LogPageSizeBits) & BufferSizeMask); + + return ref values[pageIndex][offset / recordSize].key; + } + + public override ref Value GetValue(long physicalAddress) + { + // Offset within page + int offset = (int)(physicalAddress & PageSizeMask); + + // Index of page within the circular buffer + int pageIndex = (int)((physicalAddress >> LogPageSizeBits) & BufferSizeMask); + + return ref values[pageIndex][offset / recordSize].value; + } + + public override int GetRecordSize(long physicalAddress) + { + return recordSize; + } + + public override int GetAverageRecordSize() + { + return recordSize; + } + + public override int GetInitialRecordSize(ref Key key, ref Input input) + { + return recordSize; + } + + public override int GetRecordSize(ref Key key, ref Value value) + { + return recordSize; + } + + /// + /// Dispose memory allocator + /// + public override void Dispose() + { + for (int i = 0; i < values.Length; i++) + { + values[i] = null; + } + values = null; + base.Dispose(); + } + + public override AddressInfo* GetKeyAddressInfo(long physicalAddress) + { + return (AddressInfo*)Unsafe.AsPointer(ref Unsafe.AsRef>((byte*)physicalAddress).key); + } + + public override AddressInfo* GetValueAddressInfo(long physicalAddress) + { + return (AddressInfo*)Unsafe.AsPointer(ref Unsafe.AsRef>((byte*)physicalAddress).value); + } + + /// + /// Allocate memory page, pinned in memory, and in sector aligned form, if possible + /// + /// + protected override void AllocatePage(int index) + { + Record[] tmp; + if (PageSize % recordSize == 0) + tmp = new Record[PageSize / recordSize]; + else + tmp = new Record[1 + (PageSize / recordSize)]; + Array.Clear(tmp, 0, tmp.Length); + + values[index] = tmp; + PageStatusIndicator[index].PageFlushCloseStatus.PageFlushStatus = PMMFlushStatus.Flushed; + PageStatusIndicator[index].PageFlushCloseStatus.PageCloseStatus = PMMCloseStatus.Closed; + Interlocked.MemoryBarrier(); + } + + public override long GetPhysicalAddress(long logicalAddress) + { + return logicalAddress; + } + + protected override bool IsAllocated(int pageIndex) + { + return values[pageIndex] != null; + } + + protected override void DeleteAddressRange(long fromAddress, long toAddress) + { + base.DeleteAddressRange(fromAddress, toAddress); + 
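The constructor checks above spell out the contract for GenericAllocator: when Key or Value is a class, SerializerSettings must supply a serializer factory, and an object log device must be provided so variable-length objects can live outside the fixed-size main-log records. A hedged sketch of a value serializer; the IObjectSerializer member names follow the calls made later in this file, while the exact interface shape and the SerializerSettings generics are assumptions.

// Illustrative only: a serializer for a hypothetical class-typed value.
using System.IO;
using FASTER.core;

public class MyValue { public string payload; }

public class MyValueSerializer : IObjectSerializer<MyValue>
{
    private BinaryWriter writer;
    private BinaryReader reader;

    public void BeginSerialize(Stream stream) => writer = new BinaryWriter(stream);
    public void Serialize(ref MyValue obj) => writer.Write(obj.payload ?? "");
    public void EndSerialize() => writer.Flush();

    public void BeginDeserialize(Stream stream) => reader = new BinaryReader(stream);
    public void Deserialize(ref MyValue obj) => obj.payload = reader.ReadString();
    public void EndDeserialize() { }
}

// Supplied at construction so the allocator can spill MyValue instances to the object log
// (an ObjectLogDevice must also be set in LogSettings):
//   new SerializerSettings<AdId, MyValue> { valueSerializer = () => new MyValueSerializer() }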
objectLogDevice.DeleteSegmentRange((int)(fromAddress >> LogSegmentSizeBits), (int)(toAddress >> LogSegmentSizeBits)); + } + + protected override void WriteAsync(long flushPage, IOCompletionCallback callback, PageAsyncFlushResult asyncResult) + { + WriteAsync(flushPage, + (ulong)(AlignedPageSizeBytes * flushPage), + (uint)PageSize, + callback, + asyncResult, device, objectLogDevice); + } + + protected override void WriteAsyncToDevice + (long startPage, long flushPage, IOCompletionCallback callback, + PageAsyncFlushResult asyncResult, IDevice device, IDevice objectLogDevice) + { + // We are writing to separate device, so use fresh segment offsets + WriteAsync(flushPage, + (ulong)(AlignedPageSizeBytes * (flushPage - startPage)), + (uint)PageSize, callback, asyncResult, + device, objectLogDevice, flushPage, new long[SegmentBufferSize]); + } + + + + protected override void ClearPage(int page, bool pageZero) + { + Array.Clear(values[page], 0, values[page].Length); + + // Close segments + var thisCloseSegment = page >> (LogSegmentSizeBits - LogPageSizeBits); + var nextClosePage = page + 1; + var nextCloseSegment = nextClosePage >> (LogSegmentSizeBits - LogPageSizeBits); + + if (thisCloseSegment != nextCloseSegment) + { + // Last page in current segment + segmentOffsets[thisCloseSegment % SegmentBufferSize] = 0; + } + } + + private void WriteAsync(long flushPage, ulong alignedDestinationAddress, uint numBytesToWrite, + IOCompletionCallback callback, PageAsyncFlushResult asyncResult, + IDevice device, IDevice objlogDevice, long intendedDestinationPage = -1, long[] localSegmentOffsets = null) + { + /* + if (!(KeyHasObjects() || ValueHasObjects())) + { + device.WriteAsync((IntPtr)pointers[flushPage % BufferSize], alignedDestinationAddress, + numBytesToWrite, callback, asyncResult); + return; + } + */ + + // Check if user did not override with special segment offsets + if (localSegmentOffsets == null) localSegmentOffsets = segmentOffsets; + + // need to write both page and object cache + asyncResult.count++; + + var src = values[flushPage % BufferSize]; + var buffer = ioBufferPool.Get(PageSize); + + fixed (RecordInfo* pin = &src[0].info) + { + Buffer.MemoryCopy(Unsafe.AsPointer(ref src[0]), buffer.aligned_pointer, numBytesToWrite, numBytesToWrite); + } + + long ptr = (long)buffer.aligned_pointer; + List addr = new List(); + asyncResult.freeBuffer1 = buffer; + + // Correct for page 0 of HLOG + //if (intendedDestinationPage < 0) + //{ + // By default, when we are not writing to a separate device, the intended + // destination page (logical) is the same as actual + // intendedDestinationPage = (long)(alignedDestinationAddress >> LogPageSizeBits); + //} + + //if (intendedDestinationPage == 0) + // ptr += Constants.kFirstValidAddress; + + addr = new List(); + MemoryStream ms = new MemoryStream(); + IObjectSerializer keySerializer = null; + IObjectSerializer valueSerializer = null; + + if (KeyHasObjects()) + { + keySerializer = SerializerSettings.keySerializer(); + keySerializer.BeginSerialize(ms); + } + if (ValueHasObjects()) + { + valueSerializer = SerializerSettings.valueSerializer(); + valueSerializer.BeginSerialize(ms); + } + for (int i=0; iAddress = pos; + key_address->Size = (int)(ms.Position - pos); + addr.Add((long)key_address); + } + + if (ValueHasObjects()) + { + long pos = ms.Position; + valueSerializer.Serialize(ref src[i].value); + var value_address = GetValueAddressInfo((long)(buffer.aligned_pointer + i * recordSize)); + value_address->Address = pos; + value_address->Size = (int)(ms.Position 
- pos); + addr.Add((long)value_address); + } + } + + if (ms.Position > ObjectBlockSize || i == numBytesToWrite/recordSize - 1) + { + var _s = ms.ToArray(); + ms.Close(); + ms = new MemoryStream(); + + var _objBuffer = ioBufferPool.Get(_s.Length); + + asyncResult.done = new AutoResetEvent(false); + + var _alignedLength = (_s.Length + (sectorSize - 1)) & ~(sectorSize - 1); + + var _objAddr = Interlocked.Add(ref localSegmentOffsets[(long)(alignedDestinationAddress >> LogSegmentSizeBits) % SegmentBufferSize], _alignedLength) - _alignedLength; + fixed (void* src_ = _s) + Buffer.MemoryCopy(src_, _objBuffer.aligned_pointer, _s.Length, _s.Length); + + foreach (var address in addr) + *((long*)address) += _objAddr; + + if (i < numBytesToWrite / recordSize - 1) + { + objlogDevice.WriteAsync( + (IntPtr)_objBuffer.aligned_pointer, + (int)(alignedDestinationAddress >> LogSegmentSizeBits), + (ulong)_objAddr, (uint)_alignedLength, AsyncFlushPartialObjectLogCallback, asyncResult); + + // Wait for write to complete before resuming next write + asyncResult.done.WaitOne(); + _objBuffer.Return(); + } + else + { + asyncResult.freeBuffer2 = _objBuffer; + objlogDevice.WriteAsync( + (IntPtr)_objBuffer.aligned_pointer, + (int)(alignedDestinationAddress >> LogSegmentSizeBits), + (ulong)_objAddr, (uint)_alignedLength, callback, asyncResult); + } + } + } + if (KeyHasObjects()) + { + keySerializer.EndSerialize(); + } + if (ValueHasObjects()) + { + valueSerializer.EndSerialize(); + } + + // Finally write the hlog page + device.WriteAsync((IntPtr)buffer.aligned_pointer, alignedDestinationAddress, + numBytesToWrite, callback, asyncResult); + } + + protected override void ReadAsync( + ulong alignedSourceAddress, int destinationPageIndex, uint aligned_read_length, + IOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice) + { + asyncResult.freeBuffer1 = readBufferPool.Get((int)aligned_read_length); + asyncResult.freeBuffer1.required_bytes = (int)aligned_read_length; + + if (!(KeyHasObjects() || ValueHasObjects())) + { + device.ReadAsync(alignedSourceAddress, (IntPtr)asyncResult.freeBuffer1.aligned_pointer, + aligned_read_length, callback, asyncResult); + return; + } + + asyncResult.callback = callback; + asyncResult.count++; + + if (objlogDevice == null) + { + Debug.Assert(objectLogDevice != null); + objlogDevice = objectLogDevice; + } + asyncResult.objlogDevice = objlogDevice; + + device.ReadAsync(alignedSourceAddress, (IntPtr)asyncResult.freeBuffer1.aligned_pointer, + aligned_read_length, AsyncReadPageWithObjectsCallback, asyncResult); + } + + + /// + /// IOCompletion callback for page flush + /// + /// + /// + /// + private void AsyncFlushPartialObjectLogCallback(uint errorCode, uint numBytes, NativeOverlapped* overlap) + { + if (errorCode != 0) + { + Trace.TraceError("OverlappedStream GetQueuedCompletionStatus error: {0}", errorCode); + } + + // Set the page status to flushed + PageAsyncFlushResult result = (PageAsyncFlushResult)Overlapped.Unpack(overlap).AsyncResult; + result.done.Set(); + + Overlapped.Free(overlap); + } + + private void AsyncReadPageWithObjectsCallback(uint errorCode, uint numBytes, NativeOverlapped* overlap) + { + if (errorCode != 0) + { + Trace.TraceError("OverlappedStream GetQueuedCompletionStatus error: {0}", errorCode); + } + + PageAsyncReadResult result = (PageAsyncReadResult)Overlapped.Unpack(overlap).AsyncResult; + + if (result.freeBuffer1.buffer != null && result.freeBuffer1.required_bytes > 0) + { + 
PopulatePage(result.freeBuffer1.GetValidPointer(), result.freeBuffer1.required_bytes, result.page); + result.freeBuffer1.required_bytes = 0; + } + + var src = values[result.page % BufferSize]; + + long ptr = 0; + + // Correct for page 0 of HLOG + //if (result.page == 0) + // ptr += Constants.kFirstValidAddress; + + // Check if we are resuming + if (result.resumeptr > ptr) + ptr = result.resumeptr; + + // Deserialize all objects until untilptr + if (ptr < result.untilptr) + { + MemoryStream ms = new MemoryStream(result.freeBuffer2.buffer); + ms.Seek(result.freeBuffer2.offset + result.freeBuffer2.valid_offset, SeekOrigin.Begin); + Deserialize(ptr, result.untilptr, ms); + ms.Dispose(); + + ptr = result.untilptr; + result.freeBuffer2.Return(); + result.freeBuffer2.buffer = null; + result.resumeptr = ptr; + } + + // If we have processed entire page, return + if (ptr >= PageSize) + { + result.Free(); + + // Call the "real" page read callback + result.callback(errorCode, numBytes, overlap); + return; + } + + // We will be re-issuing I/O, so free current overlap + Overlapped.Free(overlap); + + GetObjectInfo(result.freeBuffer1.GetValidPointer(), ref ptr, PageSize, ObjectBlockSize, out long startptr, out long size); + + // Object log fragment should be aligned by construction + Debug.Assert(startptr % sectorSize == 0); + + // We will be able to process all records until (but not including) ptr + result.untilptr = ptr; + + if (size > int.MaxValue) + throw new Exception("Unable to read object page, total size greater than 2GB: " + size); + + var objBuffer = ioBufferPool.Get((int)size); + result.freeBuffer2 = objBuffer; + var alignedLength = (size + (sectorSize - 1)) & ~(sectorSize - 1); + + // Request objects from objlog + result.objlogDevice.ReadAsync( + (int)(result.page >> (LogSegmentSizeBits - LogPageSizeBits)), + (ulong)startptr, + (IntPtr)objBuffer.aligned_pointer, (uint)alignedLength, AsyncReadPageWithObjectsCallback, result); + } + + /// + /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read + /// the record efficiently into memory. 
+ /// + /// + /// + /// + /// + /// + protected override void AsyncReadRecordObjectsToMemory(long fromLogical, int numBytes, IOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default(SectorAlignedMemory)) + { + ulong fileOffset = (ulong)(AlignedPageSizeBytes * (fromLogical >> LogPageSizeBits) + (fromLogical & PageSizeMask)); + ulong alignedFileOffset = (ulong)(((long)fileOffset / sectorSize) * sectorSize); + + uint alignedReadLength = (uint)((long)fileOffset + numBytes - (long)alignedFileOffset); + alignedReadLength = (uint)((alignedReadLength + (sectorSize - 1)) & ~(sectorSize - 1)); + + var record = readBufferPool.Get((int)alignedReadLength); + record.valid_offset = (int)(fileOffset - alignedFileOffset); + record.available_bytes = (int)(alignedReadLength - (fileOffset - alignedFileOffset)); + record.required_bytes = numBytes; + + var asyncResult = default(AsyncGetFromDiskResult>); + asyncResult.context = context; + asyncResult.context.record = result; + asyncResult.context.objBuffer = record; + objectLogDevice.ReadAsync( + (int)(context.logicalAddress >> LogSegmentSizeBits), + alignedFileOffset, + (IntPtr)asyncResult.context.objBuffer.aligned_pointer, + alignedReadLength, + callback, + asyncResult); + } + + + + + #region Page handlers for objects + /// + /// Deseialize part of page from stream + /// + /// From pointer + /// Until pointer + /// Stream + public void Deserialize(long ptr, long untilptr, Stream stream) + { + IObjectSerializer keySerializer = null; + IObjectSerializer valueSerializer = null; + + if (KeyHasObjects()) + { + keySerializer = SerializerSettings.keySerializer(); + keySerializer.BeginDeserialize(stream); + } + if (ValueHasObjects()) + { + valueSerializer = SerializerSettings.valueSerializer(); + valueSerializer.BeginDeserialize(stream); + } + + while (ptr < untilptr) + { + if (!GetInfo(ptr).Invalid) + { + if (KeyHasObjects()) + { + GetKey(ptr) = new Key(); + keySerializer.Deserialize(ref GetKey(ptr)); + } + + if (ValueHasObjects()) + { + GetValue(ptr) = new Value(); + valueSerializer.Deserialize(ref GetValue(ptr)); + } + } + ptr += GetRecordSize(ptr); + } + if (KeyHasObjects()) + { + keySerializer.EndDeserialize(); + } + if (ValueHasObjects()) + { + valueSerializer.EndDeserialize(); + } + } + + /// + /// Serialize part of page to stream + /// + /// From pointer + /// Until pointer + /// Stream + /// Size of blocks to serialize in chunks of + /// List of addresses that need to be updated with offsets + public void Serialize(ref long ptr, long untilptr, Stream stream, int objectBlockSize, out List addr) + { + IObjectSerializer keySerializer = null; + IObjectSerializer valueSerializer = null; + + if (KeyHasObjects()) + { + keySerializer = SerializerSettings.keySerializer(); + keySerializer.BeginSerialize(stream); + } + if (ValueHasObjects()) + { + valueSerializer = SerializerSettings.valueSerializer(); + valueSerializer.BeginSerialize(stream); + } + + addr = new List(); + while (ptr < untilptr) + { + if (!GetInfo(ptr).Invalid) + { + long pos = stream.Position; + + if (KeyHasObjects()) + { + keySerializer.Serialize(ref GetKey(ptr)); + var key_address = GetKeyAddressInfo(ptr); + key_address->Address = pos; + key_address->Size = (int)(stream.Position - pos); + addr.Add((long)key_address); + } + + if (ValueHasObjects()) + { + pos = stream.Position; + var value_address = GetValueAddressInfo(ptr); + valueSerializer.Serialize(ref GetValue(ptr)); + value_address->Address = pos; + value_address->Size = (int)(stream.Position - pos); + 
addr.Add((long)value_address); + } + + } + ptr += GetRecordSize(ptr); + + if (stream.Position > objectBlockSize) + break; + } + + if (KeyHasObjects()) + { + keySerializer.EndSerialize(); + } + if (ValueHasObjects()) + { + valueSerializer.EndSerialize(); + } + } + + /// + /// Get location and range of object log addresses for specified log page + /// + /// + /// + /// + /// + /// + /// + public void GetObjectInfo(byte* raw, ref long ptr, long untilptr, int objectBlockSize, out long startptr, out long size) + { + long minObjAddress = long.MaxValue; + long maxObjAddress = long.MinValue; + + while (ptr < untilptr) + { + if (!GetInfo(ptr).Invalid) + { + if (KeyHasObjects()) + { + var key_addr = GetKeyAddressInfo((long)raw + ptr); + var addr = key_addr->Address; + + // If the object pointer is more than objectBlockSize beyond the starting object pointer, stop here + if (minObjAddress != long.MaxValue && (addr - minObjAddress > objectBlockSize)) + { + break; + } + + if (addr < minObjAddress) minObjAddress = addr; + addr += key_addr->Size; + if (addr > maxObjAddress) maxObjAddress = addr; + } + + + if (ValueHasObjects()) + { + var value_addr = GetValueAddressInfo((long)raw + ptr); + var addr = value_addr->Address; + + // If the object pointer is more than objectBlockSize beyond the starting object pointer, stop here + if (minObjAddress != long.MaxValue && (addr - minObjAddress > objectBlockSize)) + { + break; + } + + if (addr < minObjAddress) minObjAddress = addr; + addr += value_addr->Size; + if (addr > maxObjAddress) maxObjAddress = addr; + } + } + ptr += GetRecordSize(ptr); + } + + // Handle the case where no objects are to be written + if (minObjAddress == long.MaxValue && maxObjAddress == long.MinValue) + { + minObjAddress = 0; + maxObjAddress = 0; + } + + startptr = minObjAddress; + size = maxObjAddress - minObjAddress; + } + + /// + /// Retrieve objects from object log + /// + /// + /// + /// + protected override bool RetrievedFullRecord(byte* record, ref AsyncIOContext<Key, Value> ctx) + { + if (!KeyHasObjects()) + { + ShallowCopy(ref Unsafe.AsRef<Record<Key, Value>>(record).key, ref ctx.key); + } + if (!ValueHasObjects()) + { + ShallowCopy(ref Unsafe.AsRef<Record<Key, Value>>(record).value, ref ctx.value); + } + + if (!(KeyHasObjects() || ValueHasObjects())) + return true; + + if (ctx.objBuffer.buffer == null) + { + // Issue IO for objects + long startAddress = -1; + long endAddress = -1; + if (KeyHasObjects()) + { + var x = GetKeyAddressInfo((long)record); + startAddress = x->Address; + endAddress = x->Address + x->Size; + } + + if (ValueHasObjects()) + { + var x = GetValueAddressInfo((long)record); + if (startAddress == -1) + startAddress = x->Address; + endAddress = x->Address + x->Size; + } + + // We are limited to a 2GB size per key-value + if (endAddress - startAddress > int.MaxValue) + throw new Exception("Size of key-value exceeds max of 2GB: " + (endAddress - startAddress)); + + AsyncGetFromDisk(startAddress, (int)(endAddress - startAddress), ctx, ctx.record); + return false; + } + + // Parse the key and value objects + MemoryStream ms = new MemoryStream(ctx.objBuffer.buffer); + ms.Seek(ctx.objBuffer.offset + ctx.objBuffer.valid_offset, SeekOrigin.Begin); + + if (KeyHasObjects()) + { + ctx.key = new Key(); + + var keySerializer = SerializerSettings.keySerializer(); + keySerializer.BeginDeserialize(ms); + keySerializer.Deserialize(ref ctx.key); + keySerializer.EndDeserialize(); + } + + if (ValueHasObjects()) + { + ctx.value = new Value(); + + var valueSerializer = SerializerSettings.valueSerializer(); + valueSerializer.BeginDeserialize(ms); + valueSerializer.Deserialize(ref
ctx.value); + valueSerializer.EndDeserialize(); + } + + ctx.objBuffer.Return(); + return true; + } + + /// + /// Whether KVS has keys to serialize/deserialize + /// + /// + public override bool KeyHasObjects() + { + return SerializerSettings.keySerializer != null; + } + + /// + /// Whether KVS has values to serialize/deserialize + /// + /// + public override bool ValueHasObjects() + { + return SerializerSettings.valueSerializer != null; + } + #endregion + + public override long[] GetSegmentOffsets() + { + return segmentOffsets; + } + + internal override void PopulatePage(byte* src, int required_bytes, long destinationPage) + { + fixed (RecordInfo* pin = &values[destinationPage % BufferSize][0].info) + { + Buffer.MemoryCopy(src, Unsafe.AsPointer(ref values[destinationPage % BufferSize][0]), required_bytes, required_bytes); + } + } + } +} diff --git a/cs/src/core/Allocator/IPageHandlers.cs b/cs/src/core/Allocator/IPageHandlers.cs deleted file mode 100644 index fe6cd1d26..000000000 --- a/cs/src/core/Allocator/IPageHandlers.cs +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. - -using System.Collections.Generic; -using System.IO; - -namespace FASTER.core -{ - /// - /// Interface for KVS to operate on pages - /// - public interface IPageHandlers - { - /// - /// Clear page - /// - /// From pointer - /// Until pointer - void ClearPage(long ptr, long endptr); - - /// - /// Deseialize part of page from stream - /// - /// From pointer - /// Until pointer - /// Stream - void Deserialize(long ptr, long untilptr, Stream stream); - - /// - /// Serialize part of page to stream - /// - /// From pointer - /// Until pointer - /// Stream - /// Size of blocks to serialize in chunks of - /// List of addresses that need to be updated with offsets - void Serialize(ref long ptr, long untilptr, Stream stream, int objectBlockSize, out List addr); - - /// - /// Get location and range of object log addresses for specified log page - /// - /// - /// - /// - /// - /// - void GetObjectInfo(ref long ptr, long untilptr, int objectBlockSize, out long startptr, out long size); - - /// - /// Whether KVS has objects to serialize/deserialize - /// - /// - bool HasObjects(); - } -} diff --git a/cs/src/core/Allocator/PMMAsyncIO.cs b/cs/src/core/Allocator/PMMAsyncIO.cs deleted file mode 100644 index c55be7763..000000000 --- a/cs/src/core/Allocator/PMMAsyncIO.cs +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. - -using System; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Diagnostics; -using System.IO; -using System.Linq; -using System.Runtime.CompilerServices; -using System.Runtime.Serialization; -using System.Threading; - -namespace FASTER.core -{ - - /// - /// Async IO related functions of PMM - /// - public unsafe partial class PersistentMemoryMalloc : IAllocator - { - #region Async file operations - - /// - /// Invoked by users to obtain a record from disk. It uses sector aligned memory to read - /// the record efficiently into memory. 
- /// - /// - /// - /// - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void AsyncReadRecordToMemory(long fromLogical, int numRecords, IOCompletionCallback callback, AsyncIOContext context, SectorAlignedMemory result = default(SectorAlignedMemory)) - { - ulong fileOffset = (ulong)(AlignedPageSizeBytes * (fromLogical >> LogPageSizeBits) + (fromLogical & PageSizeMask)); - ulong alignedFileOffset = (ulong)(((long)fileOffset / sectorSize) * sectorSize); - - uint alignedReadLength = (uint)((long)fileOffset + numRecords - (long)alignedFileOffset); - alignedReadLength = (uint)((alignedReadLength + (sectorSize - 1)) & ~(sectorSize - 1)); - - var record = readBufferPool.Get((int)alignedReadLength); - record.valid_offset = (int)(fileOffset - alignedFileOffset); - record.available_bytes = (int)(alignedReadLength - (fileOffset - alignedFileOffset)); - record.required_bytes = numRecords; - - var asyncResult = default(AsyncGetFromDiskResult); - asyncResult.context = context; - if (result.buffer == null) - { - asyncResult.context.record = record; - device.ReadAsync(alignedFileOffset, - (IntPtr)asyncResult.context.record.aligned_pointer, - alignedReadLength, - callback, - asyncResult); - } - else - { - asyncResult.context.record = result; - asyncResult.context.objBuffer = record; - objectLogDevice.ReadAsync( - (int)(context.logicalAddress >> LogSegmentSizeBits), - alignedFileOffset, - (IntPtr)asyncResult.context.objBuffer.aligned_pointer, - alignedReadLength, - callback, - asyncResult); - } - } - - /// - /// Read pages from specified device - /// - /// - /// - /// - /// - /// - /// - /// - /// - public void AsyncReadPagesFromDevice( - long readPageStart, - int numPages, - IOCompletionCallback callback, - TContext context, - long devicePageOffset = 0, - IDevice logDevice = null, IDevice objectLogDevice = null) - { - AsyncReadPagesFromDevice(readPageStart, numPages, callback, context, - out CountdownEvent completed, devicePageOffset, logDevice, objectLogDevice); - } - - /// - /// Read pages from specified device - /// - /// - /// - /// - /// - /// - /// - /// - /// - /// - public void AsyncReadPagesFromDevice( - long readPageStart, - int numPages, - IOCompletionCallback callback, - TContext context, - out CountdownEvent completed, - long devicePageOffset = 0, - IDevice device = null, IDevice objectLogDevice = null) - { - var usedDevice = device; - IDevice usedObjlogDevice = objectLogDevice; - - if (device == null) - { - usedDevice = this.device; - usedObjlogDevice = this.objectLogDevice; - } - - - if (pageHandlers.HasObjects()) - { - if (usedObjlogDevice == null) - throw new Exception("Object log device not provided"); - } - - completed = new CountdownEvent(numPages); - for (long readPage = readPageStart; readPage < (readPageStart + numPages); readPage++) - { - int pageIndex = (int)(readPage % BufferSize); - if (values[pageIndex] == null) - { - // Allocate a new page - AllocatePage(pageIndex); - } - else - { - ClearPage(pageIndex, readPage == 0); - } - var asyncResult = new PageAsyncReadResult() - { - page = readPage, - context = context, - handle = completed, - count = 1 - }; - - ulong offsetInFile = (ulong)(AlignedPageSizeBytes * readPage); - - if (device != null) - offsetInFile = (ulong)(AlignedPageSizeBytes * (readPage - devicePageOffset)); - - ReadAsync(offsetInFile, (IntPtr)pointers[pageIndex], PageSize, callback, asyncResult, usedDevice, usedObjlogDevice); - } - } - - /// - /// Flush pages asynchronously - /// - /// - /// - /// - /// - /// - public void AsyncFlushPages( - 
long flushPageStart, - int numPages, - IOCompletionCallback callback, - TContext context) - { - for (long flushPage = flushPageStart; flushPage < (flushPageStart + numPages); flushPage++) - { - int pageIndex = GetPageIndexForPage(flushPage); - var asyncResult = new PageAsyncFlushResult() - { - page = flushPage, - context = context, - count = 1, - partial = false, - untilAddress = (flushPage + 1) << LogPageSizeBits - }; - - WriteAsync((IntPtr)pointers[flushPage % BufferSize], - (ulong)(AlignedPageSizeBytes * flushPage), - PageSize, callback, asyncResult, device, objectLogDevice); - } - } - - - /// - /// Flush page range to disk - /// Called when all threads have agreed that a page range is sealed. - /// - /// - /// - private void AsyncFlushPages(long startPage, long untilAddress) - { - long endPage = (untilAddress >> LogPageSizeBits); - int numPages = (int)(endPage - startPage); - long offsetInEndPage = GetOffsetInPage(untilAddress); - if (offsetInEndPage > 0) - { - numPages++; - } - - - /* Request asynchronous writes to the device. If waitForPendingFlushComplete - * is set, then a CountDownEvent is set in the callback handle. - */ - for (long flushPage = startPage; flushPage < (startPage + numPages); flushPage++) - { - long pageStartAddress = flushPage << LogPageSizeBits; - long pageEndAddress = (flushPage + 1) << LogPageSizeBits; - - var asyncResult = new PageAsyncFlushResult - { - page = flushPage, - count = 1 - }; - if (pageEndAddress > untilAddress) - { - asyncResult.partial = true; - asyncResult.untilAddress = untilAddress; - } - else - { - asyncResult.partial = false; - asyncResult.untilAddress = pageEndAddress; - - // Set status to in-progress - PageStatusIndicator[flushPage % BufferSize].PageFlushCloseStatus - = new FlushCloseStatus { PageFlushStatus = PMMFlushStatus.InProgress, PageCloseStatus = PMMCloseStatus.Open }; - } - - PageStatusIndicator[flushPage % BufferSize].LastFlushedUntilAddress = -1; - - WriteAsync((IntPtr)pointers[flushPage % BufferSize], - (ulong)(AlignedPageSizeBytes * flushPage), - PageSize, - AsyncFlushPageCallback, - asyncResult, device, objectLogDevice); - } - } - - /// - /// Flush pages from startPage (inclusive) to endPage (exclusive) - /// to specified log device and obj device - /// - /// - /// - /// - /// - /// - public void AsyncFlushPagesToDevice(long startPage, long endPage, IDevice device, IDevice objectLogDevice, out CountdownEvent completed) - { - int totalNumPages = (int)(endPage - startPage); - completed = new CountdownEvent(totalNumPages); - - // We are writing to separate device, so use fresh segment offsets - var _segmentOffsets = new long[SegmentBufferSize]; - - for (long flushPage = startPage; flushPage < endPage; flushPage++) - { - var asyncResult = new PageAsyncFlushResult - { - handle = completed, - count = 1 - }; - - long pageStartAddress = flushPage << LogPageSizeBits; - long pageEndAddress = (flushPage + 1) << LogPageSizeBits; - - // Intended destination is flushPage - WriteAsync((IntPtr)pointers[flushPage % BufferSize], - (ulong)(AlignedPageSizeBytes * (flushPage - startPage)), - PageSize, - AsyncFlushPageToDeviceCallback, - asyncResult, device, objectLogDevice, flushPage, _segmentOffsets); - } - } - - private void WriteAsync(IntPtr alignedSourceAddress, ulong alignedDestinationAddress, uint numBytesToWrite, - IOCompletionCallback callback, PageAsyncFlushResult asyncResult, - IDevice device, IDevice objlogDevice, long intendedDestinationPage = -1, long[] localSegmentOffsets = null) - { - if (!pageHandlers.HasObjects()) - { - 
device.WriteAsync(alignedSourceAddress, alignedDestinationAddress, - numBytesToWrite, callback, asyncResult); - return; - } - - // Check if user did not override with special segment offsets - if (localSegmentOffsets == null) localSegmentOffsets = segmentOffsets; - - // need to write both page and object cache - asyncResult.count++; - - var buffer = ioBufferPool.Get(PageSize); - Buffer.MemoryCopy((void*)alignedSourceAddress, buffer.aligned_pointer, numBytesToWrite, numBytesToWrite); - - long ptr = (long)buffer.aligned_pointer; - List addr = new List(); - asyncResult.freeBuffer1 = buffer; - - // Correct for page 0 of HLOG - if (intendedDestinationPage < 0) - { - // By default, when we are not writing to a separate device, the intended - // destination page (logical) is the same as actual - intendedDestinationPage = (long)(alignedDestinationAddress >> LogPageSizeBits); - } - - if (intendedDestinationPage == 0) - ptr += Constants.kFirstValidAddress; - - var untilptr = (long)buffer.aligned_pointer + numBytesToWrite; - - - while (ptr < untilptr) - { - MemoryStream ms = new MemoryStream(); - pageHandlers.Serialize(ref ptr, untilptr, ms, kObjectBlockSize, out List addresses); - var _s = ms.ToArray(); - ms.Close(); - - var _objBuffer = ioBufferPool.Get(_s.Length); - - asyncResult.done = new AutoResetEvent(false); - - var _alignedLength = (_s.Length + (sectorSize - 1)) & ~(sectorSize - 1); - - var _objAddr = Interlocked.Add(ref localSegmentOffsets[(long)(alignedDestinationAddress >> LogSegmentSizeBits) % SegmentBufferSize], _alignedLength) - _alignedLength; - fixed (void* src = _s) - Buffer.MemoryCopy(src, _objBuffer.aligned_pointer, _s.Length, _s.Length); - - foreach (var address in addresses) - *((long*)address) += _objAddr; - - if (ptr < untilptr) - { - objlogDevice.WriteAsync( - (IntPtr)_objBuffer.aligned_pointer, - (int)(alignedDestinationAddress >> LogSegmentSizeBits), - (ulong)_objAddr, (uint)_alignedLength, AsyncFlushPartialObjectLogCallback, asyncResult); - - // Wait for write to complete before resuming next write - asyncResult.done.WaitOne(); - _objBuffer.Return(); - } - else - { - asyncResult.freeBuffer2 = _objBuffer; - objlogDevice.WriteAsync( - (IntPtr)_objBuffer.aligned_pointer, - (int)(alignedDestinationAddress >> LogSegmentSizeBits), - (ulong)_objAddr, (uint)_alignedLength, callback, asyncResult); - } - } - - // Finally write the hlog page - device.WriteAsync((IntPtr)buffer.aligned_pointer, alignedDestinationAddress, - numBytesToWrite, callback, asyncResult); - } - - private void ReadAsync( - ulong alignedSourceAddress, IntPtr alignedDestinationAddress, uint aligned_read_length, - IOCompletionCallback callback, PageAsyncReadResult asyncResult, IDevice device, IDevice objlogDevice) - { - if (!pageHandlers.HasObjects()) - { - device.ReadAsync(alignedSourceAddress, alignedDestinationAddress, - aligned_read_length, callback, asyncResult); - return; - } - - asyncResult.callback = callback; - asyncResult.count++; - asyncResult.objlogDevice = objlogDevice; - - device.ReadAsync(alignedSourceAddress, alignedDestinationAddress, - aligned_read_length, AsyncReadPageWithObjectsCallback, asyncResult); - } - #endregion - - - #region Async callbacks - - - - /// - /// IOCompletion callback for page flush - /// - /// - /// - /// - private void AsyncFlushPageCallback(uint errorCode, uint numBytes, NativeOverlapped* overlap) - { - if (errorCode != 0) - { - Trace.TraceError("OverlappedStream GetQueuedCompletionStatus error: {0}", errorCode); - } - - // Set the page status to flushed - 
PageAsyncFlushResult result = (PageAsyncFlushResult)Overlapped.Unpack(overlap).AsyncResult; - - if (Interlocked.Decrement(ref result.count) == 0) - { - PageStatusIndicator[result.page % BufferSize].LastFlushedUntilAddress = result.untilAddress; - - if (!result.partial) - { - while (true) - { - var oldStatus = PageStatusIndicator[result.page % BufferSize].PageFlushCloseStatus; - if (oldStatus.PageCloseStatus == PMMCloseStatus.Closed) - { - ClearPage((int)(result.page % BufferSize), result.page == 0); - } - var newStatus = oldStatus; - newStatus.PageFlushStatus = PMMFlushStatus.Flushed; - if (oldStatus.value == Interlocked.CompareExchange(ref PageStatusIndicator[result.page % BufferSize].PageFlushCloseStatus.value, newStatus.value, oldStatus.value)) - { - break; - } - } - } - ShiftFlushedUntilAddress(); - result.Free(); - } - - Overlapped.Free(overlap); - } - - /// - /// IOCompletion callback for page flush - /// - /// - /// - /// - private void AsyncFlushPartialObjectLogCallback(uint errorCode, uint numBytes, NativeOverlapped* overlap) - { - if (errorCode != 0) - { - Trace.TraceError("OverlappedStream GetQueuedCompletionStatus error: {0}", errorCode); - } - - // Set the page status to flushed - PageAsyncFlushResult result = (PageAsyncFlushResult)Overlapped.Unpack(overlap).AsyncResult; - result.done.Set(); - - Overlapped.Free(overlap); - } - - /// - /// IOCompletion callback for page flush - /// - /// - /// - /// - private void AsyncFlushPageToDeviceCallback(uint errorCode, uint numBytes, NativeOverlapped* overlap) - { - if (errorCode != 0) - { - Trace.TraceError("OverlappedStream GetQueuedCompletionStatus error: {0}", errorCode); - } - - PageAsyncFlushResult result = (PageAsyncFlushResult)Overlapped.Unpack(overlap).AsyncResult; - - if (Interlocked.Decrement(ref result.count) == 0) - { - result.Free(); - } - Overlapped.Free(overlap); - } - - private void AsyncReadPageWithObjectsCallback(uint errorCode, uint numBytes, NativeOverlapped* overlap) - { - if (errorCode != 0) - { - Trace.TraceError("OverlappedStream GetQueuedCompletionStatus error: {0}", errorCode); - } - - PageAsyncReadResult result = (PageAsyncReadResult)Overlapped.Unpack(overlap).AsyncResult; - - long ptr = pointers[result.page % BufferSize]; - - // Correct for page 0 of HLOG - if (result.page == 0) - ptr += Constants.kFirstValidAddress; - - // Check if we are resuming - if (result.resumeptr > ptr) - ptr = result.resumeptr; - - // Deserialize all objects until untilptr - if (ptr < result.untilptr) - { - MemoryStream ms = new MemoryStream(result.freeBuffer1.buffer); - ms.Seek(result.freeBuffer1.offset + result.freeBuffer1.valid_offset, SeekOrigin.Begin); - pageHandlers.Deserialize(ptr, result.untilptr, ms); - ms.Dispose(); - - ptr = result.untilptr; - result.freeBuffer1.Return(); - result.freeBuffer1.buffer = null; - result.resumeptr = ptr; - } - - // If we have processed entire page, return - if (ptr >= pointers[result.page % BufferSize] + PageSize) - { - - result.Free(); - - // Call the "real" page read callback - result.callback(errorCode, numBytes, overlap); - return; - } - - // We will be re-issuing I/O, so free current overlap - Overlapped.Free(overlap); - - pageHandlers.GetObjectInfo(ref ptr, pointers[result.page % BufferSize] + PageSize, kObjectBlockSize, out long startptr, out long size); - - // Object log fragment should be aligned by construction - Debug.Assert(startptr % sectorSize == 0); - - // We will be able to process all records until (but not including) ptr - result.untilptr = ptr; - - if (size > int.MaxValue) - 
throw new Exception("Unable to read object page, total size greater than 2GB: " + size); - - var objBuffer = ioBufferPool.Get((int)size); - result.freeBuffer1 = objBuffer; - var alignedLength = (size + (sectorSize - 1)) & ~(sectorSize - 1); - - // Request objects from objlog - result.objlogDevice.ReadAsync( - (int)(result.page >> (LogSegmentSizeBits-LogPageSizeBits)), - (ulong)startptr, - (IntPtr)objBuffer.aligned_pointer, (uint)alignedLength, AsyncReadPageWithObjectsCallback, result); - - } - #endregion - } -} diff --git a/cs/src/core/Allocator/PersistentMemoryMalloc.cs b/cs/src/core/Allocator/PersistentMemoryMalloc.cs deleted file mode 100644 index e8c03ab64..000000000 --- a/cs/src/core/Allocator/PersistentMemoryMalloc.cs +++ /dev/null @@ -1,846 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. - -using System; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Runtime.InteropServices; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Linq.Expressions; -using System.IO; -using System.Diagnostics; - -namespace FASTER.core -{ - internal interface IAllocator : IDisposable - { - long Allocate(int numSlots); - long GetPhysicalAddress(long logicalAddress); - void CheckForAllocateComplete(ref long address); - } - - internal enum PMMFlushStatus : int { Flushed, InProgress }; - - internal enum PMMCloseStatus : int { Closed, Open }; - - internal struct FullPageStatus - { - public long LastFlushedUntilAddress; - public FlushCloseStatus PageFlushCloseStatus; - } - - [StructLayout(LayoutKind.Explicit)] - internal struct FlushCloseStatus - { - [FieldOffset(0)] - public PMMFlushStatus PageFlushStatus; - [FieldOffset(4)] - public PMMCloseStatus PageCloseStatus; - [FieldOffset(0)] - public long value; - } - - [StructLayout(LayoutKind.Explicit)] - internal struct PageOffset - { - [FieldOffset(0)] - public int Offset; - [FieldOffset(4)] - public int Page; - [FieldOffset(0)] - public long PageAndOffset; - } - - public unsafe partial class PersistentMemoryMalloc : IAllocator - { - // Epoch information - private LightEpoch epoch; - - // Read buffer pool - NativeSectorAlignedBufferPool readBufferPool; - - private readonly IDevice device; - private readonly IDevice objectLogDevice; - private readonly int sectorSize; - - // Page size - private const int LogPageSizeBits = 25; - private const int PageSize = 1 << LogPageSizeBits; - private const int PageSizeMask = PageSize - 1; - private readonly int AlignedPageSizeBytes; - - // Segment size - private readonly int LogSegmentSizeBits; - private readonly long SegmentSize; - private readonly long SegmentSizeMask; - private readonly int SegmentBufferSize; - - // Total HLOG size - private readonly int LogTotalSizeBits; - private readonly long LogTotalSizeBytes; - private readonly int BufferSize; - - // HeadOffset lag (from tail) - private const int HeadOffsetLagNumPages = 4; - private readonly int HeadOffsetLagSize; - private readonly long HeadOffsetLagAddress; - - // ReadOnlyOffset lag (from tail) - private readonly double LogMutableFraction; - private readonly long ReadOnlyLagAddress; - - // Circular buffer definition - private byte[][] values; - private GCHandle[] handles; - private long[] pointers; - private readonly GCHandle ptrHandle; - private readonly long* nativePointers; - - // Array that indicates the status of each buffer page - private readonly FullPageStatus[] PageStatusIndicator; - - // Size of object chunks beign written to storage - 
private const int kObjectBlockSize = 100 * (1 << 20); - - /// - /// Tail offsets per segment, in object log - /// - public readonly long[] segmentOffsets; - - NativeSectorAlignedBufferPool ioBufferPool; - - // Index in circular buffer, of the current tail page - private volatile int TailPageIndex; - - // Global address of the current tail (next element to be allocated from the circular buffer) - private PageOffset TailPageOffset; - - - /// - /// Read-only address - /// - public long ReadOnlyAddress; - - /// - /// Safe read-only address - /// - public long SafeReadOnlyAddress; - - /// - /// Head address - /// - public long HeadAddress; - - /// - /// Safe head address - /// - public long SafeHeadAddress; - - /// - /// Flushed until address - /// - public long FlushedUntilAddress; - - /// - /// Begin address - /// - public long BeginAddress; - - private IPageHandlers pageHandlers; - - /// - /// Create instance of PMM - /// - /// - /// - public PersistentMemoryMalloc(LogSettings settings, IPageHandlers pageHandlers) : this(settings, 0, pageHandlers) - { - - - Allocate(Constants.kFirstValidAddress); // null pointer - ReadOnlyAddress = GetTailAddress(); - SafeReadOnlyAddress = ReadOnlyAddress; - HeadAddress = ReadOnlyAddress; - SafeHeadAddress = ReadOnlyAddress; - BeginAddress = ReadOnlyAddress; - this.pageHandlers = pageHandlers; - } - - /// - /// Create instance of PMM - /// - /// - /// - /// - internal PersistentMemoryMalloc(LogSettings settings, long startAddress, IPageHandlers pageHandlers) - { - // Segment size - LogSegmentSizeBits = settings.SegmentSizeBits; - SegmentSize = 1 << LogSegmentSizeBits; - SegmentSizeMask = SegmentSize - 1; - SegmentBufferSize = 1 + - (LogTotalSizeBytes / SegmentSize < 1 ? 1 : (int)(LogTotalSizeBytes / SegmentSize)); - - // Total HLOG size - LogTotalSizeBits = settings.MemorySizeBits; - LogTotalSizeBytes = 1L << LogTotalSizeBits; - BufferSize = (int)(LogTotalSizeBytes / (1L << LogPageSizeBits)); - - // HeadOffset lag (from tail) - HeadOffsetLagSize = BufferSize - HeadOffsetLagNumPages; - HeadOffsetLagAddress = (long)HeadOffsetLagSize << LogPageSizeBits; - - // ReadOnlyOffset lag (from tail) - LogMutableFraction = settings.MutableFraction; - ReadOnlyLagAddress = (long)(LogMutableFraction * BufferSize) << LogPageSizeBits; - - values = new byte[BufferSize][]; - handles = new GCHandle[BufferSize]; - pointers = new long[BufferSize]; - PageStatusIndicator = new FullPageStatus[BufferSize]; - segmentOffsets = new long[SegmentBufferSize]; - - - if (BufferSize < 16) - { - throw new Exception("HLOG buffer must be at least 16 pages"); - } - - device = settings.LogDevice; - objectLogDevice = settings.ObjectLogDevice; - - if (pageHandlers.HasObjects()) - { - if (objectLogDevice == null) - throw new Exception("Objects in key/value, but object log not provided during creation of FASTER instance"); - } - - sectorSize = (int)device.SectorSize; - epoch = LightEpoch.Instance; - ioBufferPool = NativeSectorAlignedBufferPool.GetPool(1, sectorSize); - AlignedPageSizeBytes = ((PageSize + (sectorSize - 1)) & ~(sectorSize - 1)); - - ptrHandle = GCHandle.Alloc(pointers, GCHandleType.Pinned); - nativePointers = (long*)ptrHandle.AddrOfPinnedObject(); - - Initialize(startAddress); - } - - /// - /// Get sector size - /// - /// - public int GetSectorSize() - { - return sectorSize; - } - - internal void Initialize(long startAddress) - { - readBufferPool = NativeSectorAlignedBufferPool.GetPool(1, sectorSize); - long tailPage = startAddress >> LogPageSizeBits; - int tailPageIndex = (int)(tailPage 
% BufferSize); - - AllocatePage(tailPageIndex); - - SafeReadOnlyAddress = startAddress; - ReadOnlyAddress = startAddress; - SafeHeadAddress = startAddress; - HeadAddress = startAddress; - FlushedUntilAddress = startAddress; - BeginAddress = startAddress; - - TailPageOffset.Page = (int)(startAddress >> LogPageSizeBits); - TailPageOffset.Offset = (int)(startAddress & PageSizeMask); - - TailPageIndex = -1; - - //Handle the case when startAddress + pageSize overflows - //onto the next pageIndex in our buffer pages array - if (0 != (startAddress & PageSizeMask)) - { - // Update write cache to point to current level. - TailPageIndex = tailPageIndex; - Interlocked.MemoryBarrier(); - - // Allocate for next page - int newPageIndex = (tailPageIndex + 1) % BufferSize; - AllocatePage(newPageIndex); - } - } - - /// - /// Dispose memory allocator - /// - public void Dispose() - { - for (int i = 0; i < values.Length; i++) - { - if (handles[i].IsAllocated) - handles[i].Free(); - values[i] = null; - PageStatusIndicator[i].PageFlushCloseStatus = new FlushCloseStatus { PageFlushStatus = PMMFlushStatus.Flushed, PageCloseStatus = PMMCloseStatus.Closed }; - } - handles = null; - pointers = null; - values = null; - - TailPageOffset.Page = 0; - TailPageOffset.Offset = 0; - SafeReadOnlyAddress = 0; - ReadOnlyAddress = 0; - SafeHeadAddress = 0; - HeadAddress = 0; - BeginAddress = 1; - } - - /// - /// Get tail address - /// - /// - public long GetTailAddress() - { - var local = TailPageOffset; - return ((long)local.Page << LogPageSizeBits) | (uint)local.Offset; - } - - /// - /// Get page - /// - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public long GetPage(long logicalAddress) - { - return (logicalAddress >> LogPageSizeBits); - } - - /// - /// Get page index for page - /// - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public int GetPageIndexForPage(long page) - { - return (int)(page % BufferSize); - } - - /// - /// Get page index for address - /// - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public int GetPageIndexForAddress(long address) - { - return (int)((address >> LogPageSizeBits) % BufferSize); - } - - /// - /// Get capacity (number of pages) - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public int GetCapacityNumPages() - { - return BufferSize; - } - - /// - /// Get start logical address - /// - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public long GetStartLogicalAddress(long page) - { - return page << LogPageSizeBits; - } - - /// - /// Get page size - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public long GetPageSize() - { - return PageSize; - } - - /// - /// Get offset in page - /// - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public long GetOffsetInPage(long address) - { - return address & PageSizeMask; - } - - /// - /// Get offset lag in pages - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public long GetHeadOffsetLagInPages() - { - return HeadOffsetLagSize; - } - - /// - /// Used to obtain the physical address corresponding to a logical address - /// - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public long GetPhysicalAddress(long logicalAddress) - { - // Offset within page - int offset = (int)(logicalAddress & ((1L << LogPageSizeBits) -1)); - - // Index of page within the circular buffer - int pageIndex = (int)(logicalAddress >> LogPageSizeBits); - return *(nativePointers+pageIndex) + offset; - } - - /// - /// 
Key function used to allocate memory for a specified number of items - /// - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public long Allocate(int numSlots = 1) - { - PageOffset localTailPageOffset = default(PageOffset); - - // Determine insertion index. - // ReSharper disable once CSharpWarnings::CS0420 -#pragma warning disable 420 - localTailPageOffset.PageAndOffset = Interlocked.Add(ref TailPageOffset.PageAndOffset, numSlots); -#pragma warning restore 420 - - int page = localTailPageOffset.Page; - int offset = localTailPageOffset.Offset - numSlots; - -#region HANDLE PAGE OVERFLOW - /* To prove correctness of the following modifications - * done to TailPageOffset and the allocation itself, - * we should use the fact that only one thread will have any - * of the following cases since it is a counter and we spin-wait - * until the tail is folded onto next page accordingly. - */ - if (localTailPageOffset.Offset >= PageSize) - { - if (offset >= PageSize) - { - //The tail offset value was more than page size before atomic add - //We consider that a failed attempt and retry again - var spin = new SpinWait(); - do - { - //Just to give some more time to the thread - // that is handling this overflow - while (TailPageOffset.Offset >= PageSize) - { - spin.SpinOnce(); - } - - // ReSharper disable once CSharpWarnings::CS0420 -#pragma warning disable 420 - localTailPageOffset.PageAndOffset = Interlocked.Add(ref TailPageOffset.PageAndOffset, numSlots); -#pragma warning restore 420 - - page = localTailPageOffset.Page; - offset = localTailPageOffset.Offset - numSlots; - } while (offset >= PageSize); - } - - - if (localTailPageOffset.Offset == PageSize) - { - //Folding over at page boundary - localTailPageOffset.Page++; - localTailPageOffset.Offset = 0; - TailPageOffset = localTailPageOffset; - } - else if (localTailPageOffset.Offset >= PageSize) - { - //Overflows not allowed. We allot same space in next page. - localTailPageOffset.Page++; - localTailPageOffset.Offset = numSlots; - TailPageOffset = localTailPageOffset; - - page = localTailPageOffset.Page; - offset = 0; - } - } -#endregion - - long address = (((long)page) << LogPageSizeBits) | ((long)offset); - - // Check if TailPageIndex is appropriate and allocated! - int pageIndex = page % BufferSize; - - if (TailPageIndex == pageIndex) - { - return (address); - } - - //Invert the address if either the previous page is not flushed or if it is null - if ((PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageFlushStatus != PMMFlushStatus.Flushed) || - (PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageCloseStatus != PMMCloseStatus.Closed) || - (values[pageIndex] == null)) - { - address = -address; - } - - // Update the read-only so that we can get more space for the tail - if (offset == 0) - { - if (address >= 0) - { - TailPageIndex = pageIndex; - Interlocked.MemoryBarrier(); - } - - long newPage = page + 1; - int newPageIndex = (int)((page + 1) % BufferSize); - - long tailAddress = (address < 0 ? -address : address); - PageAlignedShiftReadOnlyAddress(tailAddress); - PageAlignedShiftHeadAddress(tailAddress); - - if (values[newPageIndex] == null) - { - AllocatePage(newPageIndex); - } - } - - return (address); - } - - /// - /// If allocator cannot allocate new memory as the head has not shifted or the previous page - /// is not yet closed, it allocates but returns the negative address. 
- /// This function is invoked to check if the address previously allocated has become valid to be used - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void CheckForAllocateComplete(ref long address) - { - if (address >= 0) - { - throw new Exception("Address already allocated!"); - } - - PageOffset p = default(PageOffset); - p.Page = (int)((-address) >> LogPageSizeBits); - p.Offset = (int)((-address) & PageSizeMask); - - //Check write cache - int pageIndex = p.Page % BufferSize; - if (TailPageIndex == pageIndex) - { - address = -address; - return; - } - - //Check if we can move the head offset - long currentTailAddress = GetTailAddress(); - PageAlignedShiftHeadAddress(currentTailAddress); - - //Check if I can allocate pageIndex at all - if ((PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageFlushStatus != PMMFlushStatus.Flushed) || - (PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageCloseStatus != PMMCloseStatus.Closed) || - (values[pageIndex] == null)) - { - return; - } - - //correct values and set write cache - address = -address; - if (p.Offset == 0) - { - TailPageIndex = pageIndex; - } - return; - } - - /// - /// Used by applications to make the current state of the database immutable quickly - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void ShiftReadOnlyToTail(out long tailAddress) - { - tailAddress = GetTailAddress(); - long localTailAddress = tailAddress; - long currentReadOnlyOffset = ReadOnlyAddress; - if (MonotonicUpdate(ref ReadOnlyAddress, tailAddress, out long oldReadOnlyOffset)) - { - epoch.BumpCurrentEpoch(() => OnPagesMarkedReadOnly(localTailAddress, false)); - } - } - - /// - /// Shift begin address - /// - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - public void ShiftBeginAddress(long oldBeginAddress, long newBeginAddress) - { - epoch.BumpCurrentEpoch(() => - { - device.DeleteAddressRange(oldBeginAddress, newBeginAddress); - objectLogDevice.DeleteSegmentRange((int)(oldBeginAddress >> LogSegmentSizeBits), (int)(newBeginAddress >> LogSegmentSizeBits)); - }); - } - - /// - /// Seal: make sure there are no longer any threads writing to the page - /// Flush: send page to secondary store - /// - /// - /// - public void OnPagesMarkedReadOnly(long newSafeReadOnlyAddress, bool waitForPendingFlushComplete = false) - { - if(MonotonicUpdate(ref SafeReadOnlyAddress, newSafeReadOnlyAddress, out long oldSafeReadOnlyAddress)) - { - Debug.WriteLine("SafeReadOnly shifted from {0:X} to {1:X}", oldSafeReadOnlyAddress, newSafeReadOnlyAddress); - long startPage = oldSafeReadOnlyAddress >> LogPageSizeBits; - - long endPage = (newSafeReadOnlyAddress >> LogPageSizeBits); - int numPages = (int)(endPage - startPage); - if (numPages > 10) - { - new Thread( - () => AsyncFlushPages(startPage, newSafeReadOnlyAddress)).Start(); - } - else - { - AsyncFlushPages(startPage, newSafeReadOnlyAddress); - } - } - } - - /// - /// Action to be performed for when all threads have - /// agreed that a page range is closed. 
- /// - /// - public void OnPagesClosed(long newSafeHeadAddress) - { - if (MonotonicUpdate(ref SafeHeadAddress, newSafeHeadAddress, out long oldSafeHeadAddress)) - { - Debug.WriteLine("SafeHeadOffset shifted from {0:X} to {1:X}", oldSafeHeadAddress, newSafeHeadAddress); - - for (long closePageAddress = oldSafeHeadAddress; closePageAddress < newSafeHeadAddress; closePageAddress += PageSize) - { - int closePage = (int)((closePageAddress >> LogPageSizeBits) % BufferSize); - - if (values[closePage] == null) - { - AllocatePage(closePage); - } - - while (true) - { - var oldStatus = PageStatusIndicator[closePage].PageFlushCloseStatus; - if (oldStatus.PageFlushStatus == PMMFlushStatus.Flushed) - { - ClearPage(closePage, (closePageAddress >> LogPageSizeBits) == 0); - - var thisCloseSegment = closePageAddress >> LogSegmentSizeBits; - var nextClosePage = (closePageAddress >> LogPageSizeBits) + 1; - var nextCloseSegment = nextClosePage >> (LogSegmentSizeBits - LogPageSizeBits); - - if (thisCloseSegment != nextCloseSegment) - { - // Last page in current segment - segmentOffsets[thisCloseSegment % SegmentBufferSize] = 0; - } - } - else - { - throw new Exception("Impossible"); - } - var newStatus = oldStatus; - newStatus.PageCloseStatus = PMMCloseStatus.Closed; - if (oldStatus.value == Interlocked.CompareExchange(ref PageStatusIndicator[closePage].PageFlushCloseStatus.value, newStatus.value, oldStatus.value)) - { - break; - } - } - - //Necessary to propagate this change to other threads - Interlocked.MemoryBarrier(); - } - } - } - - private void ClearPage(int page, bool pageZero) - { - if (pageHandlers.HasObjects()) - { - long ptr = pointers[page]; - int numBytes = PageSize; - long endptr = ptr + numBytes; - - if (pageZero) ptr += Constants.kFirstValidAddress; - pageHandlers.ClearPage(ptr, endptr); - } - Array.Clear(values[page], 0, values[page].Length); - } - - - /// - /// Allocate memory page, pinned in memory, and in sector aligned form, if possible - /// - /// - private void AllocatePage(int index) - { - var adjustedSize = PageSize + 2 * sectorSize; - byte[] tmp = new byte[adjustedSize]; - Array.Clear(tmp, 0, adjustedSize); - - handles[index] = GCHandle.Alloc(tmp, GCHandleType.Pinned); - long p = (long)handles[index].AddrOfPinnedObject(); - pointers[index] = (p + (sectorSize - 1)) & ~(sectorSize - 1); - values[index] = tmp; - - PageStatusIndicator[index].PageFlushCloseStatus.PageFlushStatus = PMMFlushStatus.Flushed; - PageStatusIndicator[index].PageFlushCloseStatus.PageCloseStatus = PMMCloseStatus.Closed; - Interlocked.MemoryBarrier(); - } - - /// - /// Called every time a new tail page is allocated. Here the read-only is - /// shifted only to page boundaries unlike ShiftReadOnlyToTail where shifting - /// can happen to any fine-grained address. 
- /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private void PageAlignedShiftReadOnlyAddress(long currentTailAddress) - { - long currentReadOnlyAddress = ReadOnlyAddress; - long pageAlignedTailAddress = currentTailAddress & ~PageSizeMask; - long desiredReadOnlyAddress = (pageAlignedTailAddress - ReadOnlyLagAddress); - if (MonotonicUpdate(ref ReadOnlyAddress, desiredReadOnlyAddress, out long oldReadOnlyAddress)) - { - Debug.WriteLine("Allocate: Moving read-only offset from {0:X} to {1:X}", oldReadOnlyAddress, desiredReadOnlyAddress); - epoch.BumpCurrentEpoch(() => OnPagesMarkedReadOnly(desiredReadOnlyAddress)); - } - } - - /// - /// Called whenever a new tail page is allocated or when the user is checking for a failed memory allocation - /// Tries to shift head address based on the head offset lag size. - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private void PageAlignedShiftHeadAddress(long currentTailAddress) - { - //obtain local values of variables that can change - long currentHeadAddress = HeadAddress; - long currentFlushedUntilAddress = FlushedUntilAddress; - long pageAlignedTailAddress = currentTailAddress & ~PageSizeMask; - long desiredHeadAddress = (pageAlignedTailAddress - HeadOffsetLagAddress); - - long newHeadAddress = desiredHeadAddress; - if(currentFlushedUntilAddress < newHeadAddress) - { - newHeadAddress = currentFlushedUntilAddress; - } - newHeadAddress = newHeadAddress & ~PageSizeMask; - - if (MonotonicUpdate(ref HeadAddress, newHeadAddress, out long oldHeadAddress)) - { - Debug.WriteLine("Allocate: Moving head offset from {0:X} to {1:X}", oldHeadAddress, newHeadAddress); - epoch.BumpCurrentEpoch(() => OnPagesClosed(newHeadAddress)); - } - } - - /// - /// Every async flush callback tries to update the flushed until address to the latest value possible - /// Is there a better way to do this with enabling fine-grained addresses (not necessarily at page boundaries)? - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private void ShiftFlushedUntilAddress() - { - long currentFlushedUntilAddress = FlushedUntilAddress; - long page = GetPage(currentFlushedUntilAddress); - - bool update = false; - long pageLastFlushedAddress = PageStatusIndicator[(int)(page % BufferSize)].LastFlushedUntilAddress; - while (pageLastFlushedAddress >= currentFlushedUntilAddress) - { - currentFlushedUntilAddress = pageLastFlushedAddress; - update = true; - page++; - pageLastFlushedAddress = PageStatusIndicator[(int)(page % BufferSize)].LastFlushedUntilAddress; - } - - if (update) - { - MonotonicUpdate(ref FlushedUntilAddress, currentFlushedUntilAddress, out long oldFlushedUntilAddress); - } - } - - - - /// - /// Used by several functions to update the variable to newValue. Ignores if newValue is smaller or - /// than the current value. 
- /// - /// - /// - /// - /// - [MethodImpl(MethodImplOptions.AggressiveInlining)] - private bool MonotonicUpdate(ref long variable, long newValue, out long oldValue) - { - oldValue = variable; - while (oldValue < newValue) - { - var foundValue = Interlocked.CompareExchange(ref variable, newValue, oldValue); - if (foundValue == oldValue) - { - return true; - } - oldValue = foundValue; - } - return false; - } - - /// - /// Reset for recovery - /// - /// - /// - public void RecoveryReset(long tailAddress, long headAddress) - { - long tailPage = GetPage(tailAddress); - long offsetInPage = GetOffsetInPage(tailAddress); - TailPageOffset.Page = (int)tailPage; - TailPageOffset.Offset = (int)offsetInPage; - TailPageIndex = GetPageIndexForPage(TailPageOffset.Page); - - // issue read request to all pages until head lag - HeadAddress = headAddress; - SafeHeadAddress = headAddress; - FlushedUntilAddress = headAddress; - ReadOnlyAddress = tailAddress; - SafeReadOnlyAddress = tailAddress; - - for (var addr = headAddress; addr < tailAddress; addr += PageSize) - { - var pageIndex = GetPageIndexForAddress(addr); - PageStatusIndicator[pageIndex].PageFlushCloseStatus.PageCloseStatus = PMMCloseStatus.Open; - } - } - } -} diff --git a/cs/src/core/Codegen/CompilerBase.cs b/cs/src/core/Codegen/CompilerBase.cs deleted file mode 100644 index 1239dac8c..000000000 --- a/cs/src/core/Codegen/CompilerBase.cs +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT license. - -using Microsoft.CodeAnalysis; -using Microsoft.CodeAnalysis.CSharp; -using System; -using System.Collections.Generic; -using System.IO; -using System.Linq; -using System.Reflection; -using System.Runtime.CompilerServices; -using System.Text; -using static FASTER.core.Roslyn.Helper; - -namespace FASTER.core.Roslyn -{ - class CompilerBase - { - protected CSharpCompilation compilation; - protected Dictionary metadataReferences = new Dictionary(); - protected IEnumerable