From ed4ace91a4cb192fc0038fa8d8a151380c29faa2 Mon Sep 17 00:00:00 2001 From: Jian Zhang Date: Tue, 20 Aug 2024 23:36:18 +0800 Subject: [PATCH 1/4] chore: reduce std::function overhead --- CMakePresets.json | 2 +- include/leanstore/LeanStore.hpp | 3 +- .../leanstore/concurrency/WorkerThread.hpp | 2 +- include/leanstore/utils/RandomGenerator.hpp | 4 - src/LeanStore.cpp | 12 +- src/leanstore-c/leanstore-c.cpp | 2 +- tests/CMakeLists.txt | 3 +- tests/{ => btree}/BasicKVTest.cpp | 111 ++++++++++++++++-- 8 files changed, 112 insertions(+), 27 deletions(-) rename tests/{ => btree}/BasicKVTest.cpp (75%) diff --git a/CMakePresets.json b/CMakePresets.json index 477ad8a9..6e84ca1d 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -28,7 +28,7 @@ "cacheVariables": { "CMAKE_BUILD_TYPE": "Release", "COUNTERS_LEVEL": "none", - "ENABLE_PROFILING": "OFF" + "ENABLE_PROFILING": "ON" } }, { diff --git a/include/leanstore/LeanStore.hpp b/include/leanstore/LeanStore.hpp index 0f24d9b1..23418722 100644 --- a/include/leanstore/LeanStore.hpp +++ b/include/leanstore/LeanStore.hpp @@ -69,7 +69,8 @@ class LeanStore { std::unique_ptr mBufferManager; //! The concurrent resource manager - std::unique_ptr mCRManager; + //! NOTE: Ownerd by LeanStore instance, should be destroyed together with it + cr::CRManager* mCRManager; //! The global timestamp oracle, used to generate start and commit timestamps //! for all transactions in the store. Start from a positive number, 0 diff --git a/include/leanstore/concurrency/WorkerThread.hpp b/include/leanstore/concurrency/WorkerThread.hpp index c0fc82b2..f37f9710 100644 --- a/include/leanstore/concurrency/WorkerThread.hpp +++ b/include/leanstore/concurrency/WorkerThread.hpp @@ -118,7 +118,7 @@ inline void WorkerThread::SetJob(std::function job) { mCv.wait(guard, [&]() { return mJob == nullptr && !mJobDone; }); // set a new job, change the worker state to (jobSet, jobNotDone), notify the worker thread - mJob = job; + mJob = std::move(job); guard.unlock(); mCv.notify_all(); diff --git a/include/leanstore/utils/RandomGenerator.hpp b/include/leanstore/utils/RandomGenerator.hpp index ca21d5e6..60c47830 100644 --- a/include/leanstore/utils/RandomGenerator.hpp +++ b/include/leanstore/utils/RandomGenerator.hpp @@ -43,10 +43,6 @@ class RandomGenerator { //! range [min, max) static uint64_t RandU64(uint64_t min, uint64_t max) { uint64_t rand = min + (tlsMtGenerator.Rand() % (max - min)); - LS_DCHECK(min <= rand && rand < max, - "Random number should be in range [min, max), but min={}, " - "max={}, rand={}", - min, max, rand); return rand; } diff --git a/src/LeanStore.cpp b/src/LeanStore.cpp index 5606680b..6a9f9e97 100644 --- a/src/LeanStore.cpp +++ b/src/LeanStore.cpp @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -81,7 +82,7 @@ LeanStore::LeanStore(StoreOption* option) : mStoreOption(option), mMetricsManage // // TODO(jian.z): Deserialize buffer manager before creating CRManager. 
We need to initialize // nextPageId for each buffer partition before creating history tree in CRManager - mCRManager = std::make_unique(this); + mCRManager = new cr::CRManager(this); // recover from disk if (!mStoreOption->mCreateFromScratch) { @@ -178,7 +179,10 @@ LeanStore::~LeanStore() { mBufferManager->SyncAllPageWrites(); // destroy and Stop all foreground workers - mCRManager = nullptr; + if (mCRManager != nullptr) { + delete mCRManager; + mCRManager = nullptr; + } // destroy buffer manager (buffer frame providers) mBufferManager->StopPageEvictors(); @@ -209,12 +213,12 @@ LeanStore::~LeanStore() { } void LeanStore::ExecSync(uint64_t workerId, std::function job) { - mCRManager->mWorkerThreads[workerId]->SetJob(job); + mCRManager->mWorkerThreads[workerId]->SetJob(std::move(job)); mCRManager->mWorkerThreads[workerId]->Wait(); } void LeanStore::ExecAsync(uint64_t workerId, std::function job) { - mCRManager->mWorkerThreads[workerId]->SetJob(job); + mCRManager->mWorkerThreads[workerId]->SetJob(std::move(job)); } void LeanStore::Wait(WORKERID workerId) { diff --git a/src/leanstore-c/leanstore-c.cpp b/src/leanstore-c/leanstore-c.cpp index 7918634f..ed761346 100644 --- a/src/leanstore-c/leanstore-c.cpp +++ b/src/leanstore-c/leanstore-c.cpp @@ -129,7 +129,7 @@ String* BasicKvLookup(BasicKvHandle* handle, uint64_t workerId, StringSlice key) auto copyValueOut = [&](leanstore::Slice valSlice) { val = CreateString(reinterpret_cast(valSlice.data()), valSlice.size()); }; - handle->mBtree->Lookup(leanstore::Slice(key.mData, key.mSize), copyValueOut); + handle->mBtree->Lookup(leanstore::Slice(key.mData, key.mSize), std::move(copyValueOut)); }); return val; } diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index e5ae27bd..7a5db219 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -43,7 +43,6 @@ endfunction(leanstore_add_test_in_dir) # Add tests -leanstore_add_test(BasicKVTest) leanstore_add_test(RecoveryTest) leanstore_add_test(OptimisticGuardedTest) leanstore_add_test(TransactionKVTest) @@ -52,7 +51,7 @@ leanstore_add_test(AnomaliesTest) leanstore_add_test(AbortTest) leanstore_add_test(LongRunningTxTest) -# tests in btree +# tests in sub-directories leanstore_add_test_in_dir(btree) leanstore_add_test_in_dir(buffer-manager) leanstore_add_test_in_dir(concurrency) diff --git a/tests/BasicKVTest.cpp b/tests/btree/BasicKVTest.cpp similarity index 75% rename from tests/BasicKVTest.cpp rename to tests/btree/BasicKVTest.cpp index 69532a03..3afa9e28 100644 --- a/tests/BasicKVTest.cpp +++ b/tests/btree/BasicKVTest.cpp @@ -9,7 +9,15 @@ #include +#include #include +#include +#include +#include +#include +#include + +#include namespace leanstore::test { @@ -25,17 +33,22 @@ class BasicKVTest : public ::testing::Test { // Create a leanstore instance for the test case StoreOption* option = CreateStoreOption(getTestDataDir().c_str()); option->mWorkerThreads = 2; - option->mEnableEagerGc = true; auto res = LeanStore::Open(option); ASSERT_TRUE(res); mStore = std::move(res.value()); + ASSERT_NE(mStore, nullptr); } -private: +protected: std::string getTestDataDir() { auto* curTest = ::testing::UnitTest::GetInstance()->current_test_info(); - return std::string("/tmp/leanstore/") + curTest->test_case_name() + "_" + curTest->name(); + return std::string("/tmp/leanstore/") + curTest->name(); + } + + std::string genBtreeName(const std::string& suffix = "") { + auto* curTest = ::testing::UnitTest::GetInstance()->current_test_info(); + return std::string(curTest->name()) + suffix; } }; @@ -91,20 +104,16 @@ 
TEST_F(BasicKVTest, BasicKVInsertAndLookup) { // insert some values btree = res.value(); - cr::Worker::My().StartTx(); for (size_t i = 0; i < numKVs; ++i) { const auto& [key, val] = kvToTest[i]; EXPECT_EQ(btree->Insert(Slice((const uint8_t*)key.data(), key.size()), Slice((const uint8_t*)val.data(), val.size())), OpCode::kOK); } - cr::Worker::My().CommitTx(); }); // query on the created btree in the same worker mStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); std::string copiedValue; auto copyValueOut = [&](Slice val) { copiedValue = std::string((const char*)val.data(), val.size()); @@ -119,8 +128,6 @@ TEST_F(BasicKVTest, BasicKVInsertAndLookup) { // query on the created btree in another worker mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); std::string copiedValue; auto copyValueOut = [&](Slice val) { copiedValue = std::string((const char*)val.data(), val.size()); @@ -153,7 +160,6 @@ TEST_F(BasicKVTest, BasicKVInsertDuplicatedKey) { EXPECT_NE(res.value(), nullptr); btree = res.value(); - cr::Worker::My().StartTx(); for (size_t i = 0; i < numKVs; ++i) { const auto& [key, val] = kvToTest[i]; EXPECT_EQ(btree->Insert(Slice((const uint8_t*)key.data(), key.size()), @@ -178,13 +184,10 @@ TEST_F(BasicKVTest, BasicKVInsertDuplicatedKey) { Slice((const uint8_t*)val.data(), val.size())), OpCode::kDuplicated); } - cr::Worker::My().CommitTx(); }); // insert duplicated keys in another worker mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); // duplicated keys will failed for (size_t i = 0; i < numKVs; ++i) { const auto& [key, val] = kvToTest[i]; @@ -194,6 +197,7 @@ TEST_F(BasicKVTest, BasicKVInsertDuplicatedKey) { } }); } + TEST_F(BasicKVTest, BasicKVScanAscAndScanDesc) { storage::btree::BasicKV* btree; // prepare key-value pairs to insert @@ -286,4 +290,85 @@ TEST_F(BasicKVTest, BasicKVScanAscAndScanDesc) { } }); } + +TEST_F(BasicKVTest, SameKeyInsertRemoveMultiTimes) { + // create a basickv + storage::btree::BasicKV* btree; + mStore->ExecSync(0, [&]() { + auto res = mStore->CreateBasicKV(genBtreeName("_tree1")); + ASSERT_TRUE(res); + ASSERT_NE(res.value(), nullptr); + btree = res.value(); + }); + + // insert 100 key-values to the btree + size_t numKVs(1000); + std::vector> kvToTest; + for (size_t i = 0; i < numKVs; ++i) { + std::string key("key_" + std::to_string(i) + std::string(10, 'x')); + std::string val("val_" + std::to_string(i) + std::string(200, 'x')); + mStore->ExecSync(0, [&]() { EXPECT_EQ(btree->Insert(key, val), OpCode::kOK); }); + kvToTest.emplace_back(std::move(key), std::move(val)); + } + + // // start a new thread, remove-insert the key-values to the btree + // std::atomic stop{false}; + // std::thread t1([&]() { + // while (!stop) { + // for (const auto& [key, val] : kvToTest) { + // mStore->ExecSync(0, [&]() { EXPECT_EQ(btree->Remove(key), OpCode::kOK); }); + // mStore->ExecSync(0, [&]() { EXPECT_EQ(btree->Insert(key, val), OpCode::kOK); }); + // } + // } + // }); + + // // start another thread, remove-insert the key-values to the btree + // std::thread t2([&]() { + // while (!stop) { + // for (const auto& [key, val] : kvToTest) { + // mStore->ExecSync(1, [&]() { EXPECT_EQ(btree->Remove(key), OpCode::kOK); }); + // mStore->ExecSync(1, [&]() { EXPECT_EQ(btree->Insert(key, val), OpCode::kOK); }); + // } + // } + // }); + + // // sleep for 1 seconds + // std::this_thread::sleep_for(std::chrono::seconds(1)); + // stop = true; + 
// t1.join(); + // t2.join(); + + // 1. remove the key-values from the btree + // 2. insert the key-values to the btree again + const auto& [key, val] = kvToTest[numKVs / 2]; + std::atomic stop{false}; + + std::thread t1([&]() { + while (!stop) { + mStore->ExecSync(0, [&]() { btree->Remove(key); }); + mStore->ExecSync(0, [&]() { btree->Insert(key, val); }); + } + }); + + std::thread t2([&]() { + std::string copiedValue; + auto copyValueOut = [&](Slice valSlice) { + copiedValue = std::string((const char*)valSlice.data(), valSlice.size()); + EXPECT_EQ(copiedValue, val); + }; + while (!stop) { + mStore->ExecSync(0, [&]() { btree->Lookup(key, std::move(copyValueOut)); }); + } + }); + + // sleep for 1 seconds + std::this_thread::sleep_for(std::chrono::seconds(20)); + stop = true; + t1.join(); + t2.join(); + + // count the key-values in the btree + mStore->ExecSync(0, [&]() { EXPECT_EQ(btree->CountEntries(), numKVs); }); +} + } // namespace leanstore::test From d2eb9e188a456d766b1ad5321fcc19aa62d7e431 Mon Sep 17 00:00:00 2001 From: Jian Zhang Date: Wed, 21 Aug 2024 11:19:38 +0800 Subject: [PATCH 2/4] chore: rename Worker to WorkerContext --- .clang-format | 1 + CMakeLists.txt | 2 +- CMakePresets.json | 9 +- .../micro-benchmarks/InsertUpdateBench.cpp | 8 +- benchmarks/shared/Adapter.hpp | 1 - benchmarks/shared/LMDBAdapter.hpp | 1 - benchmarks/shared/LeanStoreAdapter.hpp | 14 +- benchmarks/shared/RocksDBAdapter.hpp | 1 - benchmarks/ycsb/YcsbFlags.cpp | 2 +- benchmarks/ycsb/YcsbLeanStore.hpp | 20 +-- examples/c/BasicKvExample.c | 12 +- examples/cpp/BasicKvExample.cpp | 2 +- include/leanstore-c/StoreOption.h | 2 +- include/leanstore-c/leanstore-c.h | 18 +- include/leanstore/Exceptions.hpp | 2 + include/leanstore/KVInterface.hpp | 2 + include/leanstore/LeanStore.hpp | 4 + include/leanstore/btree/BasicKV.hpp | 19 +- include/leanstore/btree/ChainedTuple.hpp | 14 +- include/leanstore/btree/TransactionKV.hpp | 4 +- include/leanstore/btree/core/WALMacros.hpp | 115 ------------ .../buffer-manager/GuardedBufferFrame.hpp | 30 ++-- include/leanstore/buffer-manager/Swip.hpp | 2 +- include/leanstore/concurrency/CRManager.hpp | 4 +- .../leanstore/concurrency/GroupCommitter.hpp | 9 +- include/leanstore/concurrency/LoggingImpl.hpp | 4 +- .../concurrency/WalPayloadHandler.hpp | 4 +- .../{Worker.hpp => WorkerContext.hpp} | 30 ++-- src/LeanStore.cpp | 19 +- src/btree/BasicKV.cpp | 39 +++- src/btree/ChainedTuple.cpp | 14 +- src/btree/TransactionKV.cpp | 105 +++++------ src/btree/Tuple.cpp | 20 +-- src/btree/core/BTreeGeneric.cpp | 2 +- src/concurrency/CRManager.cpp | 16 +- src/concurrency/ConcurrencyControl.cpp | 59 +++--- src/concurrency/GroupCommitter.cpp | 14 +- src/concurrency/Logging.cpp | 17 +- .../{Worker.cpp => WorkerContext.cpp} | 17 +- src/leanstore-c/StoreOption.cpp | 2 +- src/leanstore-c/leanstore-c.cpp | 53 +++++- src/telemetry/MetricsHttpExposer.cpp | 6 +- src/telemetry/MetricsHttpExposer.hpp | 3 + src/telemetry/MetricsManager.hpp | 23 ++- tests/LongRunningTxTest.cpp | 94 +++++----- tests/MvccTest.cpp | 40 ++--- tests/OptimisticGuardedTest.cpp | 8 +- tests/RecoveryTest.cpp | 44 ++--- tests/TransactionKVTest.cpp | 168 +++++++++--------- tests/TxKV.hpp | 36 ++-- tests/btree/BasicKvIteratorTest.cpp | 10 +- 51 files changed, 571 insertions(+), 574 deletions(-) delete mode 100644 include/leanstore/btree/core/WALMacros.hpp rename include/leanstore/concurrency/{Worker.hpp => WorkerContext.hpp} (65%) rename src/concurrency/{Worker.cpp => WorkerContext.cpp} (93%) diff --git a/.clang-format b/.clang-format index 
a702cad7..8cf12934 100644 --- a/.clang-format +++ b/.clang-format @@ -15,6 +15,7 @@ AllowShortFunctionsOnASingleLine: None AllowShortIfStatementsOnASingleLine: false AllowShortLoopsOnASingleLine: false EmptyLineBeforeAccessModifier: Always +Cpp11BracedListStyle: true # constructor initializers PackConstructorInitializers: CurrentLine diff --git a/CMakeLists.txt b/CMakeLists.txt index 498c4275..e0383c36 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -15,7 +15,7 @@ set(CMAKE_CXX_STANDARD 23) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Werror -Wextra -rdynamic -fno-omit-frame-pointer -pthread") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Werror -Wextra -rdynamic -fno-omit-frame-pointer -pthread -Wno-vla-cxx-extension") if (CMAKE_BUILD_TYPE MATCHES Debug) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDEBUG -O0 -g3") else() diff --git a/CMakePresets.json b/CMakePresets.json index 6e84ca1d..0934efac 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -9,7 +9,8 @@ "CMAKE_CXX_COMPILER": "g++-13", "CMAKE_TOOLCHAIN_FILE": "$env{VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake", "CMAKE_EXPORT_COMPILE_COMMANDS": "ON", - "BUILD_SHARED_LIBS": "ON" + "BUILD_SHARED_LIBS": "ON", + "ENABLE_PROFILING": "ON" } }, { @@ -17,8 +18,7 @@ "inherits": "base", "hidden": true, "cacheVariables": { - "CMAKE_BUILD_TYPE": "Debug", - "ENABLE_PROFILING": "ON" + "CMAKE_BUILD_TYPE": "Debug" } }, { @@ -27,8 +27,7 @@ "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "Release", - "COUNTERS_LEVEL": "none", - "ENABLE_PROFILING": "ON" + "COUNTERS_LEVEL": "none" } }, { diff --git a/benchmarks/micro-benchmarks/InsertUpdateBench.cpp b/benchmarks/micro-benchmarks/InsertUpdateBench.cpp index 55eaf9cb..9da5871d 100644 --- a/benchmarks/micro-benchmarks/InsertUpdateBench.cpp +++ b/benchmarks/micro-benchmarks/InsertUpdateBench.cpp @@ -41,7 +41,7 @@ static void BenchUpdateInsert(benchmark::State& state) { std::unordered_set dedup; for (auto _ : state) { sLeanStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); std::string key; std::string val; for (size_t i = 0; i < 16; i++) { @@ -50,13 +50,13 @@ static void BenchUpdateInsert(benchmark::State& state) { btree->Insert(Slice((const uint8_t*)key.data(), key.size()), Slice((const uint8_t*)val.data(), val.size())); } - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } sLeanStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); sLeanStore->DropTransactionKV(btreeName); }); } diff --git a/benchmarks/shared/Adapter.hpp b/benchmarks/shared/Adapter.hpp index 9dc9f3f3..68a458c2 100644 --- a/benchmarks/shared/Adapter.hpp +++ b/benchmarks/shared/Adapter.hpp @@ -3,7 +3,6 @@ #include "Types.hpp" #include "leanstore/Exceptions.hpp" #include "leanstore/KVInterface.hpp" -#include "leanstore/btree/core/WALMacros.hpp" #include #include diff --git a/benchmarks/shared/LMDBAdapter.hpp b/benchmarks/shared/LMDBAdapter.hpp index ac752792..ca7c3f2c 100644 --- a/benchmarks/shared/LMDBAdapter.hpp +++ b/benchmarks/shared/LMDBAdapter.hpp @@ -4,7 +4,6 @@ #include "Types.hpp" // ------------------------------------------------------------------------------------- #include "leanstore/KVInterface.hpp" -#include "leanstore/btree/core/WALMacros.hpp" #include "leanstore/utils/JumpMU.hpp" #include "lmdb++.hpp" // Using C++ Wrapper from LMDB // 
------------------------------------------------------------------------------------- diff --git a/benchmarks/shared/LeanStoreAdapter.hpp b/benchmarks/shared/LeanStoreAdapter.hpp index b5a12262..971f1cfd 100644 --- a/benchmarks/shared/LeanStoreAdapter.hpp +++ b/benchmarks/shared/LeanStoreAdapter.hpp @@ -50,7 +50,7 @@ struct LeanStoreAdapter : Adapter { return cb(typed_key, record); }); if (ret == leanstore::OpCode::kAbortTx) { - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); } } @@ -61,7 +61,7 @@ struct LeanStoreAdapter : Adapter { btree->Insert(Slice(foldedKey, foldedKeySize), Slice((uint8_t*)(&record), sizeof(Record))); LS_DCHECK(res == leanstore::OpCode::kOK || res == leanstore::OpCode::kAbortTx); if (res == leanstore::OpCode::kAbortTx) { - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); } } @@ -74,7 +74,7 @@ struct LeanStoreAdapter : Adapter { cb(record); }); if (res == leanstore::OpCode::kAbortTx) { - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); } LS_DCHECK(res == leanstore::OpCode::kOK); } @@ -94,7 +94,7 @@ struct LeanStoreAdapter : Adapter { updateDesc); LS_DCHECK(res != leanstore::OpCode::kNotFound); if (res == leanstore::OpCode::kAbortTx) { - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); } } @@ -103,7 +103,7 @@ struct LeanStoreAdapter : Adapter { uint16_t foldedKeySize = Record::foldKey(foldedKey, key); const auto res = btree->Remove(Slice(foldedKey, foldedKeySize)); if (res == leanstore::OpCode::kAbortTx) { - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); } return (res == leanstore::OpCode::kOK); } @@ -124,7 +124,7 @@ struct LeanStoreAdapter : Adapter { return cb(typed_key, record); }); if (ret == leanstore::OpCode::kAbortTx) { - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); } } @@ -139,7 +139,7 @@ struct LeanStoreAdapter : Adapter { local_f = (record).*f; }); if (res == leanstore::OpCode::kAbortTx) { - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); } LS_DCHECK(res == OpCode::kOK); return local_f; diff --git a/benchmarks/shared/RocksDBAdapter.hpp b/benchmarks/shared/RocksDBAdapter.hpp index be8b9628..d9b2494c 100644 --- a/benchmarks/shared/RocksDBAdapter.hpp +++ b/benchmarks/shared/RocksDBAdapter.hpp @@ -3,7 +3,6 @@ #include "Types.hpp" // ------------------------------------------------------------------------------------- #include "leanstore/KVInterface.hpp" -#include "leanstore/btree/core/WALMacros.hpp" #include "leanstore/utils/JumpMU.hpp" #include "rocksdb/db.h" diff --git a/benchmarks/ycsb/YcsbFlags.cpp b/benchmarks/ycsb/YcsbFlags.cpp index 582529b3..e6fc0ddc 100644 --- a/benchmarks/ycsb/YcsbFlags.cpp +++ b/benchmarks/ycsb/YcsbFlags.cpp @@ -5,7 +5,7 @@ DEFINE_string(ycsb_target, "leanstore", "Ycsb target, available: unordered_map, leanstore, rocksdb, leveldb"); DEFINE_string(ycsb_cmd, "run", "Ycsb command, available: run, load"); DEFINE_string(ycsb_workload, "a", "Ycsb workload, available: a, b, c, d, e, f"); -DEFINE_uint32(ycsb_threads, 4, "Worker threads"); +DEFINE_uint32(ycsb_threads, 4, "WorkerContext threads"); DEFINE_uint64(ycsb_mem_kb, 1, "Max memory in KB to use"); DEFINE_uint64(ycsb_run_for_seconds, 300, "Run the benchmark for x seconds"); diff --git a/benchmarks/ycsb/YcsbLeanStore.hpp b/benchmarks/ycsb/YcsbLeanStore.hpp index 50ea2f15..51a55800 100644 --- a/benchmarks/ycsb/YcsbLeanStore.hpp +++ b/benchmarks/ycsb/YcsbLeanStore.hpp @@ -5,7 +5,7 @@ #include "leanstore/btree/BasicKV.hpp" #include "leanstore/btree/TransactionKV.hpp" #include 
"leanstore/concurrency/CRManager.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/utils/Defer.hpp" #include "leanstore/utils/JumpMU.hpp" #include "leanstore/utils/Log.hpp" @@ -129,7 +129,7 @@ class YcsbLeanStore : public YcsbExecutor { utils::RandomGenerator::RandString(val, FLAGS_ycsb_val_size); if (mBenchTransactionKv) { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); } auto opCode = table->Insert(Slice(key, FLAGS_ycsb_key_size), Slice(val, FLAGS_ycsb_val_size)); @@ -137,7 +137,7 @@ class YcsbLeanStore : public YcsbExecutor { Log::Fatal("Failed to insert, opCode={}", static_cast(opCode)); } if (mBenchTransactionKv) { - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } } }); @@ -194,10 +194,10 @@ class YcsbLeanStore : public YcsbExecutor { // generate key for read GenYcsbKey(zipfRandom, key); if (mBenchTransactionKv) { - cr::Worker::My().StartTx(TxMode::kShortRunning, - IsolationLevel::kSnapshotIsolation, true); + cr::WorkerContext::My().StartTx(TxMode::kShortRunning, + IsolationLevel::kSnapshotIsolation, true); table->Lookup(Slice(key, FLAGS_ycsb_key_size), copyValue); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } else { table->Lookup(Slice(key, FLAGS_ycsb_key_size), copyValue); } @@ -206,10 +206,10 @@ class YcsbLeanStore : public YcsbExecutor { GenYcsbKey(zipfRandom, key); // generate val for update if (mBenchTransactionKv) { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); table->UpdatePartial(Slice(key, FLAGS_ycsb_key_size), updateCallBack, *updateDesc); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } else { table->UpdatePartial(Slice(key, FLAGS_ycsb_key_size), updateCallBack, *updateDesc); @@ -221,10 +221,10 @@ class YcsbLeanStore : public YcsbExecutor { Log::Fatal("Unsupported workload type: {}", static_cast(workloadType)); } } - threadCommitted[cr::Worker::My().mWorkerId]++; + threadCommitted[cr::WorkerContext::My().mWorkerId]++; } JUMPMU_CATCH() { - threadAborted[cr::Worker::My().mWorkerId]++; + threadAborted[cr::WorkerContext::My().mWorkerId]++; } } }); diff --git a/examples/c/BasicKvExample.c b/examples/c/BasicKvExample.c index 813da149..82364fc6 100644 --- a/examples/c/BasicKvExample.c +++ b/examples/c/BasicKvExample.c @@ -49,16 +49,16 @@ int main() { // lookup a key { - String* valStr = BasicKvLookup(kvHandle, 1, keySlice); - if (valStr == NULL) { + String* val = CreateString(nullptr, 0); + bool found = BasicKvLookup(kvHandle, 1, keySlice, &val); + if (!found) { printf("lookup value failed, value may not exist, key=%.*s\n", (int)keySlice.mSize, keySlice.mData); + DestroyString(val); return -1; } - printf("%.*s, %.*s\n", (int)keySlice.mSize, keySlice.mData, (int)valStr->mSize, valStr->mData); - - // cleanup the value string - DestroyString(valStr); + printf("%.*s, %.*s\n", (int)keySlice.mSize, keySlice.mData, (int)val->mSize, val->mData); + DestroyString(val); } // insert more key-values diff --git a/examples/cpp/BasicKvExample.cpp b/examples/cpp/BasicKvExample.cpp index e54e4c88..0acb2930 100644 --- a/examples/cpp/BasicKvExample.cpp +++ b/examples/cpp/BasicKvExample.cpp @@ -1,7 +1,7 @@ #include #include #include -#include +#include #include #include diff --git a/include/leanstore-c/StoreOption.h b/include/leanstore-c/StoreOption.h index 2233137e..db6b5685 100644 --- a/include/leanstore-c/StoreOption.h +++ b/include/leanstore-c/StoreOption.h @@ -36,7 +36,7 @@ typedef struct StoreOption { LogLevel 
mLogLevel; // --------------------------------------------------------------------------- - // Worker thread related options + // WorkerContext thread related options // --------------------------------------------------------------------------- //! The number of worker threads. diff --git a/include/leanstore-c/leanstore-c.h b/include/leanstore-c/leanstore-c.h index 97889b64..5765b71b 100644 --- a/include/leanstore-c/leanstore-c.h +++ b/include/leanstore-c/leanstore-c.h @@ -21,6 +21,9 @@ typedef struct String { //! The size of the data uint64_t mSize; + + //! The capacity of the data + uint64_t mCapacity; } String; //! Creates a new string, copying the data from the given buffer to the new string @@ -76,9 +79,8 @@ void DestroyBasicKV(BasicKvHandle* handle); bool BasicKvInsert(BasicKvHandle* handle, uint64_t workerId, StringSlice key, StringSlice val); //! Lookup a key in a basic key-value store at workerId -//! NOTE: The caller should destroy the val after use via DestroyString() -//! @return the value if the key exists, nullptr otherwise -String* BasicKvLookup(BasicKvHandle* handle, uint64_t workerId, StringSlice key); +//! @return whether the value exists, The input val is untouched if the key is not found +bool BasicKvLookup(BasicKvHandle* handle, uint64_t workerId, StringSlice key, String** val); //! Remove a key in a basic key-value store at workerId //! @return true if the key is found and removed, false otherwise @@ -154,6 +156,16 @@ StringSlice BasicKvIterKey(BasicKvIterHandle* handle); //! @return the read-only value slice StringSlice BasicKvIterVal(BasicKvIterHandle* handle); +//------------------------------------------------------------------------------ +// Interfaces for metrics +//------------------------------------------------------------------------------ + +//! Start the global http metrics exposer +void StartMetricsHttpExposer(int32_t port); + +//! Stop the global http metrics exposer +void StopMetricsHttpExposer(); + #ifdef __cplusplus } #endif diff --git a/include/leanstore/Exceptions.hpp b/include/leanstore/Exceptions.hpp index d3265d6c..1dac15ff 100644 --- a/include/leanstore/Exceptions.hpp +++ b/include/leanstore/Exceptions.hpp @@ -5,6 +5,8 @@ #include #include +#include + //-------------------------------------------------------------------------------------- #define Generic_Exception(name) \ struct name : public std::exception { \ diff --git a/include/leanstore/KVInterface.hpp b/include/leanstore/KVInterface.hpp index 9d91a931..fc315698 100644 --- a/include/leanstore/KVInterface.hpp +++ b/include/leanstore/KVInterface.hpp @@ -2,7 +2,9 @@ #include "leanstore/Slice.hpp" +#include #include +#include namespace leanstore { diff --git a/include/leanstore/LeanStore.hpp b/include/leanstore/LeanStore.hpp index 23418722..242bdb67 100644 --- a/include/leanstore/LeanStore.hpp +++ b/include/leanstore/LeanStore.hpp @@ -16,6 +16,7 @@ namespace leanstore::telemetry { class MetricsManager; +class MetricsHttpExposer; } // namespace leanstore::telemetry @@ -80,6 +81,9 @@ class LeanStore { //! The metrics manager std::unique_ptr mMetricsManager; + //! 
The http metrics exposer + std::unique_ptr mMetricsExposer; + #ifdef DEBUG utils::DebugFlagsRegistry mDebugFlagsRegistry; #endif diff --git a/include/leanstore/btree/BasicKV.hpp b/include/leanstore/btree/BasicKV.hpp index 121cdce3..c0e62033 100644 --- a/include/leanstore/btree/BasicKV.hpp +++ b/include/leanstore/btree/BasicKV.hpp @@ -17,7 +17,6 @@ class BasicKV : public KVInterface, public BTreeGeneric { mTreeType = BTreeType::kBasicKV; } -public: virtual OpCode Lookup(Slice key, ValCallback valCallback) override; virtual OpCode Insert(Slice key, Slice val) override; @@ -39,10 +38,8 @@ class BasicKV : public KVInterface, public BTreeGeneric { virtual uint64_t CountEntries() override; -public: bool IsRangeEmpty(Slice startKey, Slice endKey); -public: static Result Create(leanstore::LeanStore* store, const std::string& treeName, BTreeConfig config); @@ -52,8 +49,7 @@ class BasicKV : public KVInterface, public BTreeGeneric { //! update. //! @param[in] value The value to copy the slots from. //! @param[out] buffer The buffer to copy the slots to. - inline static void CopyToBuffer(const UpdateDesc& updateDesc, const uint8_t* value, - uint8_t* buffer) { + static void CopyToBuffer(const UpdateDesc& updateDesc, const uint8_t* value, uint8_t* buffer) { uint64_t bufferOffset = 0; for (uint64_t i = 0; i < updateDesc.mNumSlots; i++) { const auto& slot = updateDesc.mUpdateSlots[i]; @@ -68,8 +64,7 @@ class BasicKV : public KVInterface, public BTreeGeneric { //! update. //! @param[in] buffer The buffer to copy the slots from. //! @param[out] value The value to update the slots in. - inline static void CopyToValue(const UpdateDesc& updateDesc, const uint8_t* buffer, - uint8_t* value) { + static void CopyToValue(const UpdateDesc& updateDesc, const uint8_t* buffer, uint8_t* value) { uint64_t bufferOffset = 0; for (uint64_t i = 0; i < updateDesc.mNumSlots; i++) { const auto& slot = updateDesc.mUpdateSlots[i]; @@ -78,8 +73,7 @@ class BasicKV : public KVInterface, public BTreeGeneric { } } - inline static void XorToBuffer(const UpdateDesc& updateDesc, const uint8_t* value, - uint8_t* buffer) { + static void XorToBuffer(const UpdateDesc& updateDesc, const uint8_t* value, uint8_t* buffer) { uint64_t bufferOffset = 0; for (uint64_t i = 0; i < updateDesc.mNumSlots; i++) { const auto& slot = updateDesc.mUpdateSlots[i]; @@ -90,8 +84,7 @@ class BasicKV : public KVInterface, public BTreeGeneric { } } - inline static void XorToValue(const UpdateDesc& updateDesc, const uint8_t* buffer, - uint8_t* value) { + static void XorToValue(const UpdateDesc& updateDesc, const uint8_t* buffer, uint8_t* value) { uint64_t bufferOffset = 0; for (uint64_t i = 0; i < updateDesc.mNumSlots; i++) { const auto& slot = updateDesc.mUpdateSlots[i]; @@ -101,6 +94,10 @@ class BasicKV : public KVInterface, public BTreeGeneric { bufferOffset += slot.mSize; } } + +private: + OpCode lookupOptimistic(Slice key, ValCallback valCallback); + OpCode lookupPessimistic(Slice key, ValCallback valCallback); }; } // namespace leanstore::storage::btree diff --git a/include/leanstore/btree/ChainedTuple.hpp b/include/leanstore/btree/ChainedTuple.hpp index b4e115ae..0f923e7a 100644 --- a/include/leanstore/btree/ChainedTuple.hpp +++ b/include/leanstore/btree/ChainedTuple.hpp @@ -5,7 +5,7 @@ #include "leanstore/btree/BasicKV.hpp" #include "leanstore/btree/core/PessimisticExclusiveIterator.hpp" #include "leanstore/concurrency/CRManager.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" namespace 
leanstore::storage::btree { @@ -61,10 +61,11 @@ class __attribute__((packed)) ChainedTuple : public Tuple { std::tuple GetVisibleTuple(Slice payload, ValCallback callback) const; void UpdateStats() { - if (cr::Worker::My().mCc.VisibleForAll(mTxId) || + if (cr::WorkerContext::My().mCc.VisibleForAll(mTxId) || mOldestTx != static_cast( - cr::Worker::My().mStore->mCRManager->mGlobalWmkInfo.mOldestActiveTx & 0xFFFF)) { + cr::WorkerContext::My().mStore->mCRManager->mGlobalWmkInfo.mOldestActiveTx & + 0xFFFF)) { mOldestTx = 0; mTotalUpdates = 0; return; @@ -75,10 +76,11 @@ class __attribute__((packed)) ChainedTuple : public Tuple { bool ShouldConvertToFatTuple() { bool commandValid = mCommandId != kInvalidCommandid; bool hasLongRunningOLAP = - cr::Worker::My().mStore->mCRManager->mGlobalWmkInfo.HasActiveLongRunningTx(); - bool frequentlyUpdated = mTotalUpdates > cr::Worker::My().mStore->mStoreOption->mWorkerThreads; + cr::WorkerContext::My().mStore->mCRManager->mGlobalWmkInfo.HasActiveLongRunningTx(); + bool frequentlyUpdated = + mTotalUpdates > cr::WorkerContext::My().mStore->mStoreOption->mWorkerThreads; bool recentUpdatedByOthers = - mWorkerId != cr::Worker::My().mWorkerId || mTxId != cr::ActiveTx().mStartTs; + mWorkerId != cr::WorkerContext::My().mWorkerId || mTxId != cr::ActiveTx().mStartTs; return commandValid && hasLongRunningOLAP && recentUpdatedByOthers && frequentlyUpdated; } diff --git a/include/leanstore/btree/TransactionKV.hpp b/include/leanstore/btree/TransactionKV.hpp index 0aa5bc50..eebf98eb 100644 --- a/include/leanstore/btree/TransactionKV.hpp +++ b/include/leanstore/btree/TransactionKV.hpp @@ -8,7 +8,7 @@ #include "leanstore/btree/core/BTreeGeneric.hpp" #include "leanstore/btree/core/PessimisticExclusiveIterator.hpp" #include "leanstore/buffer-manager/GuardedBufferFrame.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/utils/Result.hpp" #include @@ -109,7 +109,7 @@ class TransactionKV : public BasicKV { } inline static uint64_t ConvertToFatTupleThreshold() { - return cr::Worker::My().mStore->mStoreOption->mWorkerThreads; + return cr::WorkerContext::My().mStore->mStoreOption->mWorkerThreads; } //! Updates the value stored in FatTuple. 
The former newest version value is diff --git a/include/leanstore/btree/core/WALMacros.hpp b/include/leanstore/btree/core/WALMacros.hpp deleted file mode 100644 index f2bc8d52..00000000 --- a/include/leanstore/btree/core/WALMacros.hpp +++ /dev/null @@ -1,115 +0,0 @@ -#pragma once - -// TODO: Works only for update same size -#define DELTA_COPY -#ifdef DELTA_XOR -// Obsolete -#define beforeBody(Type, Attribute, tuple, entry) \ - const auto Attribute##_offset = offsetof(Type, Attribute); \ - const auto Attribute##_size = sizeof(Type::Attribute); \ - *reinterpret_cast(entry) = Attribute##_offset; \ - entry += sizeof(uint16_t); \ - *reinterpret_cast(entry) = Attribute##_size; \ - entry += sizeof(uint16_t); \ - std::memcpy(entry, tuple + Attribute##_offset, Attribute##_size); \ - entry += Attribute##_size; - -#define afterBody(Type, Attribute, tuple, entry) \ - const auto Attribute##_offset = offsetof(Type, Attribute); \ - const auto Attribute##_size = sizeof(Type::Attribute); \ - entry += (sizeof(uint16_t) * 2); \ - for (uint64_t b_i = 0; b_i < Attribute##_size; b_i++) { \ - *(entry + b_i) ^= *(tuple + Attribute##_offset + b_i); \ - } \ - entry += Attribute##_size; -#endif -#ifdef DELTA_COPY -#define beforeBody(Type, Attribute, tuple, entry) \ - const auto Attribute##_offset = offsetof(Type, Attribute); \ - const auto Attribute##_size = sizeof(Type::Attribute); \ - *reinterpret_cast(entry) = Attribute##_offset; \ - entry += sizeof(uint16_t); \ - *reinterpret_cast(entry) = Attribute##_size; \ - entry += sizeof(uint16_t); \ - std::memcpy(entry, tuple + Attribute##_offset, Attribute##_size); \ - entry += 2 * Attribute##_size; - -#define afterBody(Type, Attribute, tuple, entry) \ - const auto Attribute##_offset = offsetof(Type, Attribute); \ - const auto Attribute##_size = sizeof(Type::Attribute); \ - entry += (sizeof(uint16_t) * 2); \ - entry += Attribute##_size; \ - std::memcpy(entry, tuple + Attribute##_offset, Attribute##_size); \ - entry += 1 * Attribute##_size; -#endif - -#define beforeWrapper1(Type, A1) \ - [](uint8_t* tuple, uint8_t* entry) { beforeBody(Type, A1, tuple, entry); } -#define beforeWrapper2(Type, A1, A2) \ - [](uint8_t* tuple, uint8_t* entry) { \ - beforeBody(Type, A1, tuple, entry); \ - beforeBody(Type, A2, tuple, entry); \ - } -#define beforeWrapper3(Type, A1, A2, A3) \ - [](uint8_t* tuple, uint8_t* entry) { \ - beforeBody(Type, A1, tuple, entry); \ - beforeBody(Type, A2, tuple, entry); \ - beforeBody(Type, A3, tuple, entry); \ - } -#define beforeWrapper4(Type, A1, A2, A3, A4) \ - [](uint8_t* tuple, uint8_t* entry) { \ - beforeBody(Type, A1, tuple, entry); \ - beforeBody(Type, A2, tuple, entry); \ - beforeBody(Type, A3, tuple, entry); \ - beforeBody(Type, A4, tuple, entry); \ - } - -#define afterWrapper1(Type, A1) \ - [](uint8_t* tuple, uint8_t* entry) { afterBody(Type, A1, tuple, entry); } -#define afterWrapper2(Type, A1, A2) \ - [](uint8_t* tuple, uint8_t* entry) { \ - afterBody(Type, A1, tuple, entry); \ - afterBody(Type, A2, tuple, entry); \ - } - -#define afterWrapper3(Type, A1, A2, A3) \ - [](uint8_t* tuple, uint8_t* entry) { \ - afterBody(Type, A1, tuple, entry); \ - afterBody(Type, A2, tuple, entry); \ - afterBody(Type, A3, tuple, entry); \ - } - -#define afterWrapper4(Type, A1, A2, A3, A4) \ - [](uint8_t* tuple, uint8_t* entry) { \ - afterBody(Type, A1, tuple, entry); \ - afterBody(Type, A2, tuple, entry); \ - afterBody(Type, A3, tuple, entry); \ - afterBody(Type, A4, tuple, entry); \ - } - -#ifdef DELTA_XOR -#define entrySize1(Type, A1) ((2 * sizeof(uint16_t)) + (1 
* sizeof(Type::A1))) -#endif -#ifdef DELTA_COPY -#define entrySize1(Type, A1) ((2 * sizeof(uint16_t)) + (2 * sizeof(Type::A1))) -#endif -#define entrySize2(Type, A1, A2) entrySize1(Type, A1) + entrySize1(Type, A2) -#define entrySize3(Type, A1, A2, A3) \ - entrySize1(Type, A1) + entrySize1(Type, A2) + entrySize1(Type, A3) -#define entrySize4(Type, A1, A2, A3, A4) \ - entrySize1(Type, A1) + entrySize1(Type, A2) + entrySize1(Type, A3) + entrySize1(Type, A4) - -#define WALUpdate1(Type, A1) \ - { beforeWrapper1(Type, A1), afterWrapper1(Type, A1), entrySize1(Type, A1) } -#define WALUpdate2(Type, A1, A2) \ - { beforeWrapper2(Type, A1, A2), afterWrapper2(Type, A1, A2), entrySize2(Type, A1, A2) } -#define WALUpdate3(Type, A1, A2, A3) \ - { \ - beforeWrapper3(Type, A1, A2, A3), afterWrapper3(Type, A1, A2, A3), \ - entrySize3(Type, A1, A2, A3) \ - } -#define WALUpdate4(Type, A1, A2, A3, A4) \ - { \ - beforeWrapper4(Type, A1, A2, A3, A4), afterWrapper4(Type, A1, A2, A3, A4), \ - entrySize4(Type, A1, A2, A3, A4) \ - } diff --git a/include/leanstore/buffer-manager/GuardedBufferFrame.hpp b/include/leanstore/buffer-manager/GuardedBufferFrame.hpp index f4e046d3..8ef02b34 100644 --- a/include/leanstore/buffer-manager/GuardedBufferFrame.hpp +++ b/include/leanstore/buffer-manager/GuardedBufferFrame.hpp @@ -4,7 +4,7 @@ #include "leanstore/buffer-manager/BufferManager.hpp" #include "leanstore/concurrency/LoggingImpl.hpp" #include "leanstore/concurrency/WalPayloadHandler.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/sync/HybridGuard.hpp" #include "leanstore/sync/HybridLatch.hpp" #include "leanstore/utils/Log.hpp" @@ -144,40 +144,40 @@ class GuardedBufferFrame { public: inline void SyncGSNBeforeWrite() { LS_DCHECK(mBf != nullptr); - LS_DCHECK(mBf->mPage.mGSN <= cr::Worker::My().mLogging.GetCurrentGsn(), + LS_DCHECK(mBf->mPage.mGSN <= cr::WorkerContext::My().mLogging.GetCurrentGsn(), "Page GSN should <= worker GSN, pageGSN={}, workerGSN={}", mBf->mPage.mGSN, - cr::Worker::My().mLogging.GetCurrentGsn()); + cr::WorkerContext::My().mLogging.GetCurrentGsn()); // update last writer worker - mBf->mHeader.mLastWriterWorker = cr::Worker::My().mWorkerId; + mBf->mHeader.mLastWriterWorker = cr::WorkerContext::My().mWorkerId; // increase GSN - const auto workerGSN = cr::Worker::My().mLogging.GetCurrentGsn(); + const auto workerGSN = cr::WorkerContext::My().mLogging.GetCurrentGsn(); mBf->mPage.mGSN = workerGSN + 1; - cr::Worker::My().mLogging.SetCurrentGsn(workerGSN + 1); + cr::WorkerContext::My().mLogging.SetCurrentGsn(workerGSN + 1); } // TODO: don't sync on temporary table pages like history trees inline void SyncGSNBeforeRead() { // skip if not running inside a worker - if (!cr::Worker::InWorker()) { + if (!cr::WorkerContext::InWorker()) { return; } - if (!cr::Worker::My().mLogging.mHasRemoteDependency && - mBf->mPage.mGSN > cr::Worker::My().mLogging.mTxReadSnapshot && - mBf->mHeader.mLastWriterWorker != cr::Worker::My().mWorkerId) { - cr::Worker::My().mLogging.mHasRemoteDependency = true; + if (!cr::WorkerContext::My().mLogging.mHasRemoteDependency && + mBf->mPage.mGSN > cr::WorkerContext::My().mLogging.mTxReadSnapshot && + mBf->mHeader.mLastWriterWorker != cr::WorkerContext::My().mWorkerId) { + cr::WorkerContext::My().mLogging.mHasRemoteDependency = true; LS_DLOG("Detected remote dependency, workerId={}, " "txReadSnapshot(GSN)={}, pageLastWriterWorker={}, pageGSN={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mLogging.mTxReadSnapshot, + 
cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mLogging.mTxReadSnapshot, mBf->mHeader.mLastWriterWorker, mBf->mPage.mGSN); } - const auto workerGSN = cr::Worker::My().mLogging.GetCurrentGsn(); + const auto workerGSN = cr::WorkerContext::My().mLogging.GetCurrentGsn(); const auto pageGSN = mBf->mPage.mGSN; if (workerGSN < pageGSN) { - cr::Worker::My().mLogging.SetCurrentGsn(pageGSN); + cr::WorkerContext::My().mLogging.SetCurrentGsn(pageGSN); } } @@ -189,7 +189,7 @@ class GuardedBufferFrame { const auto pageId = mBf->mHeader.mPageId; const auto treeId = mBf->mPage.mBTreeId; walSize = ((walSize - 1) / 8 + 1) * 8; - auto handler = cr::Worker::My().mLogging.ReserveWALEntryComplex( + auto handler = cr::WorkerContext::My().mLogging.ReserveWALEntryComplex( sizeof(WT) + walSize, pageId, mBf->mPage.mGSN, treeId, std::forward(args)...); SyncGSNBeforeWrite(); diff --git a/include/leanstore/buffer-manager/Swip.hpp b/include/leanstore/buffer-manager/Swip.hpp index 8d144da7..e8eb5765 100644 --- a/include/leanstore/buffer-manager/Swip.hpp +++ b/include/leanstore/buffer-manager/Swip.hpp @@ -27,7 +27,7 @@ class Swip { }; //! Create an empty swip. - Swip() : mPageId(0){}; + Swip() : mPageId(0) {}; //! Create an swip pointing to the buffer frame. Swip(BufferFrame* bf) : mBf(bf) { diff --git a/include/leanstore/concurrency/CRManager.hpp b/include/leanstore/concurrency/CRManager.hpp index 85be08d4..9a91dd14 100644 --- a/include/leanstore/concurrency/CRManager.hpp +++ b/include/leanstore/concurrency/CRManager.hpp @@ -1,7 +1,7 @@ #pragma once #include "leanstore/Units.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/concurrency/WorkerThread.hpp" #include @@ -26,7 +26,7 @@ class CRManager { std::vector> mWorkerThreads; //! All the thread-local worker references - std::vector mWorkers; + std::vector mWorkerCtxs; WaterMarkInfo mGlobalWmkInfo; diff --git a/include/leanstore/concurrency/GroupCommitter.hpp b/include/leanstore/concurrency/GroupCommitter.hpp index 30bf6ba4..f5231ee7 100644 --- a/include/leanstore/concurrency/GroupCommitter.hpp +++ b/include/leanstore/concurrency/GroupCommitter.hpp @@ -14,7 +14,7 @@ namespace leanstore::cr { -class Worker; +class WorkerContext; class WalFlushReq; class GroupCommitter : public leanstore::utils::UserThread { @@ -38,20 +38,21 @@ class GroupCommitter : public leanstore::utils::UserThread { std::atomic mGlobalMaxFlushedGSN; //! All the workers. - std::vector& mWorkers; + std::vector& mWorkerCtxs; //! The libaio wrapper. 
utils::AsyncIo mAIo; public: - GroupCommitter(leanstore::LeanStore* store, int32_t walFd, std::vector& workers, int cpu) + GroupCommitter(leanstore::LeanStore* store, int32_t walFd, std::vector& workers, + int cpu) : UserThread(store, "GroupCommitter", cpu), mStore(store), mWalFd(walFd), mWalSize(0), mGlobalMinFlushedGSN(0), mGlobalMaxFlushedGSN(0), - mWorkers(workers), + mWorkerCtxs(workers), mAIo(workers.size() * 2 + 2) { } diff --git a/include/leanstore/concurrency/LoggingImpl.hpp b/include/leanstore/concurrency/LoggingImpl.hpp index 3b831dca..a74d577e 100644 --- a/include/leanstore/concurrency/LoggingImpl.hpp +++ b/include/leanstore/concurrency/LoggingImpl.hpp @@ -2,7 +2,7 @@ #include "leanstore/concurrency/Logging.hpp" #include "leanstore/concurrency/WalEntry.hpp" #include "leanstore/concurrency/WalPayloadHandler.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/utils/Defer.hpp" namespace leanstore::cr { @@ -27,7 +27,7 @@ WalPayloadHandler Logging::ReserveWALEntryComplex(uint64_t payloadSize, PID p ReserveContiguousBuffer(entrySize); mActiveWALEntryComplex = - new (entryPtr) WalEntryComplex(entryLSN, prevLsn, entrySize, Worker::My().mWorkerId, + new (entryPtr) WalEntryComplex(entryLSN, prevLsn, entrySize, WorkerContext::My().mWorkerId, ActiveTx().mStartTs, gsn, pageId, treeId); auto* payloadPtr = mActiveWALEntryComplex->mPayload; diff --git a/include/leanstore/concurrency/WalPayloadHandler.hpp b/include/leanstore/concurrency/WalPayloadHandler.hpp index 6de28dad..33a8e097 100644 --- a/include/leanstore/concurrency/WalPayloadHandler.hpp +++ b/include/leanstore/concurrency/WalPayloadHandler.hpp @@ -1,7 +1,7 @@ #pragma once #include "leanstore/concurrency/GroupCommitter.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" namespace leanstore::cr { @@ -37,7 +37,7 @@ class WalPayloadHandler { template inline void WalPayloadHandler::SubmitWal() { - cr::Worker::My().mLogging.SubmitWALEntryComplex(mTotalSize); + cr::WorkerContext::My().mLogging.SubmitWALEntryComplex(mTotalSize); } } // namespace leanstore::cr \ No newline at end of file diff --git a/include/leanstore/concurrency/Worker.hpp b/include/leanstore/concurrency/WorkerContext.hpp similarity index 65% rename from include/leanstore/concurrency/Worker.hpp rename to include/leanstore/concurrency/WorkerContext.hpp index 3b84870c..cd115a81 100644 --- a/include/leanstore/concurrency/Worker.hpp +++ b/include/leanstore/concurrency/WorkerContext.hpp @@ -20,7 +20,7 @@ namespace leanstore::cr { class Logging; class ConcurrencyControl; -class Worker { +class WorkerContext { public: //! The store it belongs to. leanstore::LeanStore* mStore = nullptr; @@ -28,6 +28,7 @@ class Worker { //! The write-ahead logging component. Logging mLogging; + //! The concurrent control component. ConcurrencyControl mCc; //! The ID of the current command in the current transaction. @@ -36,21 +37,21 @@ class Worker { //! The current running transaction. Transaction mActiveTx; - //! The ID of the current transaction. It's set by the current worker thread - //! and read by the garbage collection process to determine the lower - //! watermarks of the transactions. + //! The ID of the current transaction. It's set by the current worker thread and read by the + //! garbage collection process to determine the lower watermarks of the transactions. std::atomic mActiveTxId = 0; //! ID of the current worker itself. const uint64_t mWorkerId; //! 
All the workers. - std::vector& mAllWorkers; + std::vector& mAllWorkers; public: - Worker(uint64_t workerId, std::vector& allWorkers, leanstore::LeanStore* store); + WorkerContext(uint64_t workerId, std::vector& allWorkers, + leanstore::LeanStore* store); - ~Worker(); + ~WorkerContext(); public: bool IsTxStarted() { @@ -65,26 +66,29 @@ class Worker { void AbortTx(); public: - static thread_local std::unique_ptr sTlsWorker; - static thread_local Worker* sTlsWorkerRaw; + //! Thread-local storage for WorkerContext. + static thread_local std::unique_ptr sTlsWorkerCtx; + + //! Raw pointer to sTlsWorkerCtx to avoid the overhead of std::unique_ptr. + static thread_local WorkerContext* sTlsWorkerCtxRaw; static constexpr uint64_t kRcBit = (1ull << 63); static constexpr uint64_t kLongRunningBit = (1ull << 62); static constexpr uint64_t kCleanBitsMask = ~(kRcBit | kLongRunningBit); public: - static Worker& My() { - return *Worker::sTlsWorkerRaw; + static WorkerContext& My() { + return *WorkerContext::sTlsWorkerCtxRaw; } static bool InWorker() { - return Worker::sTlsWorkerRaw != nullptr; + return WorkerContext::sTlsWorkerCtxRaw != nullptr; } }; // Shortcuts inline Transaction& ActiveTx() { - return cr::Worker::My().mActiveTx; + return cr::WorkerContext::My().mActiveTx; } } // namespace leanstore::cr diff --git a/src/LeanStore.cpp b/src/LeanStore.cpp index 6a9f9e97..08c2f009 100644 --- a/src/LeanStore.cpp +++ b/src/LeanStore.cpp @@ -13,6 +13,7 @@ #include "leanstore/utils/Misc.hpp" #include "leanstore/utils/Result.hpp" #include "leanstore/utils/UserThread.hpp" +#include "telemetry/MetricsHttpExposer.hpp" #include "telemetry/MetricsManager.hpp" #include @@ -57,7 +58,10 @@ Result> LeanStore::Open(StoreOption* option) { return std::make_unique(option); } -LeanStore::LeanStore(StoreOption* option) : mStoreOption(option), mMetricsManager(nullptr) { +LeanStore::LeanStore(StoreOption* option) + : mStoreOption(option), + mMetricsManager(nullptr), + mMetricsExposer(nullptr) { utils::tlsStore = this; Log::Info("LeanStore starting ..."); @@ -65,8 +69,12 @@ LeanStore::LeanStore(StoreOption* option) : mStoreOption(option), mMetricsManage // Expose the metrics if (mStoreOption->mEnableMetrics) { - mMetricsManager = std::make_unique(this); - mMetricsManager->Expose(); + mMetricsManager = std::make_unique(); + + //! 
Expose the metrics via HTTP + mMetricsExposer = std::make_unique(this); + mMetricsExposer->SetCollectable(mMetricsManager->GetRegistry()); + mMetricsExposer->Start(); } initPageAndWalFd(); @@ -152,6 +160,7 @@ LeanStore::~LeanStore() { // stop metrics manager in the last if (mStoreOption->mEnableMetrics) { mMetricsManager = nullptr; + mMetricsExposer = nullptr; } DestroyStoreOption(mStoreOption); Log::Info("LeanStore stopped"); @@ -452,7 +461,7 @@ void LeanStore::GetBasicKV(const std::string& name, storage::btree::BasicKV** bt } void LeanStore::DropBasicKV(const std::string& name) { - LS_DCHECK(cr::Worker::My().IsTxStarted()); + LS_DCHECK(cr::WorkerContext::My().IsTxStarted()); auto* btree = dynamic_cast(mTreeRegistry->GetTree(name)); leanstore::storage::btree::BTreeGeneric::FreeAndReclaim(*btree); @@ -498,7 +507,7 @@ void LeanStore::GetTransactionKV(const std::string& name, storage::btree::Transa } void LeanStore::DropTransactionKV(const std::string& name) { - LS_DCHECK(cr::Worker::My().IsTxStarted()); + LS_DCHECK(cr::WorkerContext::My().IsTxStarted()); auto* btree = DownCast(mTreeRegistry->GetTree(name)); leanstore::storage::btree::BTreeGeneric::FreeAndReclaim(*btree); auto res = mTreeRegistry->UnregisterTree(name); diff --git a/src/btree/BasicKV.cpp b/src/btree/BasicKV.cpp index f42796bd..c4cdb462 100644 --- a/src/btree/BasicKV.cpp +++ b/src/btree/BasicKV.cpp @@ -30,7 +30,27 @@ Result BasicKV::Create(leanstore::LeanStore* store, const std::string& return tree; } -OpCode BasicKV::Lookup(Slice key, ValCallback valCallback) { +OpCode BasicKV::lookupOptimistic(Slice key, ValCallback valCallback) { + JUMPMU_TRY() { + GuardedBufferFrame guardedLeaf; + FindLeafCanJump(key, guardedLeaf, LatchMode::kOptimisticOrJump); + auto slotId = guardedLeaf->LowerBound(key); + if (slotId != -1) { + valCallback(guardedLeaf->Value(slotId)); + guardedLeaf.JumpIfModifiedByOthers(); + JUMPMU_RETURN OpCode::kOK; + } + + guardedLeaf.JumpIfModifiedByOthers(); + JUMPMU_RETURN OpCode::kNotFound; + } + JUMPMU_CATCH() { + WorkerCounters::MyCounters().dt_restarts_read[mTreeId]++; + return OpCode::kOther; + } +} + +OpCode BasicKV::lookupPessimistic(Slice key, ValCallback valCallback) { while (true) { JUMPMU_TRY() { GuardedBufferFrame guardedLeaf; @@ -38,19 +58,22 @@ OpCode BasicKV::Lookup(Slice key, ValCallback valCallback) { auto slotId = guardedLeaf->LowerBound(key); if (slotId != -1) { valCallback(guardedLeaf->Value(slotId)); - guardedLeaf.JumpIfModifiedByOthers(); JUMPMU_RETURN OpCode::kOK; } - guardedLeaf.JumpIfModifiedByOthers(); JUMPMU_RETURN OpCode::kNotFound; } JUMPMU_CATCH() { WorkerCounters::MyCounters().dt_restarts_read[mTreeId]++; } } - UNREACHABLE(); - return OpCode::kOther; +} + +OpCode BasicKV::Lookup(Slice key, ValCallback valCallback) { + if (auto ret = lookupOptimistic(key, valCallback); ret != OpCode::kOther) { + return ret; + } + return lookupPessimistic(std::move(key), std::move(valCallback)); } bool BasicKV::IsRangeEmpty(Slice startKey, Slice endKey) { @@ -143,13 +166,13 @@ OpCode BasicKV::Insert(Slice key, Slice val) { auto ret = xIter.InsertKV(key, val); if (ret == OpCode::kDuplicated) { - Log::Info("Insert duplicated, workerId={}, key={}, treeId={}", cr::Worker::My().mWorkerId, - key.ToString(), mTreeId); + Log::Info("Insert duplicated, workerId={}, key={}, treeId={}", + cr::WorkerContext::My().mWorkerId, key.ToString(), mTreeId); JUMPMU_RETURN OpCode::kDuplicated; } if (ret != OpCode::kOK) { - Log::Info("Insert failed, workerId={}, key={}, ret={}", cr::Worker::My().mWorkerId, + Log::Info("Insert 
failed, workerId={}, key={}, ret={}", cr::WorkerContext::My().mWorkerId, key.ToString(), ToString(ret)); JUMPMU_RETURN ret; } diff --git a/src/btree/ChainedTuple.cpp b/src/btree/ChainedTuple.cpp index 36214037..a5a069c0 100644 --- a/src/btree/ChainedTuple.cpp +++ b/src/btree/ChainedTuple.cpp @@ -7,7 +7,7 @@ namespace leanstore::storage::btree { std::tuple ChainedTuple::GetVisibleTuple(Slice payload, ValCallback callback) const { - if (cr::Worker::My().mCc.VisibleForMe(mWorkerId, mTxId)) { + if (cr::WorkerContext::My().mCc.VisibleForMe(mWorkerId, mTxId)) { if (mIsTombstone) { return {OpCode::kNotFound, 1}; } @@ -32,7 +32,7 @@ std::tuple ChainedTuple::GetVisibleTuple(Slice payload, uint16_t versionsRead = 1; while (true) { - bool found = cr::Worker::My().mCc.GetVersion( + bool found = cr::WorkerContext::My().mCc.GetVersion( newerWorkerId, newerTxId, newerCommandId, [&](const uint8_t* versionBuf, uint64_t versionSize) { auto& version = *reinterpret_cast(versionBuf); @@ -76,12 +76,12 @@ std::tuple ChainedTuple::GetVisibleTuple(Slice payload, Log::Error("Not found in the version tree, workerId={}, startTs={}, " "versionsRead={}, newerWorkerId={}, newerTxId={}, " "newerCommandId={}", - cr::Worker::My().mWorkerId, cr::ActiveTx().mStartTs, versionsRead, newerWorkerId, - newerTxId, newerCommandId); + cr::WorkerContext::My().mWorkerId, cr::ActiveTx().mStartTs, versionsRead, + newerWorkerId, newerTxId, newerCommandId); return {OpCode::kNotFound, versionsRead}; } - if (cr::Worker::My().mCc.VisibleForMe(newerWorkerId, newerTxId)) { + if (cr::WorkerContext::My().mCc.VisibleForMe(newerWorkerId, newerTxId)) { callback(Slice(valueBuf.get(), valueSize)); return {OpCode::kOK, versionsRead}; } @@ -98,7 +98,7 @@ void ChainedTuple::Update(PessimisticExclusiveIterator& xIter, Slice key, // Move the newest tuple to the history version tree. 
auto treeId = xIter.mBTree.mTreeId; auto currCommandId = - cr::Worker::My().mCc.PutVersion(treeId, false, versionSize, [&](uint8_t* versionBuf) { + cr::WorkerContext::My().mCc.PutVersion(treeId, false, versionSize, [&](uint8_t* versionBuf) { auto& updateVersion = *new (versionBuf) UpdateVersion(mWorkerId, mTxId, mCommandId, true); std::memcpy(updateVersion.mPayload, &updateDesc, updateDesc.Size()); auto* dest = updateVersion.mPayload + updateDesc.Size(); @@ -109,7 +109,7 @@ void ChainedTuple::Update(PessimisticExclusiveIterator& xIter, Slice key, auto mutRawVal = xIter.MutableVal(); auto userValSize = mutRawVal.Size() - sizeof(ChainedTuple); updateCallBack(MutableSlice(mPayload, userValSize)); - mWorkerId = cr::Worker::My().mWorkerId; + mWorkerId = cr::WorkerContext::My().mWorkerId; mTxId = cr::ActiveTx().mStartTs; mCommandId = currCommandId; }; diff --git a/src/btree/TransactionKV.cpp b/src/btree/TransactionKV.cpp index 310d6479..7cac13fb 100644 --- a/src/btree/TransactionKV.cpp +++ b/src/btree/TransactionKV.cpp @@ -9,7 +9,7 @@ #include "leanstore/btree/Tuple.hpp" #include "leanstore/btree/core/BTreeGeneric.hpp" #include "leanstore/btree/core/PessimisticSharedIterator.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/sync/HybridGuard.hpp" #include "leanstore/telemetry/MetricOnlyTimer.hpp" #include "leanstore/utils/Defer.hpp" @@ -78,9 +78,9 @@ OpCode TransactionKV::Lookup(Slice key, ValCallback valCallback) { METRIC_COUNTER_INC(mStore->mMetricsManager, tx_kv_lookup_total, 1); METRIC_HIST_OBSERVE(mStore->mMetricsManager, tx_kv_lookup_us, timer.ElaspedUs()); }); - LS_DCHECK(cr::Worker::My().IsTxStarted(), - "Worker is not in a transaction, workerId={}, startTs={}", cr::Worker::My().mWorkerId, - cr::Worker::My().mActiveTx.mStartTs); + LS_DCHECK(cr::WorkerContext::My().IsTxStarted(), + "WorkerContext is not in a transaction, workerId={}, startTs={}", + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs); auto lookupInGraveyard = [&]() { auto gIter = mGraveyard->GetIterator(); if (gIter.SeekToEqual(key); !gIter.Valid()) { @@ -124,7 +124,7 @@ OpCode TransactionKV::UpdatePartial(Slice key, MutValCallback updateCallBack, METRIC_COUNTER_INC(mStore->mMetricsManager, tx_kv_update_total, 1); METRIC_HIST_OBSERVE(mStore->mMetricsManager, tx_kv_update_us, timer.ElaspedUs()); }); - LS_DCHECK(cr::Worker::My().IsTxStarted()); + LS_DCHECK(cr::WorkerContext::My().IsTxStarted()); JUMPMU_TRY() { auto xIter = GetExclusiveIterator(); if (xIter.SeekToEqual(key); !xIter.Valid()) { @@ -142,7 +142,7 @@ OpCode TransactionKV::UpdatePartial(Slice key, MutValCallback updateCallBack, while (true) { auto mutRawVal = xIter.MutableVal(); auto& tuple = *Tuple::From(mutRawVal.Data()); - auto visibleForMe = cr::Worker::My().mCc.VisibleForMe(tuple.mWorkerId, tuple.mTxId); + auto visibleForMe = cr::WorkerContext::My().mCc.VisibleForMe(tuple.mWorkerId, tuple.mTxId); if (tuple.IsWriteLocked() || !visibleForMe) { // conflict detected, the tuple is write locked by other worker or not // visible for me @@ -203,7 +203,7 @@ OpCode TransactionKV::UpdatePartial(Slice key, MutValCallback updateCallBack, } OpCode TransactionKV::Insert(Slice key, Slice val) { - LS_DCHECK(cr::Worker::My().IsTxStarted()); + LS_DCHECK(cr::WorkerContext::My().IsTxStarted()); uint16_t payloadSize = val.size() + sizeof(ChainedTuple); while (true) { @@ -219,11 +219,11 @@ OpCode TransactionKV::Insert(Slice key, Slice val) { LS_DCHECK(!chainedTuple->mWriteLocked, 
"Duplicate tuple should not be write locked, workerId={}, startTs={}, key={}, " "tupleLastWriter={}, tupleLastStartTs={}, tupleWriteLocked={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, key.ToString(), - lastWorkerId, lastTxId, isWriteLocked); + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, + key.ToString(), lastWorkerId, lastTxId, isWriteLocked); auto visibleForMe = - cr::Worker::My().mCc.VisibleForMe(chainedTuple->mWorkerId, chainedTuple->mTxId); + cr::WorkerContext::My().mCc.VisibleForMe(chainedTuple->mWorkerId, chainedTuple->mTxId); if (chainedTuple->mIsTombstone && visibleForMe) { insertAfterRemove(xIter, key, val); @@ -239,8 +239,8 @@ OpCode TransactionKV::Insert(Slice key, Slice val) { Log::Info("Insert conflicted, current transaction should be aborted, workerId={}, " "startTs={}, key={}, tupleLastWriter={}, tupleLastTxId={}, " "tupleIsWriteLocked={}, tupleIsRemoved={}, tupleVisibleForMe={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, ToString(key), - lastWorkerId, lastTxId, isWriteLocked, isTombsone, visibleForMe); + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, + ToString(key), lastWorkerId, lastTxId, isWriteLocked, isTombsone, visibleForMe); return OpCode::kAbortTx; } @@ -248,8 +248,8 @@ OpCode TransactionKV::Insert(Slice key, Slice val) { auto isTombsone = chainedTuple->mIsTombstone; Log::Info("Insert duplicated, workerId={}, startTs={}, key={}, tupleLastWriter={}, " "tupleLastTxId={}, tupleIsWriteLocked={}, tupleIsRemoved={}, tupleVisibleForMe={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, key.ToString(), - lastWorkerId, lastTxId, isWriteLocked, isTombsone, visibleForMe); + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, + key.ToString(), lastWorkerId, lastTxId, isWriteLocked, isTombsone, visibleForMe); return OpCode::kDuplicated; } @@ -263,7 +263,7 @@ OpCode TransactionKV::Insert(Slice key, Slice val) { kInvalidCommandid); // insert - TransactionKV::InsertToNode(xIter.mGuardedLeaf, key, val, cr::Worker::My().mWorkerId, + TransactionKV::InsertToNode(xIter.mGuardedLeaf, key, val, cr::WorkerContext::My().mWorkerId, cr::ActiveTx().mStartTs, xIter.mSlotId); return OpCode::kOK; } @@ -306,13 +306,13 @@ void TransactionKV::insertAfterRemove(PessimisticExclusiveIterator& xIter, Slice "Tuple should be removed before insert, workerId={}, " "startTs={}, key={}, tupleLastWriter={}, " "tupleLastStartTs={}, tupleWriteLocked={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, key.ToString(), - lastWorkerId, lastTxId, isWriteLocked); + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, + key.ToString(), lastWorkerId, lastTxId, isWriteLocked); // create an insert version auto versionSize = sizeof(InsertVersion) + val.size() + key.size(); auto commandId = - cr::Worker::My().mCc.PutVersion(mTreeId, false, versionSize, [&](uint8_t* versionBuf) { + cr::WorkerContext::My().mCc.PutVersion(mTreeId, false, versionSize, [&](uint8_t* versionBuf) { new (versionBuf) InsertVersion(lastWorkerId, lastTxId, lastCommandId, key, val); }); @@ -335,8 +335,8 @@ void TransactionKV::insertAfterRemove(PessimisticExclusiveIterator& xIter, Slice "Failed to extend btree node slot to store the expanded " "chained tuple, workerId={}, startTs={}, key={}, " "curRawValSize={}, chainedTupleSize={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, key.ToString(), - mutRawVal.Size(), 
chainedTupleSize); + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, + key.ToString(), mutRawVal.Size(), chainedTupleSize); } else if (mutRawVal.Size() > chainedTupleSize) { xIter.ShortenWithoutCompaction(chainedTupleSize); @@ -345,14 +345,14 @@ void TransactionKV::insertAfterRemove(PessimisticExclusiveIterator& xIter, Slice // get the new value place and recreate a new chained tuple there auto newMutRawVal = xIter.MutableVal(); auto* newChainedTuple = new (newMutRawVal.Data()) - ChainedTuple(cr::Worker::My().mWorkerId, cr::ActiveTx().mStartTs, commandId, val); + ChainedTuple(cr::WorkerContext::My().mWorkerId, cr::ActiveTx().mStartTs, commandId, val); newChainedTuple->mTotalUpdates = totalUpdatesCopy; newChainedTuple->mOldestTx = oldestTxCopy; newChainedTuple->UpdateStats(); } OpCode TransactionKV::Remove(Slice key) { - LS_DCHECK(cr::Worker::My().IsTxStarted()); + LS_DCHECK(cr::WorkerContext::My().IsTxStarted()); JUMPMU_TRY() { auto xIter = GetExclusiveIterator(); if (xIter.SeekToEqual(key); !xIter.Valid()) { @@ -378,11 +378,12 @@ OpCode TransactionKV::Remove(Slice key) { auto lastWorkerId = chainedTuple.mWorkerId; auto lastTxId = chainedTuple.mTxId; if (chainedTuple.IsWriteLocked() || - !cr::Worker::My().mCc.VisibleForMe(lastWorkerId, lastTxId)) { + !cr::WorkerContext::My().mCc.VisibleForMe(lastWorkerId, lastTxId)) { Log::Info("Remove conflicted, current transaction should be aborted, workerId={}, " "startTs={}, key={}, tupleLastWriter={}, tupleLastStartTs={}, tupleVisibleForMe={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, key.ToString(), - lastWorkerId, lastTxId, cr::Worker::My().mCc.VisibleForMe(lastWorkerId, lastTxId)); + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, + key.ToString(), lastWorkerId, lastTxId, + cr::WorkerContext::My().mCc.VisibleForMe(lastWorkerId, lastTxId)); JUMPMU_RETURN OpCode::kAbortTx; } @@ -401,8 +402,8 @@ OpCode TransactionKV::Remove(Slice key) { auto valSize = xIter.Val().size() - sizeof(ChainedTuple); auto val = chainedTuple.GetValue(valSize); auto versionSize = sizeof(RemoveVersion) + val.size() + key.size(); - auto commandId = - cr::Worker::My().mCc.PutVersion(mTreeId, true, versionSize, [&](uint8_t* versionBuf) { + auto commandId = cr::WorkerContext::My().mCc.PutVersion( + mTreeId, true, versionSize, [&](uint8_t* versionBuf) { new (versionBuf) RemoveVersion(chainedTuple.mWorkerId, chainedTuple.mTxId, chainedTuple.mCommandId, key, val, danglingPointer); }); @@ -419,7 +420,7 @@ OpCode TransactionKV::Remove(Slice key) { xIter.ShortenWithoutCompaction(sizeof(ChainedTuple)); } chainedTuple.mIsTombstone = true; - chainedTuple.mWorkerId = cr::Worker::My().mWorkerId; + chainedTuple.mWorkerId = cr::WorkerContext::My().mWorkerId; chainedTuple.mTxId = cr::ActiveTx().mStartTs; chainedTuple.mCommandId = commandId; @@ -433,7 +434,7 @@ OpCode TransactionKV::Remove(Slice key) { } OpCode TransactionKV::ScanDesc(Slice startKey, ScanCallback callback) { - LS_DCHECK(cr::Worker::My().IsTxStarted()); + LS_DCHECK(cr::WorkerContext::My().IsTxStarted()); if (cr::ActiveTx().IsLongRunning()) { TODOException(); return OpCode::kAbortTx; @@ -442,7 +443,7 @@ OpCode TransactionKV::ScanDesc(Slice startKey, ScanCallback callback) { } OpCode TransactionKV::ScanAsc(Slice startKey, ScanCallback callback) { - LS_DCHECK(cr::Worker::My().IsTxStarted()); + LS_DCHECK(cr::WorkerContext::My().IsTxStarted()); if (cr::ActiveTx().IsLongRunning()) { return scan4LongRunningTx(startKey, callback); } @@ -477,7 +478,8 
@@ void TransactionKV::undoLastInsert(const WalTxInsert* walInsert) { LS_DCHECK(xIter.Valid(), "Cannot find the inserted key in btree, workerId={}, " "startTs={}, key={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, key.ToString()); + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, + key.ToString()); // TODO(jian.z): write compensation wal entry if (walInsert->mPrevCommandId != kInvalidCommandid) { // only remove the inserted value and mark the chained tuple as @@ -501,7 +503,7 @@ void TransactionKV::undoLastInsert(const WalTxInsert* walInsert) { if (ret != OpCode::kOK) { Log::Error("Undo last insert failed, failed to remove current key, " "workerId={}, startTs={}, key={}, ret={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, key.ToString(), ToString(ret)); } } @@ -510,8 +512,8 @@ void TransactionKV::undoLastInsert(const WalTxInsert* walInsert) { JUMPMU_RETURN; } JUMPMU_CATCH() { - Log::Warn("Undo insert failed, workerId={}, startTs={}", cr::Worker::My().mWorkerId, - cr::Worker::My().mActiveTx.mStartTs); + Log::Warn("Undo insert failed, workerId={}, startTs={}", cr::WorkerContext::My().mWorkerId, + cr::WorkerContext::My().mActiveTx.mStartTs); } } } @@ -525,12 +527,14 @@ void TransactionKV::undoLastUpdate(const WalTxUpdate* walUpdate) { LS_DCHECK(xIter.Valid(), "Cannot find the updated key in btree, workerId={}, " "startTs={}, key={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, key.ToString()); + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, + key.ToString()); auto mutRawVal = xIter.MutableVal(); auto& tuple = *Tuple::From(mutRawVal.Data()); LS_DCHECK(!tuple.IsWriteLocked(), "Tuple is write locked, workerId={}, startTs={}, key={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, key.ToString()); + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, + key.ToString()); if (tuple.mFormat == TupleFormat::kFat) { FatTuple::From(mutRawVal.Data())->UndoLastUpdate(); } else { @@ -555,8 +559,8 @@ void TransactionKV::undoLastUpdate(const WalTxUpdate* walUpdate) { JUMPMU_RETURN; } JUMPMU_CATCH() { - Log::Warn("Undo update failed, workerId={}, startTs={}", cr::Worker::My().mWorkerId, - cr::Worker::My().mActiveTx.mStartTs); + Log::Warn("Undo update failed, workerId={}, startTs={}", cr::WorkerContext::My().mWorkerId, + cr::WorkerContext::My().mActiveTx.mStartTs); } } } @@ -570,7 +574,7 @@ void TransactionKV::undoLastRemove(const WalTxRemove* walRemove) { LS_DCHECK(xIter.Valid(), "Cannot find the tombstone of removed key, workerId={}, " "startTs={}, removedKey={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, removedKey.ToString()); // resize the current slot to store the removed tuple @@ -582,7 +586,7 @@ void TransactionKV::undoLastRemove(const WalTxRemove* walRemove) { "Failed to extend btree node slot to store the " "recovered chained tuple, workerId={}, startTs={}, " "removedKey={}, curRawValSize={}, chainedTupleSize={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, removedKey.ToString(), curRawVal.size(), chainedTupleSize); } else if (curRawVal.size() > chainedTupleSize) { xIter.ShortenWithoutCompaction(chainedTupleSize); @@ -595,8 +599,8 @@ 
void TransactionKV::undoLastRemove(const WalTxRemove* walRemove) { JUMPMU_RETURN; } JUMPMU_CATCH() { - Log::Warn("Undo remove failed, workerId={}, startTs={}", cr::Worker::My().mWorkerId, - cr::Worker::My().mActiveTx.mStartTs); + Log::Warn("Undo remove failed, workerId={}, startTs={}", cr::WorkerContext::My().mWorkerId, + cr::WorkerContext::My().mActiveTx.mStartTs); } } } @@ -624,9 +628,9 @@ bool TransactionKV::UpdateInFatTuple(PessimisticExclusiveIterator& xIter, Slice auto performUpdate = [&]() { fatTuple->Append(updateDesc); - fatTuple->mWorkerId = cr::Worker::My().mWorkerId; + fatTuple->mWorkerId = cr::WorkerContext::My().mWorkerId; fatTuple->mTxId = cr::ActiveTx().mStartTs; - fatTuple->mCommandId = cr::Worker::My().mCommandId++; + fatTuple->mCommandId = cr::WorkerContext::My().mCommandId++; updateCallBack(fatTuple->GetMutableValue()); LS_DCHECK(fatTuple->mPayloadCapacity >= fatTuple->mPayloadSize); }; @@ -704,7 +708,7 @@ void TransactionKV::GarbageCollect(const uint8_t* versionData, WORKERID versionW const auto& version = *RemoveVersion::From(versionData); // Delete tombstones caused by transactions below mCc.mLocalWmkOfAllTx. - if (versionTxId <= cr::Worker::My().mCc.mLocalWmkOfAllTx) { + if (versionTxId <= cr::WorkerContext::My().mCc.mLocalWmkOfAllTx) { LS_DLOG("Delete tombstones caused by transactions below " "mCc.mLocalWmkOfAllTx, versionWorkerId={}, versionTxId={}", versionWorkerId, versionTxId); @@ -789,13 +793,13 @@ void TransactionKV::GarbageCollect(const uint8_t* versionData, WORKERID versionW if (chainedTuple.mWorkerId == versionWorkerId && chainedTuple.mTxId == versionTxId && chainedTuple.mIsTombstone) { - LS_DCHECK(chainedTuple.mTxId > cr::Worker::My().mCc.mLocalWmkOfAllTx, + LS_DCHECK(chainedTuple.mTxId > cr::WorkerContext::My().mCc.mLocalWmkOfAllTx, "The removedKey is under mCc.mLocalWmkOfAllTx, should " "not happen, mCc.mLocalWmkOfAllTx={}, " "versionWorkerId={}, versionTxId={}, removedKey={}", - cr::Worker::My().mCc.mLocalWmkOfAllTx, versionWorkerId, versionTxId, + cr::WorkerContext::My().mCc.mLocalWmkOfAllTx, versionWorkerId, versionTxId, removedKey.ToString()); - if (chainedTuple.mTxId <= cr::Worker::My().mCc.mLocalWmkOfShortTx) { + if (chainedTuple.mTxId <= cr::WorkerContext::My().mCc.mLocalWmkOfShortTx) { LS_DLOG("Move the removedKey to graveyard, versionWorkerId={}, " "versionTxId={}, removedKey={}", versionWorkerId, versionTxId, removedKey.ToString()); @@ -824,7 +828,7 @@ void TransactionKV::GarbageCollect(const uint8_t* versionData, WORKERID versionW Log::Fatal("Meet a remove version upper than mCc.mLocalWmkOfShortTx, " "should not happen, mCc.mLocalWmkOfShortTx={}, " "versionWorkerId={}, versionTxId={}, removedKey={}", - cr::Worker::My().mCc.mLocalWmkOfShortTx, versionWorkerId, versionTxId, + cr::WorkerContext::My().mCc.mLocalWmkOfShortTx, versionWorkerId, versionTxId, removedKey.ToString()); } } else { @@ -870,7 +874,8 @@ void TransactionKV::unlock(const uint8_t* walEntryPtr) { auto xIter = GetExclusiveIterator(); xIter.SeekToEqual(key); LS_DCHECK(xIter.Valid(), "Cannot find the key in btree, workerId={}, startTs={}, key={}", - cr::Worker::My().mWorkerId, cr::Worker::My().mActiveTx.mStartTs, key.ToString()); + cr::WorkerContext::My().mWorkerId, cr::WorkerContext::My().mActiveTx.mStartTs, + key.ToString()); auto& tuple = *Tuple::From(xIter.MutableVal().Data()); ENSURE(tuple.mFormat == TupleFormat::kChained); } diff --git a/src/btree/Tuple.cpp b/src/btree/Tuple.cpp index 62fff9d0..776c8d37 100644 --- a/src/btree/Tuple.cpp +++ b/src/btree/Tuple.cpp @@ -5,7 +5,7 
@@ #include "leanstore/btree/TransactionKV.hpp" #include "leanstore/btree/core/BTreeNode.hpp" #include "leanstore/concurrency/CRManager.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/utils/Log.hpp" #include "leanstore/utils/Misc.hpp" @@ -63,13 +63,13 @@ bool Tuple::ToFat(PessimisticExclusiveIterator& xIter) { bool abortConversion = false; uint16_t numDeltasToReplace = 0; while (!abortConversion) { - if (cr::Worker::My().mCc.VisibleForAll(newerTxId)) { + if (cr::WorkerContext::My().mCc.VisibleForAll(newerTxId)) { // No need to convert versions that are visible for all to the FatTuple, // these old version can be GCed. Pruning versions space might get delayed break; } - if (!cr::Worker::My().mCc.GetVersion( + if (!cr::WorkerContext::My().mCc.GetVersion( newerWorkerId, newerTxId, newerCommandId, [&](const uint8_t* version, uint64_t) { numDeltasToReplace++; const auto& chainedDelta = *UpdateVersion::From(version); @@ -192,7 +192,7 @@ void FatTuple::GarbageCollection() { }; // Delete for all visible deltas, atm using cheap visibility check - if (cr::Worker::My().mCc.VisibleForAll(mTxId)) { + if (cr::WorkerContext::My().mCc.VisibleForAll(mTxId)) { mNumDeltas = 0; mDataOffset = mPayloadCapacity; mPayloadSize = mValSize; @@ -202,16 +202,16 @@ void FatTuple::GarbageCollection() { uint16_t deltasVisibleForAll = 0; for (int32_t i = mNumDeltas - 1; i >= 1; i--) { auto& delta = getDelta(i); - if (cr::Worker::My().mCc.VisibleForAll(delta.mTxId)) { + if (cr::WorkerContext::My().mCc.VisibleForAll(delta.mTxId)) { deltasVisibleForAll = i - 1; break; } } const TXID local_oldest_oltp = - cr::Worker::My().mStore->mCRManager->mGlobalWmkInfo.mOldestActiveShortTx.load(); + cr::WorkerContext::My().mStore->mCRManager->mGlobalWmkInfo.mOldestActiveShortTx.load(); const TXID local_newest_olap = - cr::Worker::My().mStore->mCRManager->mGlobalWmkInfo.mNewestActiveLongTx.load(); + cr::WorkerContext::My().mStore->mCRManager->mGlobalWmkInfo.mNewestActiveLongTx.load(); if (deltasVisibleForAll == 0 && local_newest_olap > local_oldest_oltp) { return; // Nothing to do here } @@ -355,7 +355,7 @@ void FatTuple::Append(UpdateDesc& updateDesc) { std::tuple FatTuple::GetVisibleTuple(ValCallback valCallback) const { // Latest version is visible - if (cr::Worker::My().mCc.VisibleForMe(mWorkerId, mTxId)) { + if (cr::WorkerContext::My().mCc.VisibleForMe(mWorkerId, mTxId)) { valCallback(GetValue()); return {OpCode::kOK, 1}; } @@ -372,7 +372,7 @@ std::tuple FatTuple::GetVisibleTuple(ValCallback valCallback) const auto& updateDesc = delta.GetUpdateDesc(); auto* xorData = delta.GetDeltaPtr(); BasicKV::CopyToValue(updateDesc, xorData, copiedVal->get()); - if (cr::Worker::My().mCc.VisibleForMe(delta.mWorkerId, delta.mTxId)) { + if (cr::WorkerContext::My().mCc.VisibleForMe(delta.mWorkerId, delta.mTxId)) { valCallback(Slice(copiedVal->get(), mValSize)); return {OpCode::kOK, numVisitedVersions}; } @@ -420,7 +420,7 @@ void FatTuple::ConvertToChained(TREEID treeId) { auto& updateDesc = delta.GetUpdateDesc(); auto sizeOfDescAndDelta = updateDesc.SizeWithDelta(); auto versionSize = sizeOfDescAndDelta + sizeof(UpdateVersion); - cr::Worker::My() + cr::WorkerContext::My() .mCc.Other(prevWorkerId) .mHistoryStorage.PutVersion( prevTxId, prevCommandId, treeId, false, versionSize, diff --git a/src/btree/core/BTreeGeneric.cpp b/src/btree/core/BTreeGeneric.cpp index 66dd9880..93c9d1f3 100644 --- a/src/btree/core/BTreeGeneric.cpp +++ b/src/btree/core/BTreeGeneric.cpp @@ -438,7 +438,7 
@@ BTreeGeneric::XMergeReturnCode BTreeGeneric::XMerge(GuardedBufferFramemStoreOption->mXMergeK; + const int64_t maxMergePages = mStore->mStoreOption->mXMergeK; GuardedBufferFrame guardedNodes[maxMergePages]; bool fullyMerged[maxMergePages]; diff --git a/src/concurrency/CRManager.cpp b/src/concurrency/CRManager.cpp index 8fe5cb41..eb550f5a 100644 --- a/src/concurrency/CRManager.cpp +++ b/src/concurrency/CRManager.cpp @@ -4,7 +4,7 @@ #include "leanstore/btree/BasicKV.hpp" #include "leanstore/concurrency/GroupCommitter.hpp" #include "leanstore/concurrency/HistoryStorage.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/concurrency/WorkerThread.hpp" #include "leanstore/utils/Log.hpp" @@ -16,7 +16,7 @@ namespace leanstore::cr { CRManager::CRManager(leanstore::LeanStore* store) : mStore(store), mGroupCommitter(nullptr) { auto* storeOption = store->mStoreOption; // start all worker threads - mWorkers.resize(storeOption->mWorkerThreads); + mWorkerCtxs.resize(storeOption->mWorkerThreads); mWorkerThreads.reserve(storeOption->mWorkerThreads); for (uint64_t workerId = 0; workerId < storeOption->mWorkerThreads; workerId++) { auto workerThread = std::make_unique(store, workerId, workerId); @@ -24,9 +24,9 @@ CRManager::CRManager(leanstore::LeanStore* store) : mStore(store), mGroupCommitt // create thread-local transaction executor on each worker thread workerThread->SetJob([&]() { - Worker::sTlsWorker = std::make_unique(workerId, mWorkers, mStore); - Worker::sTlsWorkerRaw = Worker::sTlsWorker.get(); - mWorkers[workerId] = Worker::sTlsWorker.get(); + WorkerContext::sTlsWorkerCtx = std::make_unique(workerId, mWorkerCtxs, mStore); + WorkerContext::sTlsWorkerCtxRaw = WorkerContext::sTlsWorkerCtx.get(); + mWorkerCtxs[workerId] = WorkerContext::sTlsWorkerCtx.get(); }); workerThread->Wait(); mWorkerThreads.emplace_back(std::move(workerThread)); @@ -35,7 +35,7 @@ CRManager::CRManager(leanstore::LeanStore* store) : mStore(store), mGroupCommitt // start group commit thread if (mStore->mStoreOption->mEnableWal) { const int cpu = storeOption->mWorkerThreads; - mGroupCommitter = std::make_unique(mStore, mStore->mWalFd, mWorkers, cpu); + mGroupCommitter = std::make_unique(mStore, mStore->mWalFd, mWorkerCtxs, cpu); mGroupCommitter->Start(); } @@ -82,8 +82,8 @@ void CRManager::setupHistoryStorage4EachWorker() { removeBtreeName, res.error().ToString()); } auto* removeIndex = res.value(); - mWorkers[i]->mCc.mHistoryStorage.SetUpdateIndex(updateIndex); - mWorkers[i]->mCc.mHistoryStorage.SetRemoveIndex(removeIndex); + mWorkerCtxs[i]->mCc.mHistoryStorage.SetUpdateIndex(updateIndex); + mWorkerCtxs[i]->mCc.mHistoryStorage.SetRemoveIndex(removeIndex); } } diff --git a/src/concurrency/ConcurrencyControl.cpp b/src/concurrency/ConcurrencyControl.cpp index 551f37e4..3aa02d5f 100644 --- a/src/concurrency/ConcurrencyControl.cpp +++ b/src/concurrency/ConcurrencyControl.cpp @@ -4,7 +4,7 @@ #include "leanstore/Units.hpp" #include "leanstore/buffer-manager/TreeRegistry.hpp" #include "leanstore/concurrency/CRManager.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/profiling/counters/WorkerCounters.hpp" #include "leanstore/utils/Defer.hpp" #include "leanstore/utils/Log.hpp" @@ -27,8 +27,8 @@ void CommitTree::AppendCommitLog(TXID startTs, TXID commitTs) { utils::Timer timer(CRCounters::MyCounters().cc_ms_committing); std::unique_lock xGuard(mMutex); mCommitLog.push_back({commitTs, startTs}); - 
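The CRManager hunk above hands each WorkerThread a job that creates the thread-local WorkerContext; combined with the SetJob(std::move(job)) changes elsewhere in this patch, the closure is moved rather than copied into the worker slot. A minimal sketch of that move-into-slot pattern, using simplified placeholder types rather than the real WorkerThread:

#include <functional>
#include <string>
#include <utility>

struct WorkerSlotSketch {
  std::function<void()> job;

  void SetJob(std::function<void()> j) {
    job = std::move(j);  // move, not copy: captured state is not duplicated
  }
};

int main() {
  WorkerSlotSketch slot;
  std::string payload(1024, 'x');           // illustrative captured state
  slot.SetJob([p = std::move(payload)]() {  // capture by move as well
    (void)p;                                // ... do work with p ...
  });
  slot.job();
  return 0;
}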
LS_DLOG("Commit log appended, workerId={}, startTs={}, commitTs={}", Worker::My().mWorkerId, - startTs, commitTs); + LS_DLOG("Commit log appended, workerId={}, startTs={}, commitTs={}", + WorkerContext::My().mWorkerId, startTs, commitTs); } void CommitTree::CompactCommitLog() { @@ -44,9 +44,9 @@ void CommitTree::CompactCommitLog() { // workers can see the latest commitTs of this worker. set.insert(mCommitLog[mCommitLog.size() - 1]); - const WORKERID myWorkerId = Worker::My().mWorkerId; - auto allWorkers = Worker::My().mAllWorkers; - for (WORKERID i = 0; i < Worker::My().mAllWorkers.size(); i++) { + const WORKERID myWorkerId = WorkerContext::My().mWorkerId; + auto allWorkers = WorkerContext::My().mAllWorkers; + for (WORKERID i = 0; i < WorkerContext::My().mAllWorkers.size(); i++) { if (i == myWorkerId) { continue; } @@ -58,7 +58,7 @@ void CommitTree::CompactCommitLog() { continue; } - activeTxId &= Worker::kCleanBitsMask; + activeTxId &= WorkerContext::kCleanBitsMask; if (auto result = lcbNoLatch(activeTxId); result) { set.insert(*result); } @@ -72,8 +72,8 @@ void CommitTree::CompactCommitLog() { } DEBUG_BLOCK() { - LS_DLOG("Commit log cleaned up, workerId={}, mCommitLog.size()={}", Worker::My().mWorkerId, - mCommitLog.size()); + LS_DLOG("Commit log cleaned up, workerId={}, mCommitLog.size()={}", + WorkerContext::My().mWorkerId, mCommitLog.size()); } } @@ -104,7 +104,7 @@ std::optional> CommitTree::lcbNoLatch(TXID startTs) { COMMANDID ConcurrencyControl::PutVersion(TREEID treeId, bool isRemoveCommand, uint64_t versionSize, std::function putCallBack) { utils::Timer timer(CRCounters::MyCounters().cc_ms_history_tree_insert); - auto& curWorker = Worker::My(); + auto& curWorker = WorkerContext::My(); auto commandId = curWorker.mCommandId++; if (isRemoveCommand) { commandId |= kRemoveCommandMark; @@ -116,7 +116,7 @@ COMMANDID ConcurrencyControl::PutVersion(TREEID treeId, bool isRemoveCommand, ui bool ConcurrencyControl::VisibleForMe(WORKERID workerId, TXID txId) { // visible if writtern by me - if (Worker::My().mWorkerId == workerId) { + if (WorkerContext::My().mWorkerId == workerId) { return true; } @@ -124,7 +124,7 @@ bool ConcurrencyControl::VisibleForMe(WORKERID workerId, TXID txId) { case IsolationLevel::kSnapshotIsolation: case IsolationLevel::kSerializable: { // mGlobalWmkOfAllTx is copied from global watermark info at the beginning of each transaction. - // Global watermarks are occassionally updated by Worker::updateGlobalTxWatermarks, it's + // Global watermarks are occassionally updated by WorkerContext::updateGlobalTxWatermarks, it's // possible that mGlobalWmkOfAllTx is not the latest value, but it is always safe to use it as // the lower bound of the visibility check. 
if (txId < mGlobalWmkOfAllTx) { @@ -182,12 +182,12 @@ void ConcurrencyControl::GarbageCollection() { if (mCleanedWmkOfShortTx <= mLocalWmkOfAllTx) { utils::Timer timer(CRCounters::MyCounters().cc_ms_gc_history_tree); LS_DLOG("Garbage collect history tree, workerId={}, fromTxId={}, toTxId(mLocalWmkOfAllTx)={}", - Worker::My().mWorkerId, 0, mLocalWmkOfAllTx); + WorkerContext::My().mWorkerId, 0, mLocalWmkOfAllTx); mHistoryStorage.PurgeVersions( 0, mLocalWmkOfAllTx, [&](const TXID versionTxId, const TREEID treeId, const uint8_t* versionData, uint64_t versionSize [[maybe_unused]], const bool calledBefore) { - mStore->mTreeRegistry->GarbageCollect(treeId, versionData, Worker::My().mWorkerId, + mStore->mTreeRegistry->GarbageCollect(treeId, versionData, WorkerContext::My().mWorkerId, versionTxId, calledBefore); COUNTERS_BLOCK() { WorkerCounters::MyCounters().cc_gc_long_tx_executed[treeId]++; @@ -198,7 +198,7 @@ void ConcurrencyControl::GarbageCollection() { } else { LS_DLOG("Skip garbage collect history tree, workerId={}, " "mCleanedWmkOfShortTx={}, mLocalWmkOfAllTx={}", - Worker::My().mWorkerId, mCleanedWmkOfShortTx, mLocalWmkOfAllTx); + WorkerContext::My().mWorkerId, mCleanedWmkOfShortTx, mLocalWmkOfAllTx); } // move tombstones to graveyard @@ -207,12 +207,12 @@ void ConcurrencyControl::GarbageCollection() { utils::Timer timer(CRCounters::MyCounters().cc_ms_gc_graveyard); LS_DLOG("Garbage collect graveyard, workerId={}, fromTxId={}, " "toTxId(mLocalWmkOfShortTx)={}", - Worker::My().mWorkerId, mCleanedWmkOfShortTx, mLocalWmkOfShortTx); + WorkerContext::My().mWorkerId, mCleanedWmkOfShortTx, mLocalWmkOfShortTx); mHistoryStorage.VisitRemovedVersions( mCleanedWmkOfShortTx, mLocalWmkOfShortTx, [&](const TXID versionTxId, const TREEID treeId, const uint8_t* versionData, uint64_t, const bool calledBefore) { - mStore->mTreeRegistry->GarbageCollect(treeId, versionData, Worker::My().mWorkerId, + mStore->mTreeRegistry->GarbageCollect(treeId, versionData, WorkerContext::My().mWorkerId, versionTxId, calledBefore); COUNTERS_BLOCK() { WorkerCounters::MyCounters().cc_todo_oltp_executed[treeId]++; @@ -222,12 +222,12 @@ void ConcurrencyControl::GarbageCollection() { } else { LS_DLOG("Skip garbage collect graveyard, workerId={}, " "mCleanedWmkOfShortTx={}, mLocalWmkOfShortTx={}", - Worker::My().mWorkerId, mCleanedWmkOfShortTx, mLocalWmkOfShortTx); + WorkerContext::My().mWorkerId, mCleanedWmkOfShortTx, mLocalWmkOfShortTx); } } ConcurrencyControl& ConcurrencyControl::Other(WORKERID otherWorkerId) { - return Worker::My().mAllWorkers[otherWorkerId]->mCc; + return WorkerContext::My().mAllWorkers[otherWorkerId]->mCc; } // It calculates and updates the global oldest running transaction id and the @@ -246,8 +246,9 @@ void ConcurrencyControl::updateGlobalTxWatermarks() { } utils::Timer timer(CRCounters::MyCounters().cc_ms_refresh_global_state); - auto meetGcProbability = mStore->mStoreOption->mEnableEagerGc || - utils::RandomGenerator::RandU64(0, Worker::My().mAllWorkers.size()) == 0; + auto meetGcProbability = + mStore->mStoreOption->mEnableEagerGc || + utils::RandomGenerator::RandU64(0, WorkerContext::My().mAllWorkers.size()) == 0; auto performGc = meetGcProbability && mStore->mCRManager->mGlobalWmkInfo.mGlobalMutex.try_lock(); if (!performGc) { LS_DLOG("Skip updating global watermarks, meetGcProbability={}, performGc={}", @@ -266,20 +267,20 @@ void ConcurrencyControl::updateGlobalTxWatermarks() { TXID oldestTxId = std::numeric_limits::max(); TXID newestLongTxId = std::numeric_limits::min(); TXID oldestShortTxId = 
std::numeric_limits::max(); - auto allWorkers = Worker::My().mAllWorkers; - for (WORKERID i = 0; i < Worker::My().mAllWorkers.size(); i++) { + auto allWorkers = WorkerContext::My().mAllWorkers; + for (WORKERID i = 0; i < WorkerContext::My().mAllWorkers.size(); i++) { auto activeTxId = allWorkers[i]->mActiveTxId.load(); // Skip transactions not running. if (activeTxId == 0) { continue; } // Skip transactions running in read-committed mode. - if (activeTxId & Worker::kRcBit) { + if (activeTxId & WorkerContext::kRcBit) { continue; } - bool isLongRunningTx = activeTxId & Worker::kLongRunningBit; - activeTxId &= Worker::kCleanBitsMask; + bool isLongRunningTx = activeTxId & WorkerContext::kLongRunningBit; + activeTxId &= WorkerContext::kCleanBitsMask; oldestTxId = std::min(activeTxId, oldestTxId); if (isLongRunningTx) { newestLongTxId = std::max(activeTxId, newestLongTxId); @@ -303,7 +304,7 @@ void ConcurrencyControl::updateGlobalTxWatermarks() { // Update global lower watermarks based on the three transaction ids TXID globalWmkOfAllTx = std::numeric_limits::max(); TXID globalWmkOfShortTx = std::numeric_limits::max(); - for (WORKERID i = 0; i < Worker::My().mAllWorkers.size(); i++) { + for (WORKERID i = 0; i < WorkerContext::My().mAllWorkers.size(); i++) { ConcurrencyControl& mCc = Other(i); if (mCc.mUpdatedLatestCommitTs == mCc.mLatestCommitTs) { LS_DLOG("Skip updating watermarks for worker {}, no transaction " @@ -368,7 +369,7 @@ void ConcurrencyControl::updateGlobalTxWatermarks() { void ConcurrencyControl::updateLocalWatermarks() { SCOPED_DEFER(LS_DLOG("Local watermarks updated, workerId={}, " "mLocalWmkOfAllTx={}, mLocalWmkOfShortTx={}", - Worker::My().mWorkerId, mLocalWmkOfAllTx, mLocalWmkOfShortTx)); + WorkerContext::My().mWorkerId, mLocalWmkOfAllTx, mLocalWmkOfShortTx)); while (true) { uint64_t version = mWmkVersion.load(); @@ -390,7 +391,7 @@ void ConcurrencyControl::updateLocalWatermarks() { "Lower watermark of all transactions should be no higher than the lower " "watermark of short-running transactions, workerId={}, " "mLocalWmkOfAllTx={}, mLocalWmkOfShortTx={}", - Worker::My().mWorkerId, mLocalWmkOfAllTx, mLocalWmkOfShortTx); + WorkerContext::My().mWorkerId, mLocalWmkOfAllTx, mLocalWmkOfShortTx); } } // namespace leanstore::cr diff --git a/src/concurrency/GroupCommitter.cpp b/src/concurrency/GroupCommitter.cpp index a97d7b28..0cbd9faa 100644 --- a/src/concurrency/GroupCommitter.cpp +++ b/src/concurrency/GroupCommitter.cpp @@ -1,7 +1,7 @@ #include "leanstore/concurrency/GroupCommitter.hpp" #include "leanstore/concurrency/CRManager.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/profiling/counters/CPUCounters.hpp" #include "leanstore/telemetry/MetricOnlyTimer.hpp" #include "telemetry/MetricsManager.hpp" @@ -24,8 +24,8 @@ void GroupCommitter::runImpl() { uint64_t minFlushedGSN = std::numeric_limits::max(); uint64_t maxFlushedGSN = 0; TXID minFlushedTxId = std::numeric_limits::max(); - std::vector numRfaTxs(mWorkers.size(), 0); - std::vector walFlushReqCopies(mWorkers.size()); + std::vector numRfaTxs(mWorkerCtxs.size(), 0); + std::vector walFlushReqCopies(mWorkerCtxs.size()); while (mKeepRunning) { // phase 1 @@ -54,8 +54,8 @@ void GroupCommitter::collectWalRecords(uint64_t& minFlushedGSN, uint64_t& maxFlu maxFlushedGSN = 0; minFlushedTxId = std::numeric_limits::max(); - for (uint32_t workerId = 0; workerId < mWorkers.size(); workerId++) { - auto& logging = mWorkers[workerId]->mLogging; + for (uint32_t workerId = 
0; workerId < mWorkerCtxs.size(); workerId++) { + auto& logging = mWorkerCtxs[workerId]->mLogging; // collect logging info std::unique_lock guard(logging.mRfaTxToCommitMutex); numRfaTxs[workerId] = logging.mRfaTxToCommit.size(); @@ -127,8 +127,8 @@ void GroupCommitter::determineCommitableTx(uint64_t minFlushedGSN, uint64_t maxF METRIC_HIST_OBSERVE(mStore->mMetricsManager, group_committer_commit_txs_us, timer.ElaspedUs()); }); - for (WORKERID workerId = 0; workerId < mWorkers.size(); workerId++) { - auto& logging = mWorkers[workerId]->mLogging; + for (WORKERID workerId = 0; workerId < mWorkerCtxs.size(); workerId++) { + auto& logging = mWorkerCtxs[workerId]->mLogging; const auto& reqCopy = walFlushReqCopies[workerId]; // update the flushed commit TS info diff --git a/src/concurrency/Logging.cpp b/src/concurrency/Logging.cpp index d858fd84..77e49f6e 100644 --- a/src/concurrency/Logging.cpp +++ b/src/concurrency/Logging.cpp @@ -2,7 +2,7 @@ #include "leanstore/Exceptions.hpp" #include "leanstore/concurrency/WalEntry.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/profiling/counters/WorkerCounters.hpp" #include "leanstore/utils/Log.hpp" #include "utils/ToJson.hpp" @@ -59,8 +59,9 @@ void Logging::WriteWalTxAbort() { mWalBuffered += size; publishWalFlushReq(); - LS_DLOG("WriteWalTxAbort, workerId={}, startTs={}, curGSN={}, walJson={}", Worker::My().mWorkerId, - Worker::My().mActiveTx.mStartTs, GetCurrentGsn(), utils::ToJsonString(entry)); + LS_DLOG("WriteWalTxAbort, workerId={}, startTs={}, curGSN={}, walJson={}", + WorkerContext::My().mWorkerId, WorkerContext::My().mActiveTx.mStartTs, GetCurrentGsn(), + utils::ToJsonString(entry)); } void Logging::WriteWalTxFinish() { @@ -71,14 +72,14 @@ void Logging::WriteWalTxFinish() { // Initialize a WalTxFinish auto* data = mWalBuffer + mWalBuffered; std::memset(data, 0, size); - auto* entry [[maybe_unused]] = new (data) WalTxFinish(Worker::My().mActiveTx.mStartTs); + auto* entry [[maybe_unused]] = new (data) WalTxFinish(WorkerContext::My().mActiveTx.mStartTs); // Submit the WalTxAbort to group committer mWalBuffered += size; publishWalFlushReq(); LS_DLOG("WriteWalTxFinish, workerId={}, startTs={}, curGSN={}, walJson={}", - Worker::My().mWorkerId, Worker::My().mActiveTx.mStartTs, GetCurrentGsn(), + WorkerContext::My().mWorkerId, WorkerContext::My().mActiveTx.mStartTs, GetCurrentGsn(), utils::ToJsonString(entry)); } @@ -100,8 +101,8 @@ void Logging::SubmitWALEntryComplex(uint64_t totalSize) { COUNTERS_BLOCK() { WorkerCounters::MyCounters().wal_write_bytes += totalSize; } - LS_DLOG("SubmitWal, workerId={}, startTs={}, curGSN={}, walJson={}", Worker::My().mWorkerId, - Worker::My().mActiveTx.mStartTs, GetCurrentGsn(), + LS_DLOG("SubmitWal, workerId={}, startTs={}, curGSN={}, walJson={}", + WorkerContext::My().mWorkerId, WorkerContext::My().mActiveTx.mStartTs, GetCurrentGsn(), utils::ToJsonString(mActiveWALEntryComplex)); } @@ -110,7 +111,7 @@ void Logging::publishWalBufferedOffset() { } void Logging::publishWalFlushReq() { - WalFlushReq current(mWalBuffered, GetCurrentGsn(), Worker::My().mActiveTx.mStartTs); + WalFlushReq current(mWalBuffered, GetCurrentGsn(), WorkerContext::My().mActiveTx.mStartTs); mWalFlushReq.Set(current); } diff --git a/src/concurrency/Worker.cpp b/src/concurrency/WorkerContext.cpp similarity index 93% rename from src/concurrency/Worker.cpp rename to src/concurrency/WorkerContext.cpp index cf0c2041..8a028bc8 100644 --- a/src/concurrency/Worker.cpp +++ 
b/src/concurrency/WorkerContext.cpp @@ -1,4 +1,4 @@ -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/LeanStore.hpp" #include "leanstore/buffer-manager/TreeRegistry.hpp" @@ -19,10 +19,11 @@ namespace leanstore::cr { -thread_local std::unique_ptr Worker::sTlsWorker = nullptr; -thread_local Worker* Worker::sTlsWorkerRaw = nullptr; +thread_local std::unique_ptr WorkerContext::sTlsWorkerCtx = nullptr; +thread_local WorkerContext* WorkerContext::sTlsWorkerCtxRaw = nullptr; -Worker::Worker(uint64_t workerId, std::vector& allWorkers, leanstore::LeanStore* store) +WorkerContext::WorkerContext(uint64_t workerId, std::vector& allWorkers, + leanstore::LeanStore* store) : mStore(store), mCc(store, allWorkers.size()), mActiveTxId(0), @@ -39,12 +40,12 @@ Worker::Worker(uint64_t workerId, std::vector& allWorkers, leanstore::L mCc.mLcbCacheKey = std::make_unique(mAllWorkers.size()); } -Worker::~Worker() { +WorkerContext::~WorkerContext() { free(mLogging.mWalBuffer); mLogging.mWalBuffer = nullptr; } -void Worker::StartTx(TxMode mode, IsolationLevel level, bool isReadOnly) { +void WorkerContext::StartTx(TxMode mode, IsolationLevel level, bool isReadOnly) { Transaction prevTx [[maybe_unused]] = mActiveTx; LS_DCHECK(prevTx.mState != TxState::kStarted, "Previous transaction not ended, workerId={}, startTs={}, txState={}", mWorkerId, @@ -108,7 +109,7 @@ void Worker::StartTx(TxMode mode, IsolationLevel level, bool isReadOnly) { mCc.mCommitTree.CompactCommitLog(); } -void Worker::CommitTx() { +void WorkerContext::CommitTx() { SCOPED_DEFER(mActiveTx.mState = TxState::kCommitted); if (!mActiveTx.mIsDurable) { @@ -180,7 +181,7 @@ void Worker::CommitTx() { //! transaction //! //! It may share the same code with the recovery process? 
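The rename above keeps the two thread-local slots: a unique_ptr that owns the context and a cached raw pointer used by My() on the hot path. A minimal sketch of that accessor pattern with a placeholder type:

#include <cassert>
#include <cstdint>
#include <memory>

struct WorkerContextSketch {
  uint64_t workerId = 0;

  static WorkerContextSketch& My() {
    assert(sTlsRaw != nullptr && "must be called on a worker thread");
    return *sTlsRaw;  // cached raw pointer for cheap access
  }

  static thread_local std::unique_ptr<WorkerContextSketch> sTls;
  static thread_local WorkerContextSketch* sTlsRaw;
};

thread_local std::unique_ptr<WorkerContextSketch> WorkerContextSketch::sTls = nullptr;
thread_local WorkerContextSketch* WorkerContextSketch::sTlsRaw = nullptr;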
-void Worker::AbortTx() { +void WorkerContext::AbortTx() { SCOPED_DEFER({ mActiveTx.mState = TxState::kAborted; METRIC_COUNTER_INC(mStore->mMetricsManager, tx_abort_total, 1); diff --git a/src/leanstore-c/StoreOption.cpp b/src/leanstore-c/StoreOption.cpp index 05e4ca3f..ce4904a8 100644 --- a/src/leanstore-c/StoreOption.cpp +++ b/src/leanstore-c/StoreOption.cpp @@ -11,7 +11,7 @@ static const StoreOption kDefaultStoreOption = { // log related options .mLogLevel = LogLevel::kInfo, - // Worker thread related options + // WorkerContext thread related options .mWorkerThreads = 4, .mWalBufferSize = 10 * 1024 * 1024, diff --git a/src/leanstore-c/leanstore-c.cpp b/src/leanstore-c/leanstore-c.cpp index ed761346..d6234b67 100644 --- a/src/leanstore-c/leanstore-c.cpp +++ b/src/leanstore-c/leanstore-c.cpp @@ -6,10 +6,14 @@ #include "leanstore/Slice.hpp" #include "leanstore/btree/BasicKV.hpp" #include "leanstore/btree/core/PessimisticSharedIterator.hpp" +#include "telemetry/MetricsHttpExposer.hpp" #include +#include #include #include +#include +#include #include #include @@ -25,11 +29,13 @@ String* CreateString(const char* data, uint64_t size) { if (data == nullptr || size == 0) { str->mData = nullptr; str->mSize = 0; + str->mCapacity = 0; return str; } // allocate memory, copy data str->mSize = size; + str->mCapacity = size + 1; str->mData = new char[size + 1]; memcpy(str->mData, data, size); str->mData[size] = '\0'; @@ -46,6 +52,7 @@ void DestroyString(String* str) { str->mData = nullptr; str->mSize = 0; + str->mCapacity = 0; // release the string object delete str; @@ -123,15 +130,32 @@ bool BasicKvInsert(BasicKvHandle* handle, uint64_t workerId, StringSlice key, St return succeed; } -String* BasicKvLookup(BasicKvHandle* handle, uint64_t workerId, StringSlice key) { - String* val{nullptr}; +bool BasicKvLookup(BasicKvHandle* handle, uint64_t workerId, StringSlice key, String** val) { + bool found = false; handle->mStore->ExecSync(workerId, [&]() { + // copy value out to a thread-local buffer to reduce memory allocation auto copyValueOut = [&](leanstore::Slice valSlice) { - val = CreateString(reinterpret_cast(valSlice.data()), valSlice.size()); + // set the found flag + found = true; + + // create a new string if the value is out of the buffer size + if ((**val).mCapacity < valSlice.size() + 1) { + DestroyString(*val); + *val = CreateString(reinterpret_cast(valSlice.data()), valSlice.size()); + return; + } + + // copy data to the buffer + (**val).mSize = valSlice.size(); + memcpy((**val).mData, valSlice.data(), valSlice.size()); + (**val).mData[valSlice.size()] = '\0'; }; + + // lookup the key handle->mBtree->Lookup(leanstore::Slice(key.mData, key.mSize), std::move(copyValueOut)); }); - return val; + + return found; } bool BasicKvRemove(BasicKvHandle* handle, uint64_t workerId, StringSlice key) { @@ -247,3 +271,24 @@ StringSlice BasicKvIterVal(BasicKvIterHandle* handle) { auto valSlice = handle->mIterator.Val(); return {reinterpret_cast(valSlice.data()), valSlice.size()}; } + +//------------------------------------------------------------------------------ +// Interfaces for metrics +//------------------------------------------------------------------------------ + +static leanstore::telemetry::MetricsHttpExposer* sGlobalMetricsHttpExposer = nullptr; +static std::mutex sGlobalMetricsHttpExposerMutex; + +void StartMetricsHttpExposer(int32_t port) { + std::unique_lock guard{sGlobalMetricsHttpExposerMutex}; + sGlobalMetricsHttpExposer = new leanstore::telemetry::MetricsHttpExposer(nullptr, port); + 
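The reworked BasicKvLookup above returns whether the key was found and writes the value into a caller-owned String that is reused across calls and only reallocated when the value does not fit. A hedged usage sketch: how the BasicKvHandle is created is outside this hunk, so it is taken as a parameter, and the header path is an assumption.

#include <cstdio>

#include "leanstore-c/leanstore-c.h"  // assumed header for the C API

void LookupTwice(BasicKvHandle* kv, StringSlice key1, StringSlice key2) {
  String* val = CreateString(nullptr, 0);  // empty, reusable out-buffer
  if (BasicKvLookup(kv, 0 /* workerId */, key1, &val)) {
    std::printf("key1 -> %.*s\n", static_cast<int>(val->mSize), val->mData);
  }
  if (BasicKvLookup(kv, 0 /* workerId */, key2, &val)) {
    std::printf("key2 -> %.*s\n", static_cast<int>(val->mSize), val->mData);
  }
  DestroyString(val);  // one release, however many times the buffer was reused
}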
sGlobalMetricsHttpExposer->Start(); +} + +void StopMetricsHttpExposer() { + std::unique_lock guard{sGlobalMetricsHttpExposerMutex}; + if (sGlobalMetricsHttpExposer != nullptr) { + delete sGlobalMetricsHttpExposer; + sGlobalMetricsHttpExposer = nullptr; + } +} diff --git a/src/telemetry/MetricsHttpExposer.cpp b/src/telemetry/MetricsHttpExposer.cpp index 751faa61..45fee57c 100644 --- a/src/telemetry/MetricsHttpExposer.cpp +++ b/src/telemetry/MetricsHttpExposer.cpp @@ -5,8 +5,12 @@ namespace leanstore::telemetry { MetricsHttpExposer::MetricsHttpExposer(LeanStore* store) + : leanstore::telemetry::MetricsHttpExposer(store, store->mStoreOption->mMetricsPort) { +} + +MetricsHttpExposer::MetricsHttpExposer(LeanStore* store, int32_t port) : UserThread(store, "MetricsExposer"), - mPort(mStore->mStoreOption->mMetricsPort) { + mPort(port) { mServer.new_task_queue = [] { return new httplib::ThreadPool(1); }; mServer.Get("/metrics", [&](const httplib::Request& req, httplib::Response& res) { handleMetrics(req, res); diff --git a/src/telemetry/MetricsHttpExposer.hpp b/src/telemetry/MetricsHttpExposer.hpp index fa903e1d..c869753e 100644 --- a/src/telemetry/MetricsHttpExposer.hpp +++ b/src/telemetry/MetricsHttpExposer.hpp @@ -24,6 +24,8 @@ class MetricsHttpExposer : public utils::UserThread { public: MetricsHttpExposer(LeanStore* store); + MetricsHttpExposer(LeanStore* store, int32_t port); + ~MetricsHttpExposer() override { mServer.stop(); } @@ -127,6 +129,7 @@ class MetricsHttpExposer : public utils::UserThread { //! The http server httplib::Server mServer; + //! The port to expose metrics int32_t mPort; //! The mutex to protect mCollectable diff --git a/src/telemetry/MetricsManager.hpp b/src/telemetry/MetricsManager.hpp index 49e21a36..43d5d536 100644 --- a/src/telemetry/MetricsManager.hpp +++ b/src/telemetry/MetricsManager.hpp @@ -1,8 +1,6 @@ #pragma once #include "leanstore/Exceptions.hpp" -#include "leanstore/utils/UserThread.hpp" -#include "telemetry/MetricsHttpExposer.hpp" #include #include @@ -118,34 +116,36 @@ const std::vector kBoundariesUs{ //! LeanStore. It's expected to be a singleton inside a LeanStore instance. class MetricsManager { public: - MetricsManager(LeanStore* store) : mExposer(store) { - // create a metrics registry + //! The constructor of the MetricsManager. + MetricsManager() { mRegistry = std::make_shared(); METRIC_COUNTER_LIST(INIT_METRIC_COUNTER); METRIC_HIST_LIST(INIT_METRIC_HIST); - - mExposer.SetCollectable(mRegistry); - } - - void Expose() { - mExposer.Start(); } METRIC_COUNTER_LIST(DEFINE_METRIC_FUNC_COUNTER_INC); METRIC_HIST_LIST(DEFINE_METRIC_FUNC_HIST_OBSERVE); + //! Get the metrics registry + std::shared_ptr GetRegistry() { + return mRegistry; + } + private: + //! Create a counter family prometheus::Family* createCounterFamily(const std::string& metricName, const std::string& help) { return &prometheus::BuildCounter().Name(metricName).Help(help).Register(*mRegistry); } + //! Create a histogram family prometheus::Family* createHistogramFamily(const std::string& metricName, const std::string& help) { return &prometheus::BuildHistogram().Name(metricName).Help(help).Register(*mRegistry); } + //! Create linear buckets static prometheus::Histogram::BucketBoundaries createLinearBuckets(double start, double end, double step) { auto bucketBoundaries = prometheus::Histogram::BucketBoundaries{}; @@ -155,10 +155,9 @@ class MetricsManager { return bucketBoundaries; } + //! 
The registry for all the metrics std::shared_ptr mRegistry; - MetricsHttpExposer mExposer; - METRIC_COUNTER_LIST(DECLARE_METRIC_COUNTER); METRIC_HIST_LIST(DECLARE_METRIC_HIST); }; diff --git a/tests/LongRunningTxTest.cpp b/tests/LongRunningTxTest.cpp index aa73dd3c..7d11f20f 100644 --- a/tests/LongRunningTxTest.cpp +++ b/tests/LongRunningTxTest.cpp @@ -6,7 +6,7 @@ #include "leanstore/buffer-manager/BufferManager.hpp" #include "leanstore/concurrency/CRManager.hpp" #include "leanstore/concurrency/HistoryStorage.hpp" -#include "leanstore/concurrency/Worker.hpp" +#include "leanstore/concurrency/WorkerContext.hpp" #include "leanstore/utils/Defer.hpp" #include "leanstore/utils/RandomGenerator.hpp" @@ -46,7 +46,7 @@ class LongRunningTxTest : public ::testing::Test { ASSERT_TRUE(res); mStore = std::move(res.value()); - // Worker 0, create a btree for test + // WorkerContext 0, create a btree for test mTreeName = RandomGenerator::RandAlphString(10); mStore->ExecSync(0, [&]() { auto res = mStore->CreateTransactionKV(mTreeName); @@ -55,25 +55,25 @@ class LongRunningTxTest : public ::testing::Test { ASSERT_NE(mKv, nullptr); }); - // Worker 0, do extra insert and remove transactions in worker 0 to make it + // WorkerContext 0, do extra insert and remove transactions in worker 0 to make it // have more than one entries in the commit log, which helps to advance the // global lower watermarks for garbage collection mStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); ASSERT_EQ(mKv->Insert(ToSlice("0"), ToSlice("0")), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); ASSERT_EQ(mKv->Remove(ToSlice("0")), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } void TearDown() override { - // Worker 0, remove the btree + // WorkerContext 0, remove the btree mStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); mStore->DropTransactionKV(mTreeName); }); } @@ -90,19 +90,19 @@ TEST_F(LongRunningTxTest, LookupFromGraveyard) { // Insert 2 key-values as the test base. mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(mKv->Insert(key1, ToSlice(val1)), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(mKv->Insert(key2, ToSlice(val2)), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); - mStore->ExecSync(1, [&]() { cr::Worker::My().StartTx(); }); + mStore->ExecSync(1, [&]() { cr::WorkerContext::My().StartTx(); }); mStore->ExecSync(2, [&]() { - cr::Worker::My().StartTx(TxMode::kLongRunning); + cr::WorkerContext::My().StartTx(TxMode::kLongRunning); // get the old value in worker 2 EXPECT_EQ(mKv->Lookup(key1, copyValue), OpCode::kOK); @@ -130,7 +130,7 @@ TEST_F(LongRunningTxTest, LookupFromGraveyard) { // commit the transaction in worker 1, after garbage collection when // committing the transaction, tombstones should be moved to the graveyard. 
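The test above depends on the garbage-collection rule from the TransactionKV::GarbageCollect hunk earlier in this patch: remove versions below the all-transactions watermark are deleted outright, while those below the short-running watermark are moved to the graveyard for long-running readers. A condensed sketch of that decision (simplified, not the actual implementation):

#include <cstdint>

enum class GcAction { kDeleteTombstone, kMoveToGraveyard, kKeep };

GcAction ClassifyRemoveVersion(uint64_t versionTxId, uint64_t wmkOfAllTx,
                               uint64_t wmkOfShortTx) {
  if (versionTxId <= wmkOfAllTx) {
    return GcAction::kDeleteTombstone;  // invisible to every active transaction
  }
  if (versionTxId <= wmkOfShortTx) {
    return GcAction::kMoveToGraveyard;  // only long-running readers may still need it
  }
  return GcAction::kKeep;               // still visible to short-running readers
}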
mStore->ExecSync(1, [&]() { - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(mKv->mGraveyard->CountEntries(), 2u); }); @@ -143,13 +143,13 @@ TEST_F(LongRunningTxTest, LookupFromGraveyard) { EXPECT_EQ(copiedVal, val2); // commit the transaction in worker 2 - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); // now worker 2 can not get the old value mStore->ExecSync(2, [&]() { - cr::Worker::My().StartTx(TxMode::kLongRunning, IsolationLevel::kSnapshotIsolation); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(TxMode::kLongRunning, IsolationLevel::kSnapshotIsolation); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); EXPECT_EQ(mKv->Lookup(key1, copyValue), OpCode::kNotFound); EXPECT_EQ(mKv->Lookup(key2, copyValue), OpCode::kNotFound); @@ -166,21 +166,21 @@ TEST_F(LongRunningTxTest, LookupAfterUpdate100Times) { // Work 1, insert 2 key-values as the test base mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(mKv->Insert(key1, ToSlice(val1)), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(mKv->Insert(key2, ToSlice(val2)), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); - // Worker 1, start a short-running transaction - mStore->ExecSync(1, [&]() { cr::Worker::My().StartTx(); }); + // WorkerContext 1, start a short-running transaction + mStore->ExecSync(1, [&]() { cr::WorkerContext::My().StartTx(); }); - // Worker 2, start a long-running transaction, lookup, get the old value + // WorkerContext 2, start a long-running transaction, lookup, get the old value mStore->ExecSync(2, [&]() { - cr::Worker::My().StartTx(TxMode::kLongRunning); + cr::WorkerContext::My().StartTx(TxMode::kLongRunning); EXPECT_EQ(mKv->Lookup(key1, copyValue), OpCode::kOK); EXPECT_EQ(copiedVal, val1); @@ -189,7 +189,7 @@ TEST_F(LongRunningTxTest, LookupAfterUpdate100Times) { EXPECT_EQ(copiedVal, val2); }); - // Worker 1, update key1 100 times with random values + // WorkerContext 1, update key1 100 times with random values std::string newVal; mStore->ExecSync(1, [&]() { auto updateDescBufSize = UpdateDesc::Size(1); @@ -210,7 +210,7 @@ TEST_F(LongRunningTxTest, LookupAfterUpdate100Times) { } }); - // Worker 2, lookup, get the old value + // WorkerContext 2, lookup, get the old value mStore->ExecSync(2, [&]() { EXPECT_EQ(mKv->Lookup(key1, copyValue), OpCode::kOK); EXPECT_EQ(copiedVal, val1); @@ -219,19 +219,19 @@ TEST_F(LongRunningTxTest, LookupAfterUpdate100Times) { EXPECT_EQ(copiedVal, val2); }); - // Worker 1, commit the transaction, graveyard should be empty, update history + // WorkerContext 1, commit the transaction, graveyard should be empty, update history // trees should have 100 versions mStore->ExecSync(1, [&]() { - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(mKv->mGraveyard->CountEntries(), 0u); - auto* updateTree = cr::Worker::My().mCc.mHistoryStorage.GetUpdateIndex(); - auto* removeTree = cr::Worker::My().mCc.mHistoryStorage.GetRemoveIndex(); + auto* updateTree = cr::WorkerContext::My().mCc.mHistoryStorage.GetUpdateIndex(); + auto* removeTree = cr::WorkerContext::My().mCc.mHistoryStorage.GetRemoveIndex(); EXPECT_EQ(updateTree->CountEntries(), 100u); EXPECT_EQ(removeTree->CountEntries(), 0u); }); - // Worker 2, lookup, skip the update versions, still get old values, commit + // WorkerContext 2, lookup, 
skip the update versions, still get old values, commit mStore->ExecSync(2, [&]() { EXPECT_EQ(mKv->Lookup(key1, copyValue), OpCode::kOK); EXPECT_EQ(copiedVal, val1); @@ -240,13 +240,13 @@ TEST_F(LongRunningTxTest, LookupAfterUpdate100Times) { EXPECT_EQ(copiedVal, val2); // commit the transaction in worker 2 - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); - // Worker 2, now get the updated new value + // WorkerContext 2, now get the updated new value mStore->ExecSync(2, [&]() { - cr::Worker::My().StartTx(TxMode::kLongRunning); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(TxMode::kLongRunning); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); EXPECT_EQ(mKv->Lookup(key1, copyValue), OpCode::kOK); EXPECT_EQ(copiedVal, newVal); @@ -279,8 +279,8 @@ TEST_F(LongRunningTxTest, ScanAscFromGraveyard) { // insert the key-values in worker 0 mStore->ExecSync(0, [&]() { for (const auto& [key, val] : kvToTest) { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); EXPECT_EQ(mKv->Insert(key, ToSlice(val)), OpCode::kOK); } }); @@ -294,13 +294,13 @@ TEST_F(LongRunningTxTest, ScanAscFromGraveyard) { return true; }; mStore->ExecSync(2, [&]() { - cr::Worker::My().StartTx(TxMode::kLongRunning); + cr::WorkerContext::My().StartTx(TxMode::kLongRunning); EXPECT_EQ(mKv->ScanAsc(ToSlice(smallestKey), copyKeyVal), OpCode::kOK); }); // remove the key-values in worker 1 mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); for (const auto& [key, val] : kvToTest) { EXPECT_EQ(mKv->Remove(key), OpCode::kOK); } @@ -313,7 +313,7 @@ TEST_F(LongRunningTxTest, ScanAscFromGraveyard) { // commit the transaction in worker 1, all the removed key-values should be // moved to graveyard mStore->ExecSync(1, [&]() { - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(mKv->mGraveyard->CountEntries(), kvToTest.size()); }); @@ -322,13 +322,13 @@ TEST_F(LongRunningTxTest, ScanAscFromGraveyard) { EXPECT_EQ(mKv->ScanAsc(ToSlice(smallestKey), copyKeyVal), OpCode::kOK); // commit the transaction in worker 2 - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); // now worker 2 can not get the old values mStore->ExecSync(2, [&]() { - cr::Worker::My().StartTx(TxMode::kLongRunning); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(TxMode::kLongRunning); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); EXPECT_EQ(mKv->ScanAsc(ToSlice(smallestKey), copyKeyVal), OpCode::kOK); }); } diff --git a/tests/MvccTest.cpp b/tests/MvccTest.cpp index 09bb71a3..85af0a9a 100644 --- a/tests/MvccTest.cpp +++ b/tests/MvccTest.cpp @@ -50,8 +50,8 @@ class MvccTest : public ::testing::Test { void TearDown() override { mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); mStore->DropTransactionKV(mTreeName); }); } @@ -62,10 +62,10 @@ TEST_F(MvccTest, LookupWhileInsert) { auto key0 = RandomGenerator::RandAlphString(42); auto val0 = RandomGenerator::RandAlphString(151); mStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = mBTree->Insert(Slice((const uint8_t*)key0.data(), key0.size()), Slice((const uint8_t*)val0.data(), val0.size())); - cr::Worker::My().CommitTx(); + 
cr::WorkerContext::My().CommitTx(); EXPECT_EQ(res, OpCode::kOK); }); @@ -73,7 +73,7 @@ TEST_F(MvccTest, LookupWhileInsert) { auto key1 = RandomGenerator::RandAlphString(17); auto val1 = RandomGenerator::RandAlphString(131); mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = mBTree->Insert(Slice((const uint8_t*)key1.data(), key1.size()), Slice((const uint8_t*)val1.data(), val1.size())); EXPECT_EQ(res, OpCode::kOK); @@ -86,11 +86,11 @@ TEST_F(MvccTest, LookupWhileInsert) { auto copyValueOut = [&](Slice val) { copiedValue = std::string((const char*)val.data(), val.size()); }; - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(mBTree->Lookup(Slice((const uint8_t*)key0.data(), key0.size()), copyValueOut), OpCode::kOK); EXPECT_EQ(copiedValue, val0); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); // commit the transaction @@ -103,7 +103,7 @@ TEST_F(MvccTest, LookupWhileInsert) { EXPECT_EQ(mBTree->Lookup(Slice((const uint8_t*)key1.data(), key1.size()), copyValueOut), OpCode::kOK); EXPECT_EQ(copiedValue, val1); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); // now we can see the latest record @@ -112,11 +112,11 @@ TEST_F(MvccTest, LookupWhileInsert) { auto copyValueOut = [&](Slice val) { copiedValue = std::string((const char*)val.data(), val.size()); }; - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(mBTree->Lookup(Slice((const uint8_t*)key1.data(), key1.size()), copyValueOut), OpCode::kOK); EXPECT_EQ(copiedValue, val1); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } @@ -125,10 +125,10 @@ TEST_F(MvccTest, InsertConflict) { auto key0 = RandomGenerator::RandAlphString(42); auto val0 = RandomGenerator::RandAlphString(151); mStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = mBTree->Insert(Slice((const uint8_t*)key0.data(), key0.size()), Slice((const uint8_t*)val0.data(), val0.size())); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(res, OpCode::kOK); }); @@ -136,7 +136,7 @@ TEST_F(MvccTest, InsertConflict) { auto key1 = key0 + "a"; auto val1 = val0; mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = mBTree->Insert(Slice((const uint8_t*)key1.data(), key1.size()), Slice((const uint8_t*)val1.data(), val1.size())); EXPECT_EQ(res, OpCode::kOK); @@ -144,22 +144,22 @@ TEST_F(MvccTest, InsertConflict) { // start another transaction to insert the same key mStore->ExecSync(2, [&]() { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = mBTree->Insert(Slice((const uint8_t*)key1.data(), key1.size()), Slice((const uint8_t*)val1.data(), val1.size())); EXPECT_EQ(res, OpCode::kAbortTx); - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); }); // start another transaction to insert a smaller key auto key2 = std::string(key0.data(), key0.size() - 1); auto val2 = val0; mStore->ExecSync(2, [&]() { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = mBTree->Insert(Slice((const uint8_t*)key1.data(), key1.size()), Slice((const uint8_t*)val1.data(), val1.size())); EXPECT_EQ(res, OpCode::kAbortTx); - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); }); // commit the transaction @@ -172,7 +172,7 @@ TEST_F(MvccTest, InsertConflict) { EXPECT_EQ(mBTree->Lookup(Slice((const uint8_t*)key1.data(), key1.size()), copyValueOut), 
OpCode::kOK); EXPECT_EQ(copiedValue, val1); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); // now we can see the latest record @@ -181,11 +181,11 @@ TEST_F(MvccTest, InsertConflict) { auto copyValueOut = [&](Slice val) { copiedValue = std::string((const char*)val.data(), val.size()); }; - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(mBTree->Lookup(Slice((const uint8_t*)key1.data(), key1.size()), copyValueOut), OpCode::kOK); EXPECT_EQ(copiedValue, val1); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } diff --git a/tests/OptimisticGuardedTest.cpp b/tests/OptimisticGuardedTest.cpp index 25dbdbfb..9fd472a6 100644 --- a/tests/OptimisticGuardedTest.cpp +++ b/tests/OptimisticGuardedTest.cpp @@ -39,14 +39,14 @@ class OptimisticGuardedTest : public ::testing::Test { TEST_F(OptimisticGuardedTest, Set) { storage::OptimisticGuarded guardedVal({0, 100}); - // Worker 0, set the guardedVal 100 times + // WorkerContext 0, set the guardedVal 100 times mStore->ExecSync(0, [&]() { for (int64_t i = 0; i < 100; i++) { guardedVal.Set(TestPayload{i, 100 - i}); } }); - // Worker 1, read the guardedVal 200 times + // WorkerContext 1, read the guardedVal 200 times mStore->ExecSync(1, [&]() { TestPayload copiedVal; auto version = guardedVal.Get(copiedVal); @@ -64,14 +64,14 @@ TEST_F(OptimisticGuardedTest, Set) { TEST_F(OptimisticGuardedTest, UpdateAttribute) { storage::OptimisticGuarded guardedVal({0, 100}); - // Worker 0, update the guardedVal 100 times + // WorkerContext 0, update the guardedVal 100 times mStore->ExecSync(0, [&]() { for (int64_t i = 0; i < 100; i++) { guardedVal.UpdateAttribute(&TestPayload::mA, i); } }); - // Worker 1, read the guardedVal 200 times + // WorkerContext 1, read the guardedVal 200 times mStore->ExecSync(1, [&]() { TestPayload copiedVal; auto version = guardedVal.Get(copiedVal); diff --git a/tests/RecoveryTest.cpp b/tests/RecoveryTest.cpp index 104ac514..3ef4b7c2 100644 --- a/tests/RecoveryTest.cpp +++ b/tests/RecoveryTest.cpp @@ -67,8 +67,8 @@ TEST_F(RecoveryTest, SerializeAndDeserialize) { // insert some values mStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); for (size_t i = 0; i < numKVs; ++i) { const auto& [key, val] = kvToTest[i]; EXPECT_EQ(btree->Insert(key, val), OpCode::kOK); @@ -97,8 +97,8 @@ TEST_F(RecoveryTest, SerializeAndDeserialize) { // lookup the restored btree mStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); std::string copiedValue; auto copyValueOut = [&](Slice val) { copiedValue = std::string((const char*)val.data(), val.size()); @@ -112,8 +112,8 @@ TEST_F(RecoveryTest, SerializeAndDeserialize) { }); mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); mStore->DropTransactionKV(btreeName); }); @@ -145,12 +145,12 @@ TEST_F(RecoveryTest, RecoverAfterInsert) { EXPECT_NE(btree, nullptr); // insert some values - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); for (size_t i = 0; i < numKVs; ++i) { const auto& [key, val] = kvToTest[i]; EXPECT_EQ(btree->Insert(key, val), OpCode::kOK); } - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); 
// skip dumpping buffer frames on exit @@ -171,8 +171,8 @@ TEST_F(RecoveryTest, RecoverAfterInsert) { // lookup the restored btree mStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); std::string copiedValue; auto copyValueOut = [&](Slice val) { copiedValue = std::string((const char*)val.data(), val.size()); @@ -232,9 +232,9 @@ TEST_F(RecoveryTest, RecoverAfterUpdate) { // insert some values for (size_t i = 0; i < numKVs; ++i) { const auto& [key, val] = kvToTest[i]; - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Insert(key, val), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } // update all the values @@ -246,9 +246,9 @@ TEST_F(RecoveryTest, RecoverAfterUpdate) { // update each key 3 times for (auto j = 1u; j <= 3; j++) { val = GenerateValue(j, valSize); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->UpdatePartial(key, updateCallBack, *updateDesc), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } } }); @@ -271,8 +271,8 @@ TEST_F(RecoveryTest, RecoverAfterUpdate) { // lookup the restored btree mStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); std::string copiedValue; auto copyValueOut = [&](Slice val) { copiedValue = val.ToString(); }; for (size_t i = 0; i < numKVs; ++i) { @@ -311,17 +311,17 @@ TEST_F(RecoveryTest, RecoverAfterRemove) { // insert some values for (size_t i = 0; i < numKVs; ++i) { const auto& [key, val] = kvToTest[i]; - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Insert(key, val), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } // remove all the values for (size_t i = 0; i < numKVs; ++i) { auto& [key, val] = kvToTest[i]; - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Remove(key), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } }); @@ -343,8 +343,8 @@ TEST_F(RecoveryTest, RecoverAfterRemove) { // lookup the restored btree mStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); std::string copiedValue; auto copyValueOut = [&](Slice val) { copiedValue = std::string((const char*)val.data(), val.size()); diff --git a/tests/TransactionKVTest.cpp b/tests/TransactionKVTest.cpp index 7e9c74f6..57ad5d6f 100644 --- a/tests/TransactionKVTest.cpp +++ b/tests/TransactionKVTest.cpp @@ -73,8 +73,8 @@ TEST_F(TransactionKVTest, Create) { }); mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); mStore->DropTransactionKV(btreeName); mStore->DropTransactionKV(btreeName2); }); @@ -100,20 +100,20 @@ TEST_F(TransactionKVTest, InsertAndLookup) { EXPECT_NE(btree, nullptr); // insert some values - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); for (size_t i = 0; i < numKVs; ++i) { const auto& [key, val] = kvToTest[i]; EXPECT_EQ(btree->Insert(Slice((const uint8_t*)key.data(), key.size()), Slice((const uint8_t*)val.data(), val.size())), OpCode::kOK); 
} - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); // query on the created btree in the same worker mStore->ExecSync(0, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); std::string copiedValue; auto copyValueOut = [&](Slice val) { copiedValue = std::string((const char*)val.data(), val.size()); @@ -128,8 +128,8 @@ TEST_F(TransactionKVTest, InsertAndLookup) { // query on the created btree in another worker mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); std::string copiedValue; auto copyValueOut = [&](Slice val) { copiedValue = std::string((const char*)val.data(), val.size()); @@ -143,8 +143,8 @@ TEST_F(TransactionKVTest, InsertAndLookup) { }); mStore->ExecSync(1, [&]() { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); mStore->DropTransactionKV(btreeName); }); } @@ -163,7 +163,7 @@ TEST_F(TransactionKVTest, Insert1000KVs) { // insert numKVs tuples std::set uniqueKeys; ssize_t numKVs(1000); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); for (ssize_t i = 0; i < numKVs; ++i) { auto key = RandomGenerator::RandAlphString(24); if (uniqueKeys.find(key) != uniqueKeys.end()) { @@ -176,11 +176,11 @@ TEST_F(TransactionKVTest, Insert1000KVs) { Slice((const uint8_t*)val.data(), val.size())), OpCode::kOK); } - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); mStore->DropTransactionKV(btreeName); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } @@ -206,24 +206,24 @@ TEST_F(TransactionKVTest, InsertDuplicates) { } uniqueKeys.insert(key); auto val = RandomGenerator::RandAlphString(128); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Insert(Slice((const uint8_t*)key.data(), key.size()), Slice((const uint8_t*)val.data(), val.size())), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } // insert duplicated keys for (auto& key : uniqueKeys) { auto val = RandomGenerator::RandAlphString(128); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Insert(ToSlice(key), ToSlice(val)), OpCode::kDuplicated); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); mStore->DropTransactionKV(btreeName); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } @@ -250,29 +250,29 @@ TEST_F(TransactionKVTest, Remove) { uniqueKeys.insert(key); auto val = RandomGenerator::RandAlphString(128); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Insert(Slice((const uint8_t*)key.data(), key.size()), Slice((const uint8_t*)val.data(), val.size())), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } for (auto& key : uniqueKeys) { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Remove(Slice((const uint8_t*)key.data(), key.size())), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } for (auto& key : uniqueKeys) { - cr::Worker::My().StartTx(); + 
cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Lookup(Slice((const uint8_t*)key.data(), key.size()), [](Slice) {}), OpCode::kNotFound); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); mStore->DropTransactionKV(btreeName); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } @@ -299,11 +299,11 @@ TEST_F(TransactionKVTest, RemoveNotExisted) { uniqueKeys.insert(key); auto val = RandomGenerator::RandAlphString(128); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Insert(Slice((const uint8_t*)key.data(), key.size()), Slice((const uint8_t*)val.data(), val.size())), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } // remove keys not existed @@ -315,14 +315,14 @@ TEST_F(TransactionKVTest, RemoveNotExisted) { } uniqueKeys.insert(key); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Remove(Slice((const uint8_t*)key.data(), key.size())), OpCode::kNotFound); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); mStore->DropTransactionKV(btreeName); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } @@ -349,46 +349,46 @@ TEST_F(TransactionKVTest, RemoveFromOthers) { uniqueKeys.insert(key); auto val = RandomGenerator::RandAlphString(128); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Insert(Slice((const uint8_t*)key.data(), key.size()), Slice((const uint8_t*)val.data(), val.size())), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } }); mStore->ExecSync(1, [&]() { // remove from another worker for (auto& key : uniqueKeys) { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Remove(Slice((const uint8_t*)key.data(), key.size())), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } // should not found any keys for (auto& key : uniqueKeys) { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Lookup(Slice((const uint8_t*)key.data(), key.size()), [](Slice) {}), OpCode::kNotFound); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } }); mStore->ExecSync(0, [&]() { // lookup from another worker, should not found any keys for (auto& key : uniqueKeys) { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Lookup(Slice((const uint8_t*)key.data(), key.size()), [](Slice) {}), OpCode::kNotFound); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } }); mStore->ExecSync(1, [&]() { // unregister the tree from another worker - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); mStore->DropTransactionKV(btreeName); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } @@ -412,22 +412,22 @@ TEST_F(TransactionKVTest, RemoveFromOthers) { // EXPECT_NE(btree, nullptr); // // // insert some values -// cr::Worker::My().StartTx(); +// cr::WorkerContext::My().StartTx(); // for (size_t i = 0; i < numKVs; ++i) { // const auto& [key, val] = kvToTest[i]; // EXPECT_EQ(btree->Insert(Slice((const uint8_t*)key.data(), key.size()), // Slice((const uint8_t*)val.data(), val.size())), // OpCode::kOK); // } -// cr::Worker::My().CommitTx(); +// cr::WorkerContext::My().CommitTx(); // // rapidjson::Document doc(rapidjson::kObjectType); // 
leanstore::storage::btree::BTreeGeneric::ToJson(*btree, &doc); // EXPECT_GE(leanstore::utils::JsonToStr(&doc).size(), 0u); // -// cr::Worker::My().StartTx(); +// cr::WorkerContext::My().StartTx(); // mStore->DropTransactionKV(btreeName); -// cr::Worker::My().CommitTx(); +// cr::WorkerContext::My().CommitTx(); // }); // } @@ -455,10 +455,10 @@ TEST_F(TransactionKVTest, Update) { // insert values for (size_t i = 0; i < numKVs; ++i) { const auto& [key, val] = kvToTest[i]; - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = btree->Insert(Slice((const uint8_t*)key.data(), key.size()), Slice((const uint8_t*)val.data(), val.size())); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(res, OpCode::kOK); } @@ -477,10 +477,10 @@ TEST_F(TransactionKVTest, Update) { updateDesc->mUpdateSlots[0].mSize = valSize; for (size_t i = 0; i < numKVs; ++i) { const auto& [key, val] = kvToTest[i]; - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = btree->UpdatePartial(Slice((const uint8_t*)key.data(), key.size()), updateCallBack, *updateDesc); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(res, OpCode::kOK); } @@ -491,16 +491,16 @@ TEST_F(TransactionKVTest, Update) { }; for (size_t i = 0; i < numKVs; ++i) { const auto& [key, val] = kvToTest[i]; - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->Lookup(Slice((const uint8_t*)key.data(), key.size()), copyValueOut), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(copiedValue, newVal); } - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); mStore->DropTransactionKV(btreeName); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } @@ -539,10 +539,10 @@ TEST_F(TransactionKVTest, ScanAsc) { // insert values for (const auto& [key, val] : kvToTest) { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = btree->Insert(Slice((const uint8_t*)key.data(), key.size()), Slice((const uint8_t*)val.data(), val.size())); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(res, OpCode::kOK); } @@ -556,10 +556,10 @@ TEST_F(TransactionKVTest, ScanAsc) { // scan from the smallest key copiedKVs.clear(); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->ScanAsc(Slice((const uint8_t*)smallest.data(), smallest.size()), scanCallBack), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(copiedKVs.size(), numKVs); for (const auto& [key, val] : copiedKVs) { EXPECT_EQ(val, kvToTest[key]); @@ -567,17 +567,17 @@ TEST_F(TransactionKVTest, ScanAsc) { // scan from the bigest key copiedKVs.clear(); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->ScanAsc(Slice((const uint8_t*)bigest.data(), bigest.size()), scanCallBack), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(copiedKVs.size(), 1u); EXPECT_EQ(copiedKVs[bigest], kvToTest[bigest]); // destroy the tree - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); mStore->DropTransactionKV(btreeName); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } @@ -616,10 +616,10 @@ TEST_F(TransactionKVTest, ScanDesc) { // insert values for (const auto& [key, val] : kvToTest) { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = btree->Insert(Slice((const uint8_t*)key.data(), 
key.size()), Slice((const uint8_t*)val.data(), val.size())); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(res, OpCode::kOK); } @@ -633,10 +633,10 @@ TEST_F(TransactionKVTest, ScanDesc) { // scan from the bigest key copiedKVs.clear(); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ(btree->ScanDesc(Slice((const uint8_t*)bigest.data(), bigest.size()), scanCallBack), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(copiedKVs.size(), numKVs); for (const auto& [key, val] : copiedKVs) { EXPECT_EQ(val, kvToTest[key]); @@ -644,18 +644,18 @@ TEST_F(TransactionKVTest, ScanDesc) { // scan from the smallest key copiedKVs.clear(); - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); EXPECT_EQ( btree->ScanDesc(Slice((const uint8_t*)smallest.data(), smallest.size()), scanCallBack), OpCode::kOK); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(copiedKVs.size(), 1u); EXPECT_EQ(copiedKVs[smallest], kvToTest[smallest]); // destroy the tree - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); mStore->DropTransactionKV(btreeName); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } @@ -699,18 +699,18 @@ TEST_F(TransactionKVTest, InsertAfterRemove) { // insert values for (const auto& [key, val] : kvToTest) { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = btree->Insert(Slice((const uint8_t*)key.data(), key.size()), Slice((const uint8_t*)val.data(), val.size())); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(res, OpCode::kOK); } // remove, insert, and lookup for (const auto& [key, val] : kvToTest) { // remove - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); EXPECT_EQ(btree->Remove(Slice((const uint8_t*)key.data(), key.size())), OpCode::kOK); @@ -751,13 +751,13 @@ TEST_F(TransactionKVTest, InsertAfterRemove) { kvToTest.begin()->second, newVal); mStore->ExecSync(1, [&]() { // lookup the new value - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); for (const auto& [key, val] : kvToTest) { EXPECT_EQ(btree->Lookup(Slice((const uint8_t*)key.data(), key.size()), copyValueOut), OpCode::kOK); EXPECT_EQ(copiedValue, newVal); } - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); }); } @@ -801,25 +801,25 @@ TEST_F(TransactionKVTest, InsertAfterRemoveDifferentWorkers) { // insert values for (const auto& [key, val] : kvToTest) { - cr::Worker::My().StartTx(); + cr::WorkerContext::My().StartTx(); auto res = btree->Insert(Slice((const uint8_t*)key.data(), key.size()), Slice((const uint8_t*)val.data(), val.size())); - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); EXPECT_EQ(res, OpCode::kOK); } // remove for (const auto& [key, val] : kvToTest) { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); EXPECT_EQ(btree->Remove(Slice((const uint8_t*)key.data(), key.size())), OpCode::kOK); } }); mStore->ExecSync(1, [&]() { for (const auto& [key, val] : kvToTest) { - cr::Worker::My().StartTx(); - SCOPED_DEFER(cr::Worker::My().CommitTx()); + cr::WorkerContext::My().StartTx(); + SCOPED_DEFER(cr::WorkerContext::My().CommitTx()); // remove twice should got not found error EXPECT_EQ(btree->Remove(Slice((const 
uint8_t*)key.data(), key.size())), OpCode::kNotFound); diff --git a/tests/TxKV.hpp b/tests/TxKV.hpp index 9c522297..9706dc79 100644 --- a/tests/TxKV.hpp +++ b/tests/TxKV.hpp @@ -182,16 +182,16 @@ inline void LeanStoreMVCCSession::SetTxMode(TxMode txMode) { } inline void LeanStoreMVCCSession::StartTx() { - mStore->mLeanStore->ExecSync(mWorkerId, - [&]() { cr::Worker::My().StartTx(mTxMode, mIsolationLevel); }); + mStore->mLeanStore->ExecSync( + mWorkerId, [&]() { cr::WorkerContext::My().StartTx(mTxMode, mIsolationLevel); }); } inline void LeanStoreMVCCSession::CommitTx() { - mStore->mLeanStore->ExecSync(mWorkerId, [&]() { cr::Worker::My().CommitTx(); }); + mStore->mLeanStore->ExecSync(mWorkerId, [&]() { cr::WorkerContext::My().CommitTx(); }); } inline void LeanStoreMVCCSession::AbortTx() { - mStore->mLeanStore->ExecSync(mWorkerId, [&]() { cr::Worker::My().AbortTx(); }); + mStore->mLeanStore->ExecSync(mWorkerId, [&]() { cr::WorkerContext::My().AbortTx(); }); } // DDL operations @@ -213,11 +213,11 @@ inline Result LeanStoreMVCCSession::CreateTable(const std::string& tb inline Result LeanStoreMVCCSession::DropTable(const std::string& tblName, bool implicitTx) { mStore->mLeanStore->ExecSync(mWorkerId, [&]() { if (implicitTx) { - cr::Worker::My().StartTx(mTxMode, mIsolationLevel); + cr::WorkerContext::My().StartTx(mTxMode, mIsolationLevel); } mStore->mLeanStore->DropTransactionKV(tblName); if (implicitTx) { - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } }); return {}; @@ -230,13 +230,13 @@ inline Result LeanStoreMVCCSession::Put(TableRef* tbl, Slice key, Slice va OpCode res; mStore->mLeanStore->ExecSync(mWorkerId, [&]() { if (implicitTx) { - cr::Worker::My().StartTx(mTxMode, mIsolationLevel); + cr::WorkerContext::My().StartTx(mTxMode, mIsolationLevel); } SCOPED_DEFER(if (implicitTx) { if (res == OpCode::kOK) { - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } else { - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); } }); @@ -260,13 +260,13 @@ inline Result LeanStoreMVCCSession::Get(TableRef* tbl, Slice key, std: mStore->mLeanStore->ExecSync(mWorkerId, [&]() { if (implicitTx) { - cr::Worker::My().StartTx(mTxMode, mIsolationLevel); + cr::WorkerContext::My().StartTx(mTxMode, mIsolationLevel); } SCOPED_DEFER(if (implicitTx) { if (res == OpCode::kOK || res == OpCode::kNotFound) { - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } else { - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); } }); @@ -290,13 +290,13 @@ inline Result LeanStoreMVCCSession::Update(TableRef* tbl, Slice key, S }; mStore->mLeanStore->ExecSync(mWorkerId, [&]() { if (implicitTx) { - cr::Worker::My().StartTx(mTxMode, mIsolationLevel); + cr::WorkerContext::My().StartTx(mTxMode, mIsolationLevel); } SCOPED_DEFER(if (implicitTx) { if (res == OpCode::kOK || res == OpCode::kNotFound) { - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } else { - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); } }); @@ -323,13 +323,13 @@ inline Result LeanStoreMVCCSession::Delete(TableRef* tbl, Slice key, b OpCode res; mStore->mLeanStore->ExecSync(mWorkerId, [&]() { if (implicitTx) { - cr::Worker::My().StartTx(mTxMode, mIsolationLevel); + cr::WorkerContext::My().StartTx(mTxMode, mIsolationLevel); } SCOPED_DEFER(if (implicitTx) { if (res == OpCode::kOK || res == OpCode::kNotFound) { - cr::Worker::My().CommitTx(); + cr::WorkerContext::My().CommitTx(); } else { - cr::Worker::My().AbortTx(); + cr::WorkerContext::My().AbortTx(); 
} }); diff --git a/tests/btree/BasicKvIteratorTest.cpp b/tests/btree/BasicKvIteratorTest.cpp index 44ae5d9a..57fe5465 100644 --- a/tests/btree/BasicKvIteratorTest.cpp +++ b/tests/btree/BasicKvIteratorTest.cpp @@ -52,11 +52,11 @@ TEST_F(BasicKvIteratorTest, BasicKvHandle) { // lookup for (auto i = 0; i < numEntries; i++) { - String* valStr = BasicKvLookup(mKvHandle, 1, {keys[i].data(), keys[i].size()}); - ASSERT_NE(valStr, nullptr); - EXPECT_EQ(valStr->mSize, vals[i].size()); - EXPECT_EQ(memcmp(valStr->mData, vals[i].data(), valStr->mSize), 0); - DestroyString(valStr); + String* val = CreateString(nullptr, 0); + bool found = BasicKvLookup(mKvHandle, 1, {keys[i].data(), keys[i].size()}, &val); + ASSERT_TRUE(found); + EXPECT_EQ(val->mSize, vals[i].size()); + EXPECT_EQ(memcmp(val->mData, vals[i].data(), val->mSize), 0); } // remove 50 key-value pairs From 613e752850e073e52b3e8a060b04efaf8a01935e Mon Sep 17 00:00:00 2001 From: Jian Zhang Date: Fri, 23 Aug 2024 13:16:53 +0800 Subject: [PATCH 3/4] chore: remove unused sources --- benchmarks/shared/Adapter.hpp | 81 - benchmarks/shared/GenericSchema.hpp | 49 - benchmarks/shared/LMDBAdapter.hpp | 189 --- benchmarks/shared/LeanStoreAdapter.hpp | 151 -- benchmarks/shared/LeanStoreAdapterNC.hpp | 225 --- benchmarks/shared/RocksDBAdapter.hpp | 221 --- benchmarks/shared/Schema.hpp | 61 - benchmarks/shared/Types.hpp | 129 -- benchmarks/shared/lmdb++.hpp | 1847 --------------------- include/leanstore/buffer-manager/Swip.hpp | 2 +- 10 files changed, 1 insertion(+), 2954 deletions(-) delete mode 100644 benchmarks/shared/Adapter.hpp delete mode 100644 benchmarks/shared/GenericSchema.hpp delete mode 100644 benchmarks/shared/LMDBAdapter.hpp delete mode 100644 benchmarks/shared/LeanStoreAdapter.hpp delete mode 100644 benchmarks/shared/LeanStoreAdapterNC.hpp delete mode 100644 benchmarks/shared/RocksDBAdapter.hpp delete mode 100644 benchmarks/shared/Schema.hpp delete mode 100644 benchmarks/shared/Types.hpp delete mode 100644 benchmarks/shared/lmdb++.hpp diff --git a/benchmarks/shared/Adapter.hpp b/benchmarks/shared/Adapter.hpp deleted file mode 100644 index 68a458c2..00000000 --- a/benchmarks/shared/Adapter.hpp +++ /dev/null @@ -1,81 +0,0 @@ -#pragma once - -#include "Types.hpp" -#include "leanstore/Exceptions.hpp" -#include "leanstore/KVInterface.hpp" - -#include -#include -#include -#include -#include - -// Helpers to generate a descriptor that describes which attributes are in-place -// updating in a fixed-size value -#define UpdateDescriptorInit(Name, Count) \ - uint8_t \ - Name##_buffer[sizeof(leanstore::UpdateDesc) + (sizeof(leanstore::UpdateSlotInfo) * Count)]; \ - auto& Name = *reinterpret_cast(Name##_buffer); \ - Name.count = Count; - -#define UpdateDescriptorFillSlot(Name, Index, Type, Attribute) \ - Name.mUpdateSlots[Index].offset = offsetof(Type, Attribute); \ - Name.mUpdateSlots[Index].length = sizeof(Type::Attribute); - -#define UpdateDescriptorGenerator1(Name, Type, A0) \ - UpdateDescriptorInit(Name, 1); \ - UpdateDescriptorFillSlot(Name, 0, Type, A0); - -#define UpdateDescriptorGenerator2(Name, Type, A0, A1) \ - UpdateDescriptorInit(Name, 2); \ - UpdateDescriptorFillSlot(Name, 0, Type, A0); \ - UpdateDescriptorFillSlot(Name, 1, Type, A1); - -#define UpdateDescriptorGenerator3(Name, Type, A0, A1, A2) \ - UpdateDescriptorInit(Name, 3); \ - UpdateDescriptorFillSlot(Name, 0, Type, A0); \ - UpdateDescriptorFillSlot(Name, 1, Type, A1); \ - UpdateDescriptorFillSlot(Name, 2, Type, A2); - -#define UpdateDescriptorGenerator4(Name, Type, A0, A1, A2, A3) \ - 
UpdateDescriptorInit(Name, 4); \ - UpdateDescriptorFillSlot(Name, 0, Type, A0); \ - UpdateDescriptorFillSlot(Name, 1, Type, A1); \ - UpdateDescriptorFillSlot(Name, 2, Type, A2); \ - UpdateDescriptorFillSlot(Name, 3, Type, A3); - -// ------------------------------------------------------------------------------------- -// Unified interface used by our benchmarks for different storage engines -// including LeanStore -template -class Adapter { -public: - // Scan in ascending order, scan can fail if it is executed in optimistic mode - // without latching the leaves - virtual void scan( - const typename Record::Key& key, - const std::function& found_record_cb, - std::function reset_if_scan_failed_cb) = 0; - // ------------------------------------------------------------------------------------- - virtual void ScanDesc( - const typename Record::Key& key, - const std::function& found_record_cb, - std::function reset_if_scan_failed_cb) = 0; - // ------------------------------------------------------------------------------------- - virtual void insert(const typename Record::Key& key, const Record& record) = 0; - // ------------------------------------------------------------------------------------- - virtual void lookup1(const typename Record::Key& key, - const std::function& callback) = 0; - // ------------------------------------------------------------------------------------- - virtual void update1(const typename Record::Key& key, - const std::function& update_the_record_in_place_cb, - leanstore::UpdateDesc& update_descriptor) = 0; - // ------------------------------------------------------------------------------------- - // Returns false if the record was not found - virtual bool erase(const typename Record::Key& key) = 0; - // ------------------------------------------------------------------------------------- - template - Field lookupField(const typename Record::Key& key, Field Record::*f) { - UNREACHABLE(); - } -}; diff --git a/benchmarks/shared/GenericSchema.hpp b/benchmarks/shared/GenericSchema.hpp deleted file mode 100644 index 1825414b..00000000 --- a/benchmarks/shared/GenericSchema.hpp +++ /dev/null @@ -1,49 +0,0 @@ -#pragma once - -#include "shared/Types.hpp" - -template -struct Relation { - static constexpr int id = 0; - struct Key { - static constexpr int id = 0; - KeyT mKey; - }; - ValT mValue; - // ------------------------------------------------------------------------------------- - template - static unsigned foldKey(uint8_t* out, const T& key) { - unsigned pos = 0; - pos += Fold(out + pos, key.mKey); - return pos; - } - template - static unsigned unfoldKey(const uint8_t* in, T& key) { - unsigned pos = 0; - pos += Unfold(in + pos, key.mKey); - return pos; - } - static constexpr unsigned maxFoldLength() { - return 0 + sizeof(Key::mKey); - }; -}; -// ------------------------------------------------------------------------------------- -template -struct BytesPayload { - uint8_t value[size]; - BytesPayload() { - } - bool operator==(BytesPayload& other) { - return (std::memcmp(value, other.value, sizeof(value)) == 0); - } - bool operator!=(BytesPayload& other) { - return !(operator==(other)); - } - BytesPayload(const BytesPayload& other) { - std::memcpy(value, other.value, sizeof(value)); - } - BytesPayload& operator=(const BytesPayload& other) { - std::memcpy(value, other.value, sizeof(value)); - return *this; - } -}; diff --git a/benchmarks/shared/LMDBAdapter.hpp b/benchmarks/shared/LMDBAdapter.hpp deleted file mode 100644 index ca7c3f2c..00000000 --- 
a/benchmarks/shared/LMDBAdapter.hpp +++ /dev/null @@ -1,189 +0,0 @@ -#pragma once - -#include "Adapter.hpp" -#include "Types.hpp" -// ------------------------------------------------------------------------------------- -#include "leanstore/KVInterface.hpp" -#include "leanstore/utils/JumpMU.hpp" -#include "lmdb++.hpp" // Using C++ Wrapper from LMDB -// ------------------------------------------------------------------------------------- -#include -#include -#include -#include -#include -// ------------------------------------------------------------------------------------- -struct LMDB { - lmdb::env env; - lmdb::txn dummy_tx{nullptr}; - static thread_local lmdb::txn txn; - - LMDB() : env(lmdb::env::create()) { - env.set_max_dbs(100); - // FLAGS_dram_gib is misued here to set the maximum map size for LMDB - env.set_mapsize(FLAGS_dram_gib * 1024UL * 1024UL * 1024UL); - env.open(FLAGS_ssd_path.c_str(), MDB_NOSYNC); - } - // ------------------------------------------------------------------------------------- - void StartTx(bool read_only = false) { - txn = lmdb::txn::begin(env, nullptr, read_only ? MDB_RDONLY : 0); - } - // ------------------------------------------------------------------------------------- - void CommitTx() { - txn.commit(); - } - // ------------------------------------------------------------------------------------- - void AbortTx() { - txn.abort(); - } - // ------------------------------------------------------------------------------------- - ~LMDB() { - } -}; -// ------------------------------------------------------------------------------------- -template -struct LMDBAdapter : public Adapter { - LMDB& map; - std::string name; - lmdb::dbi dbi; - // ------------------------------------------------------------------------------------- - lmdb::txn& hack() { - map.dummy_tx = lmdb::txn::begin(map.env); - return map.dummy_tx; - } - // ------------------------------------------------------------------------------------- - LMDBAdapter(LMDB& map, std::string name) - : map(map), - name(name), - dbi(lmdb::dbi::open(hack(), name.c_str(), MDB_CREATE)) { - map.dummy_tx.commit(); - } - // ------------------------------------------------------------------------------------- - void insert(const typename Record::Key& key, const Record& record) final { - uint8_t folded_key[Record::maxFoldLength()]; - const uint32_t folded_key_len = Record::foldKey(folded_key, key); - lmdb::val lmdb_key{folded_key, folded_key_len}; - lmdb::val lmdb_payload{const_cast(&record), sizeof(record)}; - // ------------------------------------------------------------------------------------- - if (!dbi.put(map.txn, lmdb_key, lmdb_payload)) - throw; - } - // ------------------------------------------------------------------------------------- - void lookup1(const typename Record::Key& key, - const std::function& fn) final { - uint8_t folded_key[Record::maxFoldLength()]; - const uint32_t folded_key_len = Record::foldKey(folded_key, key); - lmdb::val lmdb_key{folded_key, folded_key_len}; - lmdb::val lmdb_payload; - // ------------------------------------------------------------------------------------- - if (!dbi.get(map.txn, lmdb_key, lmdb_payload)) - throw; - Record& record = *reinterpret_cast(lmdb_payload.data()); - fn(record); - } - // ------------------------------------------------------------------------------------- - void update1(const typename Record::Key& key, const std::function& fn, - leanstore::UpdateDesc&) final { - Record r; - lookup1(key, [&](const Record& rec) { r = rec; }); - fn(r); - insert(key, 
r); - } - // ------------------------------------------------------------------------------------- - bool erase(const typename Record::Key& key) final { - uint8_t folded_key[Record::maxFoldLength()]; - const uint32_t folded_key_len = Record::foldKey(folded_key, key); - lmdb::val lmdb_key{folded_key, folded_key_len}; - // ------------------------------------------------------------------------------------- - if (!dbi.del(map.txn, lmdb_key)) - throw; - return true; - } - // ------------------------------------------------------------------------------------- - void scan(const typename Record::Key& key, - const std::function& fn, - std::function) final { - uint8_t folded_key[Record::maxFoldLength()]; - const uint32_t folded_key_len = Record::foldKey(folded_key, key); - lmdb::val lmdb_key{folded_key, folded_key_len}; - // ------------------------------------------------------------------------------------- - lmdb::val lmdb_payload; - lmdb::cursor cursor = lmdb::cursor::open(map.txn, dbi); - if (cursor.get(lmdb_key, lmdb_payload, MDB_SET_RANGE)) { - bool cont; - do { - typename Record::Key s_key; - Record::unfoldKey(reinterpret_cast(lmdb_key.data()), s_key); - Record& s_value = *reinterpret_cast(lmdb_payload.data()); - cont = fn(s_key, s_value); - } while (cont && cursor.get(lmdb_key, lmdb_payload, MDB_NEXT)); - } - } - // ------------------------------------------------------------------------------------- - void ScanDesc(const typename Record::Key& key, - const std::function& fn, - std::function) final { - uint8_t folded_key[Record::maxFoldLength()]; - const uint32_t folded_key_len = Record::foldKey(folded_key, key); - lmdb::val lmdb_key{folded_key, folded_key_len}; - // ------------------------------------------------------------------------------------- - lmdb::val lmdb_payload; - lmdb::cursor cursor = lmdb::cursor::open(map.txn, dbi); - if (!cursor.get(lmdb_key, lmdb_payload, MDB_SET_RANGE)) { - if (!cursor.get(lmdb_key, lmdb_payload, MDB_LAST)) { - return; - } - } - while (true) { - std::basic_string_view upper(folded_key, folded_key_len); - std::basic_string_view current(reinterpret_cast(lmdb_key.data()), - lmdb_key.size()); - if (current > upper) { - if (cursor.get(lmdb_key, lmdb_payload, MDB_PREV)) { - continue; - } else { - return; - } - } else { - break; - } - } - bool cont; - do { - typename Record::Key s_key; - Record::unfoldKey(reinterpret_cast(lmdb_key.data()), s_key); - Record& s_value = *reinterpret_cast(lmdb_payload.data()); - cont = fn(s_key, s_value); - } while (cont && cursor.get(lmdb_key, lmdb_payload, MDB_PREV)); - } - // ------------------------------------------------------------------------------------- - template - auto lookupField(const typename Record::Key& key, Field f) { - uint8_t folded_key[Record::maxFoldLength()]; - const uint32_t folded_key_len = Record::foldKey(folded_key, key); - lmdb::val lmdb_key{folded_key, folded_key_len}; - lmdb::val lmdb_payload; - // ------------------------------------------------------------------------------------- - if (!dbi.get(map.txn, lmdb_key, lmdb_payload)) - throw; - Record& record = *reinterpret_cast(lmdb_payload.data()); - auto ret = record.*f; - return ret; - } - // ------------------------------------------------------------------------------------- - uint64_t count() { - uint8_t folded_key[Record::maxFoldLength()]; - const uint32_t folded_key_len = Record::foldKey(folded_key, 0); - lmdb::val lmdb_key{folded_key, folded_key_len}; - lmdb::cursor cursor = lmdb::cursor::open(map.txn, dbi); - lmdb::val lmdb_payload; - // 
------------------------------------------------------------------------------------- - uint64_t count = 0; - if (cursor.get(lmdb_key, lmdb_payload, MDB_SET_RANGE)) { - do { - count++; - } while (cursor.get(lmdb_key, lmdb_payload, MDB_NEXT)); - } - return count; - } -}; diff --git a/benchmarks/shared/LeanStoreAdapter.hpp b/benchmarks/shared/LeanStoreAdapter.hpp deleted file mode 100644 index 971f1cfd..00000000 --- a/benchmarks/shared/LeanStoreAdapter.hpp +++ /dev/null @@ -1,151 +0,0 @@ -#pragma once - -#include "Adapter.hpp" -#include "leanstore/LeanStore.hpp" -#include "leanstore/utils/Log.hpp" - -#include - -using namespace leanstore; -template -struct LeanStoreAdapter : Adapter { - - leanstore::KVInterface* btree; - - std::string name; - - LeanStoreAdapter() { - } - - LeanStoreAdapter(LeanStore& db, std::string name) : name(name) { - if (!db.mStoreOption.mCreateFromScratch) { - leanstore::storage::btree::TransactionKV* tree; - db.GetTransactionKV(name, &tree); - btree = reinterpret_cast(tree); - } else { - leanstore::storage::btree::TransactionKV* tree; - auto res = db.CreateTransactionKV(name); - if (res) { - tree = res.value(); - } else { - Log::Fatal( - std::format("failed to create transaction kv, error={}", res.error().ToString())); - } - btree = reinterpret_cast(tree); - } - } - - void ScanDesc(const typename Record::Key& key, - const std::function& cb, - std::function undo [[maybe_unused]]) final { - uint8_t foldedKey[Record::maxFoldLength()]; - uint16_t foldedKeySize = Record::foldKey(foldedKey, key); - OpCode ret = btree->ScanDesc(Slice(foldedKey, foldedKeySize), [&](Slice key, Slice val) { - if (key.size() != foldedKeySize) { - return false; - } - typename Record::Key typed_key; - Record::unfoldKey(key.data(), typed_key); - const auto& record = *reinterpret_cast(val.data()); - return cb(typed_key, record); - }); - if (ret == leanstore::OpCode::kAbortTx) { - cr::WorkerContext::My().AbortTx(); - } - } - - void insert(const typename Record::Key& key, const Record& record) final { - uint8_t foldedKey[Record::maxFoldLength()]; - uint16_t foldedKeySize = Record::foldKey(foldedKey, key); - const OpCode res = - btree->Insert(Slice(foldedKey, foldedKeySize), Slice((uint8_t*)(&record), sizeof(Record))); - LS_DCHECK(res == leanstore::OpCode::kOK || res == leanstore::OpCode::kAbortTx); - if (res == leanstore::OpCode::kAbortTx) { - cr::WorkerContext::My().AbortTx(); - } - } - - void lookup1(const typename Record::Key& key, - const std::function& cb) final { - uint8_t foldedKey[Record::maxFoldLength()]; - uint16_t foldedKeySize = Record::foldKey(foldedKey, key); - const OpCode res = btree->Lookup(Slice(foldedKey, foldedKeySize), [&](Slice val) { - const Record& record = *reinterpret_cast(val.data()); - cb(record); - }); - if (res == leanstore::OpCode::kAbortTx) { - cr::WorkerContext::My().AbortTx(); - } - LS_DCHECK(res == leanstore::OpCode::kOK); - } - - void update1(const typename Record::Key& key, const std::function& cb, - UpdateDesc& updateDesc) final { - uint8_t foldedKey[Record::maxFoldLength()]; - uint16_t foldedKeySize = Record::foldKey(foldedKey, key); - - const OpCode res = btree->UpdatePartial( - Slice(foldedKey, foldedKeySize), - [&](MutableSlice mutRawVal) { - LS_DCHECK(mutRawVal.Size() == sizeof(Record)); - auto& record = *reinterpret_cast(mutRawVal.Data()); - cb(record); - }, - updateDesc); - LS_DCHECK(res != leanstore::OpCode::kNotFound); - if (res == leanstore::OpCode::kAbortTx) { - cr::WorkerContext::My().AbortTx(); - } - } - - bool erase(const typename Record::Key& key) 
final { - uint8_t foldedKey[Record::maxFoldLength()]; - uint16_t foldedKeySize = Record::foldKey(foldedKey, key); - const auto res = btree->Remove(Slice(foldedKey, foldedKeySize)); - if (res == leanstore::OpCode::kAbortTx) { - cr::WorkerContext::My().AbortTx(); - } - return (res == leanstore::OpCode::kOK); - } - - void scan(const typename Record::Key& key, - const std::function& cb, - std::function undo [[maybe_unused]]) final { - uint8_t foldedKey[Record::maxFoldLength()]; - uint16_t foldedKeySize = Record::foldKey(foldedKey, key); - OpCode ret = btree->ScanAsc(Slice(foldedKey, foldedKeySize), [&](Slice key, Slice val) { - if (key.size() != foldedKeySize) { - return false; - } - static_cast(val.size()); - typename Record::Key typed_key; - Record::unfoldKey(key.data(), typed_key); - const Record& record = *reinterpret_cast(val.data()); - return cb(typed_key, record); - }); - if (ret == leanstore::OpCode::kAbortTx) { - cr::WorkerContext::My().AbortTx(); - } - } - - template - Field lookupField(const typename Record::Key& key, Field Record::*f) { - uint8_t foldedKey[Record::maxFoldLength()]; - uint16_t foldedKeySize = Record::foldKey(foldedKey, key); - Field local_f; - const OpCode res = - btree->Lookup(foldedKey, foldedKeySize, [&](const uint8_t* payload, uint16_t payloadSize) { - Record& record = *const_cast(reinterpret_cast(payload)); - local_f = (record).*f; - }); - if (res == leanstore::OpCode::kAbortTx) { - cr::WorkerContext::My().AbortTx(); - } - LS_DCHECK(res == OpCode::kOK); - return local_f; - } - - uint64_t count() { - return btree->CountEntries(); - } -}; diff --git a/benchmarks/shared/LeanStoreAdapterNC.hpp b/benchmarks/shared/LeanStoreAdapterNC.hpp deleted file mode 100644 index 3c69d4cc..00000000 --- a/benchmarks/shared/LeanStoreAdapterNC.hpp +++ /dev/null @@ -1,225 +0,0 @@ -#pragma once -#include "Adapter.hpp" -// ------------------------------------------------------------------------------------- -#include "leanstore/LeanStore.hpp" -#include "leanstore/utils/RandomGenerator.hpp" -// ------------------------------------------------------------------------------------- -#include -#include -#include -#include -#include - -using namespace leanstore; -using TID = uint64_t; -std::atomic global_tid[1024] = {0}; -// ------------------------------------------------------------------------------------- -template -struct LeanStoreAdapter : Adapter { - leanstore::storage::btree::BasicKV* key_tid; - leanstore::storage::btree::BasicKV* tid_value; - std::string name; - // ------------------------------------------------------------------------------------- - LeanStoreAdapter() { - // hack - } - LeanStoreAdapter(LeanStore& db, std::string name) : name(name) { - key_tid = &db.registerBasicKV(name + "_key_tid", false); - tid_value = &db.registerBasicKV(name + "_tid_value", false); - } - // ------------------------------------------------------------------------------------- - void insert(const typename Record::Key& key, const Record& record) final { - uint8_t folded_key[Record::maxFoldLength()]; - uint16_t folded_key_len = Record::foldKey(folded_key, key); - // ------------------------------------------------------------------------------------- - TID tid = global_tid[Record::id * 8].fetch_add(1); - OpCode res; - res = key_tid->Insert(folded_key, folded_key_len, (uint8_t*)(&tid), sizeof(TID)); - ensure(res == leanstore::OpCode::kOK); - res = tid_value->Insert((uint8_t*)&tid, sizeof(TID), (uint8_t*)(&record), sizeof(Record)); - ensure(res == leanstore::OpCode::kOK); - } - - void moveIt(TID 
tid, uint8_t* folded_key, uint16_t folded_key_len) { - if (tid & (1ull << 63)) { - return; - } - } - // ------------------------------------------------------------------------------------- - void lookup1(const typename Record::Key& key, - const std::function& cb) final { - uint8_t folded_key[Record::maxFoldLength()]; - uint16_t folded_key_len = Record::foldKey(folded_key, key); - // ------------------------------------------------------------------------------------- - OpCode ret; - TID tid; - ret = key_tid->lookup( - folded_key, folded_key_len, [&](const uint8_t* payload, uint16_t payload_length) { - ensure(payload_length == sizeof(TID)); - tid = *reinterpret_cast(payload); - // ------------------------------------------------------------------------------------- - tid_value->lookup( - (uint8_t*)&tid, sizeof(TID), [&](const uint8_t* payload, uint16_t payload_length) { - ensure(payload_length == sizeof(Record)); - const Record& typed_payload = *reinterpret_cast(payload); - cb(typed_payload); - }); - }); - ensure(ret == OpCode::kOK); - // ------------------------------------------------------------------------------------- - moveIt(tid, folded_key, folded_key_len); - } - // ------------------------------------------------------------------------------------- - void update1(const typename Record::Key& key, const std::function& cb, - UpdateDesc& update_descriptor) final { - uint8_t folded_key[Record::maxFoldLength()]; - uint16_t folded_key_len = Record::foldKey(folded_key, key); - // ------------------------------------------------------------------------------------- - UpdateDesc tmp; - tmp.count = 0; - OpCode ret; - TID tid; - ret = key_tid->UpdatePartial( - folded_key, folded_key_len, - [&](uint8_t* tid_payload, uint16_t tid_payload_length) { - ensure(tid_payload_length == sizeof(TID)); - tid = *reinterpret_cast(tid_payload); - // ------------------------------------------------------------------------------------- - OpCode ret2 = tid_value->UpdatePartial((uint8_t*)&tid, sizeof(TID), - [&](uint8_t* payload, uint16_t payload_length) { - static_cast(payload_length); - assert(payload_length == sizeof(Record)); - Record& typed_payload = - *reinterpret_cast(payload); - cb(typed_payload); - }, - update_descriptor); - ensure(ret2 == OpCode::kOK); - }, - tmp); - ensure(ret == OpCode::kOK); - moveIt(tid, folded_key, folded_key_len); - } - // ------------------------------------------------------------------------------------- - bool erase(const typename Record::Key& key) final { - uint8_t folded_key[Record::maxFoldLength()]; - uint16_t folded_key_len = Record::foldKey(folded_key, key); - // ------------------------------------------------------------------------------------- - OpCode ret; - TID tid; - ret = key_tid->lookup(folded_key, folded_key_len, - [&](const uint8_t* payload, uint16_t payload_length) { - ensure(payload_length == sizeof(TID)); - tid = *reinterpret_cast(payload); - }); - if (ret != OpCode::kOK) { - return false; - } - // ------------------------------------------------------------------------------------- - ret = tid_value->Remove((uint8_t*)&tid, sizeof(TID)); - if (ret != OpCode::kOK) { - return false; - } - ret = key_tid->Remove(folded_key, folded_key_len); - if (ret != OpCode::kOK) { - return false; - } - return true; - } - // ------------------------------------------------------------------------------------- - void scan(const typename Record::Key& key, - const std::function& cb, - std::function undo) final { - uint8_t folded_key[Record::maxFoldLength()]; - uint16_t 
folded_key_len = Record::foldKey(folded_key, key); - // ------------------------------------------------------------------------------------- - OpCode ret; - ret = key_tid->ScanAsc( - folded_key, folded_key_len, - [&](const uint8_t* key, [[maybe_unused]] uint16_t keySize, const uint8_t* tid_ptr, - [[maybe_unused]] uint16_t tid_length) { - TID tid = *reinterpret_cast(tid_ptr); - ensure(tid_length == sizeof(TID)); - // ------------------------------------------------------------------------------------- - bool should_continue; - OpCode res2 = tid_value->lookup( - (uint8_t*)&tid, sizeof(TID), [&](const uint8_t* value_ptr, uint16_t valSize) { - ensure(valSize == sizeof(Record)); - typename Record::Key typed_key; - Record::unfoldKey(key, typed_key); - const Record& typed_payload = *reinterpret_cast(value_ptr); - should_continue = cb(typed_key, typed_payload); - }); - if (res2 == OpCode::kOK) { - return should_continue; - } else { - return true; - } - }, - undo); - ensure(ret == OpCode::kOK); - } - // ------------------------------------------------------------------------------------- - void ScanDesc(const typename Record::Key& key, - const std::function& cb, - std::function undo) final { - uint8_t folded_key[Record::maxFoldLength()]; - uint16_t folded_key_len = Record::foldKey(folded_key, key); - // ------------------------------------------------------------------------------------- - OpCode ret; - ret = key_tid->ScanDesc( - folded_key, folded_key_len, - [&](const uint8_t* key, [[maybe_unused]] uint16_t keySize, const uint8_t* tid_ptr, - [[maybe_unused]] uint16_t tid_length) { - const TID tid = *reinterpret_cast(tid_ptr); - ensure(tid_length == sizeof(TID)); - // ------------------------------------------------------------------------------------- - bool should_continue; - OpCode res2 = tid_value->lookup( - (uint8_t*)&tid, sizeof(TID), [&](const uint8_t* value_ptr, uint16_t valSize) { - ensure(valSize == sizeof(Record)); - typename Record::Key typed_key; - Record::unfoldKey(key, typed_key); - const Record& typed_payload = *reinterpret_cast(value_ptr); - should_continue = cb(typed_key, typed_payload); - }); - if (res2 == OpCode::kOK) { - return should_continue; - } else { - return true; - } - }, - undo); - ensure(ret == OpCode::kOK); - } - // ------------------------------------------------------------------------------------- - template - Field lookupField(const typename Record::Key& key, Field Record::*f) { - uint8_t folded_key[Record::maxFoldLength()]; - uint16_t folded_key_len = Record::foldKey(folded_key, key); - // ------------------------------------------------------------------------------------- - OpCode ret; - TID tid; - ret = key_tid->lookup(folded_key, folded_key_len, - [&](const uint8_t* payload, uint16_t payload_length) { - ensure(payload_length == sizeof(TID)); - tid = *reinterpret_cast(payload); - }); - ensure(ret == OpCode::kOK); - // ------------------------------------------------------------------------------------- - Field local_f; - ret = tid_value->lookup( - (uint8_t*)&tid, sizeof(TID), [&](const uint8_t* payload, uint16_t payload_length) { - ensure(payload_length == sizeof(Record)); - const Record& typed_payload = *reinterpret_cast(payload); - local_f = (typed_payload).*f; - }); - ensure(ret == OpCode::kOK); - moveIt(tid, folded_key, folded_key_len); - return local_f; - } - // ------------------------------------------------------------------------------------- - uint64_t count() { - return 0; - } -}; diff --git a/benchmarks/shared/RocksDBAdapter.hpp 
b/benchmarks/shared/RocksDBAdapter.hpp deleted file mode 100644 index d9b2494c..00000000 --- a/benchmarks/shared/RocksDBAdapter.hpp +++ /dev/null @@ -1,221 +0,0 @@ -#pragma once -#include "Adapter.hpp" -#include "Types.hpp" -// ------------------------------------------------------------------------------------- -#include "leanstore/KVInterface.hpp" -#include "leanstore/utils/JumpMU.hpp" - -#include "rocksdb/db.h" -#include "rocksdb/utilities/optimistic_transaction_db.h" -#include "rocksdb/utilities/transaction_db.h" -// ------------------------------------------------------------------------------------- -#include -#include -#include -#include -#include - -struct RocksDB { - union { - rocksdb::DB* db = nullptr; - rocksdb::TransactionDB* tx_db; - rocksdb::OptimisticTransactionDB* optimistic_transaction_db; - }; - static thread_local rocksdb::Transaction* txn; - rocksdb::WriteOptions wo; - rocksdb::ReadOptions ro; - enum class DB_TYPE : uint8_t { DB, TransactionDB, OptimisticDB }; - const DB_TYPE type; - // ------------------------------------------------------------------------------------- - RocksDB(DB_TYPE type = DB_TYPE::DB) : type(type) { - wo.disableWAL = true; - wo.sync = false; - // ------------------------------------------------------------------------------------- - rocksdb::Options db_options; - db_options.use_direct_reads = true; - db_options.use_direct_io_for_flush_and_compaction = true; - db_options.db_write_buffer_size = 0; // disabled - // db_options.write_buffer_size = 64 * 1024 * 1024; keep the default - db_options.create_if_missing = true; - db_options.manual_wal_flush = true; - db_options.compression = rocksdb::CompressionType::kNoCompression; - // db_options.OptimizeLevelStyleCompaction(FLAGS_dram_gib * 1024 * 1024 * - // 1024); - db_options.row_cache = rocksdb::NewLRUCache(FLAGS_dram_gib * 1024 * 1024 * 1024); - rocksdb::Status s; - if (type == DB_TYPE::DB) { - s = rocksdb::DB::Open(db_options, FLAGS_ssd_path, &db); - } else if (type == DB_TYPE::TransactionDB) { - s = rocksdb::TransactionDB::Open(db_options, {}, FLAGS_ssd_path, &tx_db); - } else if (type == DB_TYPE::OptimisticDB) { - s = rocksdb::OptimisticTransactionDB::Open(db_options, FLAGS_ssd_path, - &optimistic_transaction_db); - } - if (!s.ok()) - cerr << s.ToString() << endl; - assert(s.ok()); - } - - ~RocksDB() { - delete db; - } - void StartTx() { - rocksdb::Status s; - if (type == DB_TYPE::TransactionDB) { - txn = tx_db->BeginTransaction(wo, {}); - } else if (type == DB_TYPE::OptimisticDB) { - txn = optimistic_transaction_db->BeginTransaction({}, {}); - } else { - } - } - void CommitTx() { - if (type != DB_TYPE::DB) { - rocksdb::Status s; - s = txn->Commit(); - delete txn; - txn = nullptr; - } - } - void prepareThread() { - } -}; -// ------------------------------------------------------------------------------------- -template -struct RocksDBAdapter : public Adapter { - using SEP = uint32_t; // use 32-bits integer as separator instead of column family - RocksDB& map; - RocksDBAdapter(RocksDB& map) : map(map) { - } - // ------------------------------------------------------------------------------------- - template - rocksdb::Slice RSlice(T* ptr, uint64_t len) { - return rocksdb::Slice(reinterpret_cast(ptr), len); - } - // ------------------------------------------------------------------------------------- - void insert(const typename Record::Key& key, const Record& record) final { - uint8_t folded_key[Record::maxFoldLength() + sizeof(SEP)]; - const uint32_t folded_key_len = - Fold(folded_key, Record::id) 
+ Record::foldKey(folded_key + sizeof(SEP), key); - // ------------------------------------------------------------------------------------- - rocksdb::Status s; - if (map.type == RocksDB::DB_TYPE::DB) { - s = map.db->Put(map.wo, RSlice(folded_key, folded_key_len), RSlice(&record, sizeof(record))); - ensure(s.ok()); - } else { - s = map.txn->Put(RSlice(folded_key, folded_key_len), RSlice(&record, sizeof(record))); - if (!s.ok()) { - map.txn->Rollback(); - jumpmu::Jump(); - } - } - } - // ------------------------------------------------------------------------------------- - void lookup1(const typename Record::Key& key, - const std::function& fn) final { - uint8_t folded_key[Record::maxFoldLength() + sizeof(SEP)]; - const uint32_t folded_key_len = - Fold(folded_key, Record::id) + Record::foldKey(folded_key + sizeof(SEP), key); - // ------------------------------------------------------------------------------------- - rocksdb::PinnableSlice value; - rocksdb::Status s; - if (map.type == RocksDB::DB_TYPE::DB) { - s = map.db->Get(map.ro, map.db->DefaultColumnFamily(), RSlice(folded_key, folded_key_len), - &value); - } else { - s = map.txn->Get(map.ro, map.db->DefaultColumnFamily(), RSlice(folded_key, folded_key_len), - &value); - } - assert(s.ok()); - const Record& record = *reinterpret_cast(value.data()); - fn(record); - value.Reset(); - } - // ------------------------------------------------------------------------------------- - void update1(const typename Record::Key& key, const std::function& fn, - leanstore::UpdateDesc&) final { - Record r; - lookup1(key, [&](const Record& rec) { r = rec; }); - fn(r); - insert(key, r); - } - // ------------------------------------------------------------------------------------- - bool erase(const typename Record::Key& key) final { - uint8_t folded_key[Record::maxFoldLength() + sizeof(SEP)]; - const uint32_t folded_key_len = - Fold(folded_key, Record::id) + Record::foldKey(folded_key + sizeof(SEP), key); - // ------------------------------------------------------------------------------------- - rocksdb::Status s; - if (map.type == RocksDB::DB_TYPE::DB) { - s = map.db->Delete(map.wo, RSlice(folded_key, folded_key_len)); - if (s.ok()) { - return true; - } else { - return false; - } - } else { - s = map.txn->Delete(RSlice(folded_key, folded_key_len)); - if (!s.ok()) { - map.txn->Rollback(); - jumpmu::Jump(); - } - return true; - } - } - // ------------------------------------------------------------------------------------- - template - uint32_t getId(const T& str) { - return __builtin_bswap32(*reinterpret_cast(str.data())) ^ (1ul << 31); - } - // [&](const neworder_t::Key& key, const neworder_t&) { - void scan(const typename Record::Key& key, - const std::function& fn, - std::function) final { - uint8_t folded_key[Record::maxFoldLength() + sizeof(SEP)]; - const uint32_t folded_key_len = - Fold(folded_key, Record::id) + Record::foldKey(folded_key + sizeof(SEP), key); - // ------------------------------------------------------------------------------------- - rocksdb::Iterator* it = map.db->NewIterator(map.ro); - for (it->SeekToFirstGreaterEqual(RSlice(folded_key, folded_key_len)); - it->Valid() && getId(it->Key()) == Record::id; it->Next()) { - typename Record::Key s_key; - Record::unfoldKey(reinterpret_cast(it->Key().data() + sizeof(SEP)), s_key); - const Record& s_value = *reinterpret_cast(it->Val().data()); - if (!fn(s_key, s_value)) - break; - } - assert(it->status().ok()); - delete it; - } - // 
------------------------------------------------------------------------------------- - void ScanDesc(const typename Record::Key& key, - const std::function& fn, - std::function) final { - uint8_t folded_key[Record::maxFoldLength() + sizeof(SEP)]; - const uint32_t folded_key_len = - Fold(folded_key, Record::id) + Record::foldKey(folded_key + sizeof(SEP), key); - // ------------------------------------------------------------------------------------- - rocksdb::Iterator* it = map.db->NewIterator(map.ro); - for (it->SeekToLastLessEqual(RSlice(folded_key, folded_key_len)); - it->Valid() && getId(it->Key()) == Record::id; it->Prev()) { - typename Record::Key s_key; - Record::unfoldKey(reinterpret_cast(it->Key().data() + sizeof(SEP)), s_key); - const Record& s_value = *reinterpret_cast(it->Val().data()); - if (!fn(s_key, s_value)) - break; - } - assert(it->status().ok()); - delete it; - } - // ------------------------------------------------------------------------------------- - template - Field lookupField(const typename Record::Key& key, Field Record::*f) { - Field local_f; - bool found = false; - lookup1(key, [&](const Record& record) { - found = true; - local_f = (record).*f; - }); - assert(found); - return local_f; - } -}; diff --git a/benchmarks/shared/Schema.hpp b/benchmarks/shared/Schema.hpp deleted file mode 100644 index 135e4303..00000000 --- a/benchmarks/shared/Schema.hpp +++ /dev/null @@ -1,61 +0,0 @@ -#pragma once - -#include "shared/Types.hpp" - -template -struct Relation { -public: - // Entries: 1 to 1 160 000 * scale - static constexpr int id = 0; - - struct Key { - static constexpr int id = 0; - KeyT mKey; - }; - -public: - ValT mValue; - -public: - template - static unsigned foldKey(uint8_t* out, const T& key) { - unsigned pos = 0; - pos += Fold(out + pos, key.mKey); - return pos; - } - - template - static unsigned unfoldKey(const uint8_t* in, T& key) { - unsigned pos = 0; - pos += Unfold(in + pos, key.mKey); - return pos; - } - static constexpr unsigned maxFoldLength() { - return 0 + sizeof(Key::mKey); - }; -}; - -template -struct BytesPayload { - uint8_t value[size]; - - BytesPayload() { - } - - bool operator==(BytesPayload& other) { - return (std::memcmp(value, other.value, sizeof(value)) == 0); - } - - bool operator!=(BytesPayload& other) { - return !(operator==(other)); - } - - BytesPayload(const BytesPayload& other) { - std::memcpy(value, other.value, sizeof(value)); - } - - BytesPayload& operator=(const BytesPayload& other) { - std::memcpy(value, other.value, sizeof(value)); - return *this; - } -}; diff --git a/benchmarks/shared/Types.hpp b/benchmarks/shared/Types.hpp deleted file mode 100644 index a1e4805e..00000000 --- a/benchmarks/shared/Types.hpp +++ /dev/null @@ -1,129 +0,0 @@ -#pragma once - -#include "leanstore/Units.hpp" - -#include -#include -#include -#include -#include - -using UInteger = uint32_t; -using Integer = int32_t; -using Timestamp = int64_t; -using Numeric = double; -static constexpr Integer minUInteger = std::numeric_limits::min(); -static constexpr Integer minInteger = std::numeric_limits::min(); -// ------------------------------------------------------------------------------------- -template -struct Varchar { - int16_t length; - char data[maxLength] = {0}; // not '\0' terminated - - Varchar() : length(0) { - } - Varchar(const char* str) { - int l = strlen(str); - assert(l <= maxLength); - length = l; - memcpy(data, str, l); - } - template - Varchar(const Varchar& other) { - assert(other.length <= maxLength); - length = other.length; - 
memcpy(data, other.data, length); - } - - void Append(char x) { - assert(length < maxLength); - data[length++] = x; - }; - - std::string toString() const { - return std::string(data, length); - }; - - template - Varchar operator||(const Varchar& other) const { - Varchar tmp; - assert((static_cast(length) + other.length) <= maxLength); - tmp.length = length + other.length; - memcpy(tmp.data, data, length); - memcpy(tmp.data + length, other.data, other.length); - return tmp; - } - - bool operator==(const Varchar& other) const { - return (length == other.length) && (memcmp(data, other.data, length) == 0); - } - - bool operator!=(const Varchar& other) const { - return !operator==(other); - } - - bool operator<(const Varchar& other) const { - int cmp = memcmp(data, other.data, (length < other.length) ? length : other.length); - if (cmp) - return cmp < 0; - else - return length < other.length; - } -}; -// ------------------------------------------------------------------------------------- -// Fold functions convert integers to a lexicographical comparable format -unsigned Fold(uint8_t* writer, const Integer& x) { - *reinterpret_cast(writer) = __builtin_bswap32(x ^ (1ul << 31)); - return sizeof(x); -} -// ------------------------------------------------------------------------------------- -unsigned Fold(uint8_t* writer, const Timestamp& x) { - *reinterpret_cast(writer) = __builtin_bswap64(x ^ (1ull << 63)); - return sizeof(x); -} -// ------------------------------------------------------------------------------------- -unsigned Fold(uint8_t* writer, const uint32_t& x) { - *reinterpret_cast(writer) = __builtin_bswap32(x); - return sizeof(x); -} -// ------------------------------------------------------------------------------------- -unsigned Fold(uint8_t* writer, const uint64_t& x) { - *reinterpret_cast(writer) = __builtin_bswap64(x); - return sizeof(x); -} -// ------------------------------------------------------------------------------------- -template -unsigned Fold(uint8_t* writer, const Varchar& x) { - memcpy(writer, x.data, x.length); - writer[x.length] = 0; - return x.length + 1; -} -// ------------------------------------------------------------------------------------- -unsigned Unfold(const uint8_t* input, Integer& x) { - x = __builtin_bswap32(*reinterpret_cast(input)) ^ (1ul << 31); - return sizeof(x); -} -// ------------------------------------------------------------------------------------- -unsigned Unfold(const uint8_t* input, Timestamp& x) { - x = __builtin_bswap64(*reinterpret_cast(input)) ^ (1ul << 63); - return sizeof(x); -} -// ------------------------------------------------------------------------------------- -unsigned Unfold(const uint8_t* input, uint32_t& x) { - x = __builtin_bswap32(*reinterpret_cast(input)); - return sizeof(x); -} -// ------------------------------------------------------------------------------------- -unsigned Unfold(const uint8_t* input, uint64_t& x) { - x = __builtin_bswap64(*reinterpret_cast(input)); - return sizeof(x); -} -// ------------------------------------------------------------------------------------- -template -unsigned Unfold(const uint8_t* input, Varchar& x) { - int l = strlen(reinterpret_cast(input)); - assert(l <= len); - memcpy(x.data, input, l); - x.length = l; - return l + 1; -} diff --git a/benchmarks/shared/lmdb++.hpp b/benchmarks/shared/lmdb++.hpp deleted file mode 100644 index 010c1133..00000000 --- a/benchmarks/shared/lmdb++.hpp +++ /dev/null @@ -1,1847 +0,0 @@ -/* This is free and unencumbered software released into the 
public domain. */ - -#ifndef LMDBXX_H -#define LMDBXX_H - -/** - * - C++11 wrapper for LMDB. - * - * @author Arto Bendiken - * @see https://sourceforge.net/projects/lmdbxx/ - */ - -#ifndef __cplusplus -#error " requires a C++ compiler" -#endif - -#if __cplusplus < 201103L -#if !defined(_MSC_VER) || _MSC_VER < 1900 -#error " requires a C++11 compiler (CXXFLAGS='-std=c++11')" -#endif // _MSC_VER check -#endif - -//////////////////////////////////////////////////////////////////////////////// - -#include /* for MDB_*, mdb_*() */ - -#ifdef LMDBXX_DEBUG -#include /* for assert() */ -#endif -#include /* for std::size_t */ -#include /* for std::snprintf() */ -#include /* for std::strlen() */ -#include /* for std::runtime_error */ -#include /* for std::string */ -#include /* for std::is_pod<> */ - -namespace lmdb { -using mode = mdb_mode_t; -} - -//////////////////////////////////////////////////////////////////////////////// -/* Error Handling */ - -namespace lmdb { -class error; -class logic_error; -class fatal_error; -class runtime_error; -class key_exist_error; -class not_found_error; -class corrupted_error; -class panic_error; -class version_mismatch_error; -class map_full_error; -class bad_dbi_error; -} // namespace lmdb - -/** - * Base class for LMDB exception conditions. - * - * @see http://symas.com/mdb/doc/group__errors.html - */ -class lmdb::error : public std::runtime_error { -protected: - const int _code; - -public: - /** - * Throws an error based on the given LMDB return code. - */ - [[noreturn]] inline static void raise(const char* origin, int rc); - - /** - * Constructor. - */ - error(const char* const origin, const int rc) noexcept : runtime_error{origin}, _code{rc} { - } - - /** - * Returns the underlying LMDB error code. - */ - int code() const noexcept { - return _code; - } - - /** - * Returns the origin of the LMDB error. - */ - const char* origin() const noexcept { - return runtime_error::what(); - } - - /** - * Returns the underlying LMDB error code. - */ - virtual const char* what() const noexcept { - static thread_local char buffer[1024]; - std::snprintf(buffer, sizeof(buffer), "%s: %s", origin(), ::mdb_strerror(code())); - return buffer; - } -}; - -/** - * Base class for logic error conditions. - */ -class lmdb::logic_error : public lmdb::error { -public: - using error::error; -}; - -/** - * Base class for fatal error conditions. - */ -class lmdb::fatal_error : public lmdb::error { -public: - using error::error; -}; - -/** - * Base class for runtime error conditions. - */ -class lmdb::runtime_error : public lmdb::error { -public: - using error::error; -}; - -/** - * Exception class for `MDB_KEYEXIST` errors. - * - * @see - * http://symas.com/mdb/doc/group__errors.html#ga05dc5bbcc7da81a7345bd8676e8e0e3b - */ -class lmdb::key_exist_error final : public lmdb::runtime_error { -public: - using runtime_error::runtime_error; -}; - -/** - * Exception class for `MDB_NOTFOUND` errors. - * - * @see - * http://symas.com/mdb/doc/group__errors.html#gabeb52e4c4be21b329e31c4add1b71926 - */ -class lmdb::not_found_error final : public lmdb::runtime_error { -public: - using runtime_error::runtime_error; -}; - -/** - * Exception class for `MDB_CORRUPTED` errors. - * - * @see - * http://symas.com/mdb/doc/group__errors.html#gaf8148bf1b85f58e264e57194bafb03ef - */ -class lmdb::corrupted_error final : public lmdb::fatal_error { -public: - using fatal_error::fatal_error; -}; - -/** - * Exception class for `MDB_PANIC` errors. 
- * - * @see - * http://symas.com/mdb/doc/group__errors.html#gae37b9aedcb3767faba3de8c1cf6d3473 - */ -class lmdb::panic_error final : public lmdb::fatal_error { -public: - using fatal_error::fatal_error; -}; - -/** - * Exception class for `MDB_VERSION_MISMATCH` errors. - * - * @see - * http://symas.com/mdb/doc/group__errors.html#ga909b2db047fa90fb0d37a78f86a6f99b - */ -class lmdb::version_mismatch_error final : public lmdb::fatal_error { -public: - using fatal_error::fatal_error; -}; - -/** - * Exception class for `MDB_MAP_FULL` errors. - * - * @see - * http://symas.com/mdb/doc/group__errors.html#ga0a83370402a060c9175100d4bbfb9f25 - */ -class lmdb::map_full_error final : public lmdb::runtime_error { -public: - using runtime_error::runtime_error; -}; - -/** - * Exception class for `MDB_BAD_DBI` errors. - * - * @since 0.9.14 (2014/09/20) - * @see - * http://symas.com/mdb/doc/group__errors.html#gab4c82e050391b60a18a5df08d22a7083 - */ -class lmdb::bad_dbi_error final : public lmdb::runtime_error { -public: - using runtime_error::runtime_error; -}; - -inline void lmdb::error::raise(const char* const origin, const int rc) { - switch (rc) { - case MDB_KEYEXIST: - throw key_exist_error{origin, rc}; - case MDB_NOTFOUND: - throw not_found_error{origin, rc}; - case MDB_CORRUPTED: - throw corrupted_error{origin, rc}; - case MDB_PANIC: - throw panic_error{origin, rc}; - case MDB_VERSION_MISMATCH: - throw version_mismatch_error{origin, rc}; - case MDB_MAP_FULL: - throw map_full_error{origin, rc}; -#ifdef MDB_BAD_DBI - case MDB_BAD_DBI: - throw bad_dbi_error{origin, rc}; -#endif - default: - throw lmdb::runtime_error{origin, rc}; - } -} - -//////////////////////////////////////////////////////////////////////////////// -/* Procedural Interface: Metadata */ - -namespace lmdb { -// TODO: mdb_version() -// TODO: mdb_strerror() -} - -//////////////////////////////////////////////////////////////////////////////// -/* Procedural Interface: Environment */ - -namespace lmdb { -inline static void env_create(MDB_env** env); -inline static void env_open(MDB_env* env, const char* path, unsigned int flags, mode mode); -#if MDB_VERSION_FULL >= MDB_VERINT(0, 9, 14) -inline static void env_copy(MDB_env* env, const char* path, unsigned int flags); -inline static void env_copy_fd(MDB_env* env, mdb_filehandle_t fd, unsigned int flags); -#else -inline static void env_copy(MDB_env* env, const char* path); -inline static void env_copy_fd(MDB_env* env, mdb_filehandle_t fd); -#endif -inline static void env_stat(MDB_env* env, MDB_stat* stat); -inline static void env_info(MDB_env* env, MDB_envinfo* stat); -inline static void env_sync(MDB_env* env, bool force); -inline static void env_close(MDB_env* env) noexcept; -inline static void env_set_flags(MDB_env* env, unsigned int flags, bool onoff); -inline static void env_get_flags(MDB_env* env, unsigned int* flags); -inline static void env_get_path(MDB_env* env, const char** path); -inline static void env_get_fd(MDB_env* env, mdb_filehandle_t* fd); -inline static void env_set_mapsize(MDB_env* env, std::size_t size); -inline static void env_set_max_readers(MDB_env* env, unsigned int count); -inline static void env_get_max_readers(MDB_env* env, unsigned int* count); -inline static void env_set_max_dbs(MDB_env* env, MDB_dbi count); -inline static unsigned int env_get_max_keysize(MDB_env* env); -#if MDB_VERSION_FULL >= MDB_VERINT(0, 9, 11) -inline static void env_set_userctx(MDB_env* env, void* ctx); -inline static void* env_get_userctx(MDB_env* env); -#endif -// TODO: 
mdb_env_set_assert() -// TODO: mdb_reader_list() -// TODO: mdb_reader_check() -} // namespace lmdb - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gaad6be3d8dcd4ea01f8df436f41d158d4 - */ -inline static void lmdb::env_create(MDB_env** env) { - const int rc = ::mdb_env_create(env); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_create", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga32a193c6bf4d7d5c5d579e71f22e9340 - */ -inline static void lmdb::env_open(MDB_env* const env, const char* const path, - const unsigned int flags, const mode mode) { - const int rc = ::mdb_env_open(env, path, flags, mode); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_open", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga3bf50d7793b36aaddf6b481a44e24244 - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga5d51d6130325f7353db0955dbedbc378 - */ -inline static void lmdb::env_copy(MDB_env* const env, -#if MDB_VERSION_FULL >= MDB_VERINT(0, 9, 14) - const char* const path, const unsigned int flags = 0) { - const int rc = ::mdb_env_copy2(env, path, flags); -#else - const char* const path) { - const int rc = ::mdb_env_copy(env, path); -#endif - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_copy2", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga5040d0de1f14000fa01fc0b522ff1f86 - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga470b0bcc64ac417de5de5930f20b1a28 - */ -inline static void lmdb::env_copy_fd(MDB_env* const env, -#if MDB_VERSION_FULL >= MDB_VERINT(0, 9, 14) - const mdb_filehandle_t fd, const unsigned int flags = 0) { - const int rc = ::mdb_env_copyfd2(env, fd, flags); -#else - const mdb_filehandle_t fd) { - const int rc = ::mdb_env_copyfd(env, fd); -#endif - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_copyfd2", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gaf881dca452050efbd434cd16e4bae255 - */ -inline static void lmdb::env_stat(MDB_env* const env, MDB_stat* const stat) { - const int rc = ::mdb_env_stat(env, stat); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_stat", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga18769362c7e7d6cf91889a028a5c5947 - */ -inline static void lmdb::env_info(MDB_env* const env, MDB_envinfo* const stat) { - const int rc = ::mdb_env_info(env, stat); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_info", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga85e61f05aa68b520cc6c3b981dba5037 - */ -inline static void lmdb::env_sync(MDB_env* const env, const bool force = true) { - const int rc = ::mdb_env_sync(env, force); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_sync", rc); - } -} - -/** - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga4366c43ada8874588b6a62fbda2d1e95 - */ -inline static void lmdb::env_close(MDB_env* const env) noexcept { - ::mdb_env_close(env); -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga83f66cf02bfd42119451e9468dc58445 - */ -inline static void lmdb::env_set_flags(MDB_env* const env, const unsigned int flags, - const bool onoff = true) { - const int rc = ::mdb_env_set_flags(env, flags, onoff ? 
1 : 0); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_set_flags", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga2733aefc6f50beb49dd0c6eb19b067d9 - */ -inline static void lmdb::env_get_flags(MDB_env* const env, unsigned int* const flags) { - const int rc = ::mdb_env_get_flags(env, flags); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_get_flags", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gac699fdd8c4f8013577cb933fb6a757fe - */ -inline static void lmdb::env_get_path(MDB_env* const env, const char** path) { - const int rc = ::mdb_env_get_path(env, path); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_get_path", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gaf1570e7c0e5a5d860fef1032cec7d5f2 - */ -inline static void lmdb::env_get_fd(MDB_env* const env, mdb_filehandle_t* const fd) { - const int rc = ::mdb_env_get_fd(env, fd); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_get_fd", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gaa2506ec8dab3d969b0e609cd82e619e5 - */ -inline static void lmdb::env_set_mapsize(MDB_env* const env, const std::size_t size) { - const int rc = ::mdb_env_set_mapsize(env, size); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_set_mapsize", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gae687966c24b790630be2a41573fe40e2 - */ -inline static void lmdb::env_set_max_readers(MDB_env* const env, const unsigned int count) { - const int rc = ::mdb_env_set_maxreaders(env, count); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_set_maxreaders", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga70e143cf11760d869f754c9c9956e6cc - */ -inline static void lmdb::env_get_max_readers(MDB_env* const env, unsigned int* const count) { - const int rc = ::mdb_env_get_maxreaders(env, count); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_get_maxreaders", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gaa2fc2f1f37cb1115e733b62cab2fcdbc - */ -inline static void lmdb::env_set_max_dbs(MDB_env* const env, const MDB_dbi count) { - const int rc = ::mdb_env_set_maxdbs(env, count); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_set_maxdbs", rc); - } -} - -/** - * @see - * http://symas.com/mdb/doc/group__mdb.html#gaaf0be004f33828bf2fb09d77eb3cef94 - */ -inline static unsigned int lmdb::env_get_max_keysize(MDB_env* const env) { - const int rc = ::mdb_env_get_maxkeysize(env); -#ifdef LMDBXX_DEBUG - assert(rc >= 0); -#endif - return static_cast(rc); -} - -#if MDB_VERSION_FULL >= MDB_VERINT(0, 9, 11) -/** - * @throws lmdb::error on failure - * @since 0.9.11 (2014/01/15) - * @see - * http://symas.com/mdb/doc/group__mdb.html#gaf2fe09eb9c96eeb915a76bf713eecc46 - */ -inline static void lmdb::env_set_userctx(MDB_env* const env, void* const ctx) { - const int rc = ::mdb_env_set_userctx(env, ctx); - if (rc != MDB_SUCCESS) { - error::raise("mdb_env_set_userctx", rc); - } -} -#endif - -#if MDB_VERSION_FULL >= MDB_VERINT(0, 9, 11) -/** - * @since 0.9.11 (2014/01/15) - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga45df6a4fb150cda2316b5ae224ba52f1 - */ -inline static void* lmdb::env_get_userctx(MDB_env* const env) { - return ::mdb_env_get_userctx(env); -} 
-#endif - -//////////////////////////////////////////////////////////////////////////////// -/* Procedural Interface: Transactions */ - -namespace lmdb { -inline static void txn_begin(MDB_env* env, MDB_txn* parent, unsigned int flags, MDB_txn** txn); -inline static MDB_env* txn_env(MDB_txn* txn) noexcept; -#ifdef LMDBXX_TXN_ID -inline static std::size_t txn_id(MDB_txn* txn) noexcept; -#endif -inline static void txn_commit(MDB_txn* txn); -inline static void txn_abort(MDB_txn* txn) noexcept; -inline static void txn_reset(MDB_txn* txn) noexcept; -inline static void txn_renew(MDB_txn* txn); -} // namespace lmdb - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gad7ea55da06b77513609efebd44b26920 - */ -inline static void lmdb::txn_begin(MDB_env* const env, MDB_txn* const parent, - const unsigned int flags, MDB_txn** txn) { - const int rc = ::mdb_txn_begin(env, parent, flags, txn); - if (rc != MDB_SUCCESS) { - error::raise("mdb_txn_begin", rc); - } -} - -/** - * @see - * http://symas.com/mdb/doc/group__mdb.html#gaeb17735b8aaa2938a78a45cab85c06a0 - */ -inline static MDB_env* lmdb::txn_env(MDB_txn* const txn) noexcept { - return ::mdb_txn_env(txn); -} - -#ifdef LMDBXX_TXN_ID -/** - * @note Only available in HEAD, not yet in any 0.9.x release (as of 0.9.16). - */ -inline static std::size_t lmdb::txn_id(MDB_txn* const txn) noexcept { - return ::mdb_txn_id(txn); -} -#endif - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga846fbd6f46105617ac9f4d76476f6597 - */ -inline static void lmdb::txn_commit(MDB_txn* const txn) { - const int rc = ::mdb_txn_commit(txn); - if (rc != MDB_SUCCESS) { - error::raise("mdb_txn_commit", rc); - } -} - -/** - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga73a5938ae4c3239ee11efa07eb22b882 - */ -inline static void lmdb::txn_abort(MDB_txn* const txn) noexcept { - ::mdb_txn_abort(txn); -} - -/** - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga02b06706f8a66249769503c4e88c56cd - */ -inline static void lmdb::txn_reset(MDB_txn* const txn) noexcept { - ::mdb_txn_reset(txn); -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga6c6f917959517ede1c504cf7c720ce6d - */ -inline static void lmdb::txn_renew(MDB_txn* const txn) { - const int rc = ::mdb_txn_renew(txn); - if (rc != MDB_SUCCESS) { - error::raise("mdb_txn_renew", rc); - } -} - -//////////////////////////////////////////////////////////////////////////////// -/* Procedural Interface: Databases */ - -namespace lmdb { -inline static void dbi_open(MDB_txn* txn, const char* name, unsigned int flags, MDB_dbi* dbi); -inline static void dbi_stat(MDB_txn* txn, MDB_dbi dbi, MDB_stat* stat); -inline static void dbi_flags(MDB_txn* txn, MDB_dbi dbi, unsigned int* flags); -inline static void dbi_close(MDB_env* env, MDB_dbi dbi) noexcept; -inline static void dbi_drop(MDB_txn* txn, MDB_dbi dbi, bool del); -inline static void dbi_set_compare(MDB_txn* txn, MDB_dbi dbi, MDB_cmp_func* cmp); -inline static void dbi_set_dupsort(MDB_txn* txn, MDB_dbi dbi, MDB_cmp_func* cmp); -inline static void dbi_set_relfunc(MDB_txn* txn, MDB_dbi dbi, MDB_rel_func* rel); -inline static void dbi_set_relctx(MDB_txn* txn, MDB_dbi dbi, void* ctx); -inline static bool dbi_get(MDB_txn* txn, MDB_dbi dbi, const MDB_val* key, MDB_val* data); -inline static bool dbi_put(MDB_txn* txn, MDB_dbi dbi, const MDB_val* key, MDB_val* data, - unsigned int flags); -inline static bool dbi_del(MDB_txn* txn, MDB_dbi dbi, const MDB_val* 
key, const MDB_val* data); -// TODO: mdb_cmp() -// TODO: mdb_dcmp() -} // namespace lmdb - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gac08cad5b096925642ca359a6d6f0562a - */ -inline static void lmdb::dbi_open(MDB_txn* const txn, const char* const name, - const unsigned int flags, MDB_dbi* const dbi) { - const int rc = ::mdb_dbi_open(txn, name, flags, dbi); - if (rc != MDB_SUCCESS) { - error::raise("mdb_dbi_open", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gae6c1069febe94299769dbdd032fadef6 - */ -inline static void lmdb::dbi_stat(MDB_txn* const txn, const MDB_dbi dbi, MDB_stat* const result) { - const int rc = ::mdb_stat(txn, dbi, result); - if (rc != MDB_SUCCESS) { - error::raise("mdb_stat", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga95ba4cb721035478a8705e57b91ae4d4 - */ -inline static void lmdb::dbi_flags(MDB_txn* const txn, const MDB_dbi dbi, - unsigned int* const flags) { - const int rc = ::mdb_dbi_flags(txn, dbi, flags); - if (rc != MDB_SUCCESS) { - error::raise("mdb_dbi_flags", rc); - } -} - -/** - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga52dd98d0c542378370cd6b712ff961b5 - */ -inline static void lmdb::dbi_close(MDB_env* const env, const MDB_dbi dbi) noexcept { - ::mdb_dbi_close(env, dbi); -} - -/** - * @see - * http://symas.com/mdb/doc/group__mdb.html#gab966fab3840fc54a6571dfb32b00f2db - */ -inline static void lmdb::dbi_drop(MDB_txn* const txn, const MDB_dbi dbi, const bool del = false) { - const int rc = ::mdb_drop(txn, dbi, del ? 1 : 0); - if (rc != MDB_SUCCESS) { - error::raise("mdb_drop", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga68e47ffcf72eceec553c72b1784ee0fe - */ -inline static void lmdb::dbi_set_compare(MDB_txn* const txn, const MDB_dbi dbi, - MDB_cmp_func* const cmp = nullptr) { - const int rc = ::mdb_set_compare(txn, dbi, cmp); - if (rc != MDB_SUCCESS) { - error::raise("mdb_set_compare", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gacef4ec3dab0bbd9bc978b73c19c879ae - */ -inline static void lmdb::dbi_set_dupsort(MDB_txn* const txn, const MDB_dbi dbi, - MDB_cmp_func* const cmp = nullptr) { - const int rc = ::mdb_set_dupsort(txn, dbi, cmp); - if (rc != MDB_SUCCESS) { - error::raise("mdb_set_dupsort", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga697d82c7afe79f142207ad5adcdebfeb - */ -inline static void lmdb::dbi_set_relfunc(MDB_txn* const txn, const MDB_dbi dbi, - MDB_rel_func* const rel) { - const int rc = ::mdb_set_relfunc(txn, dbi, rel); - if (rc != MDB_SUCCESS) { - error::raise("mdb_set_relfunc", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga7c34246308cee01724a1839a8f5cc594 - */ -inline static void lmdb::dbi_set_relctx(MDB_txn* const txn, const MDB_dbi dbi, void* const ctx) { - const int rc = ::mdb_set_relctx(txn, dbi, ctx); - if (rc != MDB_SUCCESS) { - error::raise("mdb_set_relctx", rc); - } -} - -/** - * @retval true if the key/value pair was retrieved - * @retval false if the key wasn't found - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga8bf10cd91d3f3a83a34d04ce6b07992d - */ -inline static bool lmdb::dbi_get(MDB_txn* const txn, const MDB_dbi dbi, const MDB_val* const key, - MDB_val* const data) { - const int rc = 
::mdb_get(txn, dbi, const_cast(key), data); - if (rc != MDB_SUCCESS && rc != MDB_NOTFOUND) { - error::raise("mdb_get", rc); - } - return (rc == MDB_SUCCESS); -} - -/** - * @retval true if the key/value pair was inserted - * @retval false if the key already existed - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga4fa8573d9236d54687c61827ebf8cac0 - */ -inline static bool lmdb::dbi_put(MDB_txn* const txn, const MDB_dbi dbi, const MDB_val* const key, - MDB_val* const data, const unsigned int flags = 0) { - const int rc = ::mdb_put(txn, dbi, const_cast(key), data, flags); - if (rc != MDB_SUCCESS && rc != MDB_KEYEXIST) { - error::raise("mdb_put", rc); - } - return (rc == MDB_SUCCESS); -} - -/** - * @retval true if the key/value pair was removed - * @retval false if the key wasn't found - * @see - * http://symas.com/mdb/doc/group__mdb.html#gab8182f9360ea69ac0afd4a4eaab1ddb0 - */ -inline static bool lmdb::dbi_del(MDB_txn* const txn, const MDB_dbi dbi, const MDB_val* const key, - const MDB_val* const data = nullptr) { - const int rc = ::mdb_del(txn, dbi, const_cast(key), const_cast(data)); - if (rc != MDB_SUCCESS && rc != MDB_NOTFOUND) { - error::raise("mdb_del", rc); - } - return (rc == MDB_SUCCESS); -} - -//////////////////////////////////////////////////////////////////////////////// -/* Procedural Interface: Cursors */ - -namespace lmdb { -inline static void cursor_open(MDB_txn* txn, MDB_dbi dbi, MDB_cursor** cursor); -inline static void cursor_close(MDB_cursor* cursor) noexcept; -inline static void cursor_renew(MDB_txn* txn, MDB_cursor* cursor); -inline static MDB_txn* cursor_txn(MDB_cursor* cursor) noexcept; -inline static MDB_dbi cursor_dbi(MDB_cursor* cursor) noexcept; -inline static bool cursor_get(MDB_cursor* cursor, MDB_val* key, MDB_val* data, MDB_cursor_op op); -inline static void cursor_put(MDB_cursor* cursor, MDB_val* key, MDB_val* data, unsigned int flags); -inline static void cursor_del(MDB_cursor* cursor, unsigned int flags); -inline static void cursor_count(MDB_cursor* cursor, std::size_t& count); -} // namespace lmdb - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga9ff5d7bd42557fd5ee235dc1d62613aa - */ -inline static void lmdb::cursor_open(MDB_txn* const txn, const MDB_dbi dbi, - MDB_cursor** const cursor) { - const int rc = ::mdb_cursor_open(txn, dbi, cursor); - if (rc != MDB_SUCCESS) { - error::raise("mdb_cursor_open", rc); - } -} - -/** - * @see - * http://symas.com/mdb/doc/group__mdb.html#gad685f5d73c052715c7bd859cc4c05188 - */ -inline static void lmdb::cursor_close(MDB_cursor* const cursor) noexcept { - ::mdb_cursor_close(cursor); -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#gac8b57befb68793070c85ea813df481af - */ -inline static void lmdb::cursor_renew(MDB_txn* const txn, MDB_cursor* const cursor) { - const int rc = ::mdb_cursor_renew(txn, cursor); - if (rc != MDB_SUCCESS) { - error::raise("mdb_cursor_renew", rc); - } -} - -/** - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga7bf0d458f7f36b5232fcb368ebda79e0 - */ -inline static MDB_txn* lmdb::cursor_txn(MDB_cursor* const cursor) noexcept { - return ::mdb_cursor_txn(cursor); -} - -/** - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga2f7092cf70ee816fb3d2c3267a732372 - */ -inline static MDB_dbi lmdb::cursor_dbi(MDB_cursor* const cursor) noexcept { - return ::mdb_cursor_dbi(cursor); -} - -/** - * @throws lmdb::error on failure - * @see - * 
http://symas.com/mdb/doc/group__mdb.html#ga48df35fb102536b32dfbb801a47b4cb0 - */ -inline static bool lmdb::cursor_get(MDB_cursor* const cursor, MDB_val* const key, - MDB_val* const data, const MDB_cursor_op op) { - const int rc = ::mdb_cursor_get(cursor, key, data, op); - if (rc != MDB_SUCCESS && rc != MDB_NOTFOUND) { - error::raise("mdb_cursor_get", rc); - } - return (rc == MDB_SUCCESS); -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga1f83ccb40011837ff37cc32be01ad91e - */ -inline static void lmdb::cursor_put(MDB_cursor* const cursor, MDB_val* const key, - MDB_val* const data, const unsigned int flags = 0) { - const int rc = ::mdb_cursor_put(cursor, key, data, flags); - if (rc != MDB_SUCCESS) { - error::raise("mdb_cursor_put", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga26a52d3efcfd72e5bf6bd6960bf75f95 - */ -inline static void lmdb::cursor_del(MDB_cursor* const cursor, const unsigned int flags = 0) { - const int rc = ::mdb_cursor_del(cursor, flags); - if (rc != MDB_SUCCESS) { - error::raise("mdb_cursor_del", rc); - } -} - -/** - * @throws lmdb::error on failure - * @see - * http://symas.com/mdb/doc/group__mdb.html#ga4041fd1e1862c6b7d5f10590b86ffbe2 - */ -inline static void lmdb::cursor_count(MDB_cursor* const cursor, std::size_t& count) { - const int rc = ::mdb_cursor_count(cursor, &count); - if (rc != MDB_SUCCESS) { - error::raise("mdb_cursor_count", rc); - } -} - -//////////////////////////////////////////////////////////////////////////////// -/* Resource Interface: Values */ - -namespace lmdb { -class val; -} - -/** - * Wrapper class for `MDB_val` structures. - * - * @note Instances of this class are movable and copyable both. - * @see http://symas.com/mdb/doc/group__mdb.html#structMDB__val - */ -class lmdb::val { -protected: - MDB_val _val; - -public: - /** - * Default constructor. - */ - val() noexcept = default; - - /** - * Constructor. - */ - val(const std::string& data) noexcept : val{data.data(), data.size()} { - } - - /** - * Constructor. - */ - val(const char* const data) noexcept : val{data, std::strlen(data)} { - } - - /** - * Constructor. - */ - val(const void* const data, const std::size_t size) noexcept - : _val{size, const_cast(data)} { - } - - /** - * Move constructor. - */ - val(val&& other) noexcept = default; - - /** - * Move assignment operator. - */ - val& operator=(val&& other) noexcept = default; - - /** - * Destructor. - */ - ~val() noexcept = default; - - /** - * Returns an `MDB_val*` pointer. - */ - operator MDB_val*() noexcept { - return &_val; - } - - /** - * Returns an `MDB_val*` pointer. - */ - operator const MDB_val*() const noexcept { - return &_val; - } - - /** - * Determines whether this value is empty. - */ - bool empty() const noexcept { - return size() == 0; - } - - /** - * Returns the size of the data. - */ - std::size_t size() const noexcept { - return _val.mv_size; - } - - /** - * Returns a pointer to the data. - */ - template - T* data() noexcept { - return reinterpret_cast(_val.mv_data); - } - - /** - * Returns a pointer to the data. - */ - template - const T* data() const noexcept { - return reinterpret_cast(_val.mv_data); - } - - /** - * Returns a pointer to the data. - */ - char* data() noexcept { - return reinterpret_cast(_val.mv_data); - } - - /** - * Returns a pointer to the data. - */ - const char* data() const noexcept { - return reinterpret_cast(_val.mv_data); - } - - /** - * Assigns the value. 
- */ - template - val& assign(const T* const data, const std::size_t size) noexcept { - _val.mv_size = size; - _val.mv_data = const_cast(reinterpret_cast(data)); - return *this; - } - - /** - * Assigns the value. - */ - val& assign(const char* const data) noexcept { - return assign(data, std::strlen(data)); - } - - /** - * Assigns the value. - */ - val& assign(const std::string& data) noexcept { - return assign(data.data(), data.size()); - } -}; - -#if !(defined(__COVERITY__) || defined(_MSC_VER)) -static_assert(std::is_pod::value, "lmdb::val must be a POD type"); -static_assert(sizeof(lmdb::val) == sizeof(MDB_val), "sizeof(lmdb::val) != sizeof(MDB_val)"); -#endif - -//////////////////////////////////////////////////////////////////////////////// -/* Resource Interface: Environment */ - -namespace lmdb { -class env; -} - -/** - * Resource class for `MDB_env*` handles. - * - * @note Instances of this class are movable, but not copyable. - * @see http://symas.com/mdb/doc/group__internal.html#structMDB__env - */ -class lmdb::env { -protected: - MDB_env* _handle{nullptr}; - -public: - static constexpr unsigned int default_flags = 0; - static constexpr mode default_mode = 0644; /* -rw-r--r-- */ - - /** - * Creates a new LMDB environment. - * - * @param flags - * @throws lmdb::error on failure - */ - static env create(const unsigned int flags = default_flags) { - MDB_env* handle{nullptr}; - lmdb::env_create(&handle); -#ifdef LMDBXX_DEBUG - assert(handle != nullptr); -#endif - if (flags) { - try { - lmdb::env_set_flags(handle, flags); - } catch (const lmdb::error&) { - lmdb::env_close(handle); - throw; - } - } - return env{handle}; - } - - /** - * Constructor. - * - * @param handle a valid `MDB_env*` handle - */ - env(MDB_env* const handle) noexcept : _handle{handle} { - } - - /** - * Move constructor. - */ - env(env&& other) noexcept { - std::swap(_handle, other._handle); - } - - /** - * Move assignment operator. - */ - env& operator=(env&& other) noexcept { - if (this != &other) { - std::swap(_handle, other._handle); - } - return *this; - } - - /** - * Destructor. - */ - ~env() noexcept { - try { - close(); - } catch (...) { - } - } - - /** - * Returns the underlying `MDB_env*` handle. - */ - operator MDB_env*() const noexcept { - return _handle; - } - - /** - * Returns the underlying `MDB_env*` handle. - */ - MDB_env* handle() const noexcept { - return _handle; - } - - /** - * Flushes data buffers to disk. - * - * @param force - * @throws lmdb::error on failure - */ - void sync(const bool force = true) { - lmdb::env_sync(handle(), force); - } - - /** - * Closes this environment, releasing the memory map. - * - * @note this method is idempotent - * @post `handle() == nullptr` - */ - void close() noexcept { - if (handle()) { - lmdb::env_close(handle()); - _handle = nullptr; - } - } - - /** - * Opens this environment. 
- * - * @param path - * @param flags - * @param mode - * @throws lmdb::error on failure - */ - env& open(const char* const path, const unsigned int flags = default_flags, - const mode mode = default_mode) { - lmdb::env_open(handle(), path, flags, mode); - return *this; - } - - /** - * @param flags - * @param onoff - * @throws lmdb::error on failure - */ - env& set_flags(const unsigned int flags, const bool onoff = true) { - lmdb::env_set_flags(handle(), flags, onoff); - return *this; - } - - /** - * @param size - * @throws lmdb::error on failure - */ - env& set_mapsize(const std::size_t size) { - lmdb::env_set_mapsize(handle(), size); - return *this; - } - - /** - * @param count - * @throws lmdb::error on failure - */ - env& set_max_readers(const unsigned int count) { - lmdb::env_set_max_readers(handle(), count); - return *this; - } - - /** - * @param count - * @throws lmdb::error on failure - */ - env& set_max_dbs(const MDB_dbi count) { - lmdb::env_set_max_dbs(handle(), count); - return *this; - } -}; - -//////////////////////////////////////////////////////////////////////////////// -/* Resource Interface: Transactions */ - -namespace lmdb { -class txn; -} - -/** - * Resource class for `MDB_txn*` handles. - * - * @note Instances of this class are movable, but not copyable. - * @see http://symas.com/mdb/doc/group__internal.html#structMDB__txn - */ -class lmdb::txn { -protected: - MDB_txn* _handle{nullptr}; - -public: - static constexpr unsigned int default_flags = 0; - - /** - * Creates a new LMDB transaction. - * - * @param env the environment handle - * @param parent - * @param flags - * @throws lmdb::error on failure - */ - static txn begin(MDB_env* const env, MDB_txn* const parent = nullptr, - const unsigned int flags = default_flags) { - MDB_txn* handle{nullptr}; - lmdb::txn_begin(env, parent, flags, &handle); -#ifdef LMDBXX_DEBUG - assert(handle != nullptr); -#endif - return txn{handle}; - } - - /** - * Constructor. - * - * @param handle a valid `MDB_txn*` handle - */ - txn(MDB_txn* const handle) noexcept : _handle{handle} { - } - - /** - * Move constructor. - */ - txn(txn&& other) noexcept { - std::swap(_handle, other._handle); - } - - /** - * Move assignment operator. - */ - txn& operator=(txn&& other) noexcept { - if (this != &other) { - std::swap(_handle, other._handle); - } - return *this; - } - - /** - * Destructor. - */ - ~txn() noexcept { - if (_handle) { - try { - abort(); - } catch (...) { - } - _handle = nullptr; - } - } - - /** - * Returns the underlying `MDB_txn*` handle. - */ - operator MDB_txn*() const noexcept { - return _handle; - } - - /** - * Returns the underlying `MDB_txn*` handle. - */ - MDB_txn* handle() const noexcept { - return _handle; - } - - /** - * Returns the transaction's `MDB_env*` handle. - */ - MDB_env* env() const noexcept { - return lmdb::txn_env(handle()); - } - - /** - * Commits this transaction. - * - * @throws lmdb::error on failure - * @post `handle() == nullptr` - */ - void commit() { - lmdb::txn_commit(_handle); - _handle = nullptr; - } - - /** - * Aborts this transaction. - * - * @post `handle() == nullptr` - */ - void abort() noexcept { - lmdb::txn_abort(_handle); - _handle = nullptr; - } - - /** - * Resets this read-only transaction. - */ - void Reset() noexcept { - lmdb::txn_reset(_handle); - } - - /** - * Renews this read-only transaction. 
- * - * @throws lmdb::error on failure - */ - void renew() { - lmdb::txn_renew(_handle); - } -}; - -//////////////////////////////////////////////////////////////////////////////// -/* Resource Interface: Databases */ - -namespace lmdb { -class dbi; -} - -/** - * Resource class for `MDB_dbi` handles. - * - * @note Instances of this class are movable, but not copyable. - * @see - * http://symas.com/mdb/doc/group__mdb.html#gadbe68a06c448dfb62da16443d251a78b - */ -class lmdb::dbi { -protected: - MDB_dbi _handle{0}; - -public: - static constexpr unsigned int default_flags = 0; - static constexpr unsigned int default_put_flags = 0; - - /** - * Opens a database handle. - * - * @param txn the transaction handle - * @param name - * @param flags - * @throws lmdb::error on failure - */ - static dbi open(MDB_txn* const txn, const char* const name = nullptr, - const unsigned int flags = default_flags) { - MDB_dbi handle{}; - lmdb::dbi_open(txn, name, flags, &handle); - return dbi{handle}; - } - - /** - * Constructor. - * - * @param handle a valid `MDB_dbi` handle - */ - dbi(const MDB_dbi handle) noexcept : _handle{handle} { - } - - /** - * Move constructor. - */ - dbi(dbi&& other) noexcept { - std::swap(_handle, other._handle); - } - - /** - * Move assignment operator. - */ - dbi& operator=(dbi&& other) noexcept { - if (this != &other) { - std::swap(_handle, other._handle); - } - return *this; - } - - /** - * Destructor. - */ - ~dbi() noexcept { - if (_handle) { - /* No need to call close() here. */ - } - } - - /** - * Returns the underlying `MDB_dbi` handle. - */ - operator MDB_dbi() const noexcept { - return _handle; - } - - /** - * Returns the underlying `MDB_dbi` handle. - */ - MDB_dbi handle() const noexcept { - return _handle; - } - - /** - * Returns statistics for this database. - * - * @param txn a transaction handle - * @throws lmdb::error on failure - */ - MDB_stat stat(MDB_txn* const txn) const { - MDB_stat result; - lmdb::dbi_stat(txn, handle(), &result); - return result; - } - - /** - * Retrieves the flags for this database handle. - * - * @param txn a transaction handle - * @throws lmdb::error on failure - */ - unsigned int flags(MDB_txn* const txn) const { - unsigned int result{}; - lmdb::dbi_flags(txn, handle(), &result); - return result; - } - - /** - * Returns the number of records in this database. - * - * @param txn a transaction handle - * @throws lmdb::error on failure - */ - std::size_t size(MDB_txn* const txn) const { - return stat(txn).ms_entries; - } - - /** - * @param txn a transaction handle - * @param del - * @throws lmdb::error on failure - */ - void drop(MDB_txn* const txn, const bool del = false) { - lmdb::dbi_drop(txn, handle(), del); - } - - /** - * Sets a custom key comparison function for this database. - * - * @param txn a transaction handle - * @param cmp the comparison function - * @throws lmdb::error on failure - */ - dbi& set_compare(MDB_txn* const txn, MDB_cmp_func* const cmp = nullptr) { - lmdb::dbi_set_compare(txn, handle(), cmp); - return *this; - } - - /** - * Retrieves a key/value pair from this database. - * - * @param txn a transaction handle - * @param key - * @param data - * @throws lmdb::error on failure - */ - bool get(MDB_txn* const txn, const val& key, val& data) { - return lmdb::dbi_get(txn, handle(), key, data); - } - - /** - * Retrieves a key from this database. 
- * - * @param txn a transaction handle - * @param key - * @throws lmdb::error on failure - */ - template - bool get(MDB_txn* const txn, const K& key) const { - const lmdb::val k{&key, sizeof(K)}; - lmdb::val v{}; - return lmdb::dbi_get(txn, handle(), k, v); - } - - /** - * Retrieves a key/value pair from this database. - * - * @param txn a transaction handle - * @param key - * @param val - * @throws lmdb::error on failure - */ - template - bool get(MDB_txn* const txn, const K& key, V& val) const { - const lmdb::val k{&key, sizeof(K)}; - lmdb::val v{}; - const bool result = lmdb::dbi_get(txn, handle(), k, v); - if (result) { - val = *v.data(); - } - return result; - } - - /** - * Retrieves a key/value pair from this database. - * - * @param txn a transaction handle - * @param key a NUL-terminated string key - * @param val - * @throws lmdb::error on failure - */ - template - bool get(MDB_txn* const txn, const char* const key, V& val) const { - const lmdb::val k{key, std::strlen(key)}; - lmdb::val v{}; - const bool result = lmdb::dbi_get(txn, handle(), k, v); - if (result) { - val = *v.data(); - } - return result; - } - - /** - * Stores a key/value pair into this database. - * - * @param txn a transaction handle - * @param key - * @param data - * @param flags - * @throws lmdb::error on failure - */ - bool put(MDB_txn* const txn, const val& key, val& data, - const unsigned int flags = default_put_flags) { - return lmdb::dbi_put(txn, handle(), key, data, flags); - } - - /** - * Stores a key into this database. - * - * @param txn a transaction handle - * @param key - * @param flags - * @throws lmdb::error on failure - */ - template - bool put(MDB_txn* const txn, const K& key, const unsigned int flags = default_put_flags) { - const lmdb::val k{&key, sizeof(K)}; - lmdb::val v{}; - return lmdb::dbi_put(txn, handle(), k, v, flags); - } - - /** - * Stores a key/value pair into this database. - * - * @param txn a transaction handle - * @param key - * @param val - * @param flags - * @throws lmdb::error on failure - */ - template - bool put(MDB_txn* const txn, const K& key, const V& val, - const unsigned int flags = default_put_flags) { - const lmdb::val k{&key, sizeof(K)}; - lmdb::val v{&val, sizeof(V)}; - return lmdb::dbi_put(txn, handle(), k, v, flags); - } - - /** - * Stores a key/value pair into this database. - * - * @param txn a transaction handle - * @param key a NUL-terminated string key - * @param val - * @param flags - * @throws lmdb::error on failure - */ - template - bool put(MDB_txn* const txn, const char* const key, const V& val, - const unsigned int flags = default_put_flags) { - const lmdb::val k{key, std::strlen(key)}; - lmdb::val v{&val, sizeof(V)}; - return lmdb::dbi_put(txn, handle(), k, v, flags); - } - - /** - * Stores a key/value pair into this database. - * - * @param txn a transaction handle - * @param key a NUL-terminated string key - * @param val a NUL-terminated string key - * @param flags - * @throws lmdb::error on failure - */ - bool put(MDB_txn* const txn, const char* const key, const char* const val, - const unsigned int flags = default_put_flags) { - const lmdb::val k{key, std::strlen(key)}; - lmdb::val v{val, std::strlen(val)}; - return lmdb::dbi_put(txn, handle(), k, v, flags); - } - - /** - * Removes a key/value pair from this database. 
- * - * @param txn a transaction handle - * @param key - * @throws lmdb::error on failure - */ - bool del(MDB_txn* const txn, const val& key) { - return lmdb::dbi_del(txn, handle(), key); - } - - /** - * Removes a key/value pair from this database. - * - * @param txn a transaction handle - * @param key - * @throws lmdb::error on failure - */ - template - bool del(MDB_txn* const txn, const K& key) { - const lmdb::val k{&key, sizeof(K)}; - return lmdb::dbi_del(txn, handle(), k); - } -}; - -//////////////////////////////////////////////////////////////////////////////// -/* Resource Interface: Cursors */ - -namespace lmdb { -class cursor; -} - -/** - * Resource class for `MDB_cursor*` handles. - * - * @note Instances of this class are movable, but not copyable. - * @see http://symas.com/mdb/doc/group__internal.html#structMDB__cursor - */ -class lmdb::cursor { -protected: - MDB_cursor* _handle{nullptr}; - -public: - static constexpr unsigned int default_flags = 0; - - /** - * Creates an LMDB cursor. - * - * @param txn the transaction handle - * @param dbi the database handle - * @throws lmdb::error on failure - */ - static cursor open(MDB_txn* const txn, const MDB_dbi dbi) { - MDB_cursor* handle{}; - lmdb::cursor_open(txn, dbi, &handle); -#ifdef LMDBXX_DEBUG - assert(handle != nullptr); -#endif - return cursor{handle}; - } - - /** - * Constructor. - * - * @param handle a valid `MDB_cursor*` handle - */ - cursor(MDB_cursor* const handle) noexcept : _handle{handle} { - } - - /** - * Move constructor. - */ - cursor(cursor&& other) noexcept { - std::swap(_handle, other._handle); - } - - /** - * Move assignment operator. - */ - cursor& operator=(cursor&& other) noexcept { - if (this != &other) { - std::swap(_handle, other._handle); - } - return *this; - } - - /** - * Destructor. - */ - ~cursor() noexcept { - try { - close(); - } catch (...) { - } - } - - /** - * Returns the underlying `MDB_cursor*` handle. - */ - operator MDB_cursor*() const noexcept { - return _handle; - } - - /** - * Returns the underlying `MDB_cursor*` handle. - */ - MDB_cursor* handle() const noexcept { - return _handle; - } - - /** - * Closes this cursor. - * - * @note this method is idempotent - * @post `handle() == nullptr` - */ - void close() noexcept { - if (_handle) { - lmdb::cursor_close(_handle); - _handle = nullptr; - } - } - - /** - * Renews this cursor. - * - * @param txn the transaction scope - * @throws lmdb::error on failure - */ - void renew(MDB_txn* const txn) { - lmdb::cursor_renew(txn, handle()); - } - - /** - * Returns the cursor's transaction handle. - */ - MDB_txn* txn() const noexcept { - return lmdb::cursor_txn(handle()); - } - - /** - * Returns the cursor's database handle. - */ - MDB_dbi dbi() const noexcept { - return lmdb::cursor_dbi(handle()); - } - - /** - * Retrieves a key from the database. - * - * @param key - * @param op - * @throws lmdb::error on failure - */ - bool get(MDB_val* const key, const MDB_cursor_op op) { - return get(key, nullptr, op); - } - - /** - * Retrieves a key from the database. - * - * @param key - * @param op - * @throws lmdb::error on failure - */ - bool get(lmdb::val& key, const MDB_cursor_op op) { - return get(key, nullptr, op); - } - - /** - * Retrieves a key/value pair from the database. 
- * - * @param key - * @param val (may be `nullptr`) - * @param op - * @throws lmdb::error on failure - */ - bool get(MDB_val* const key, MDB_val* const val, const MDB_cursor_op op) { - return lmdb::cursor_get(handle(), key, val, op); - } - - /** - * Retrieves a key/value pair from the database. - * - * @param key - * @param val - * @param op - * @throws lmdb::error on failure - */ - bool get(lmdb::val& key, lmdb::val& val, const MDB_cursor_op op) { - return lmdb::cursor_get(handle(), key, val, op); - } - - /** - * Retrieves a key/value pair from the database. - * - * @param key - * @param val - * @param op - * @throws lmdb::error on failure - */ - bool get(std::string& key, std::string& val, const MDB_cursor_op op) { - lmdb::val k{}, v{}; - const bool found = get(k, v, op); - if (found) { - key.assign(k.data(), k.size()); - val.assign(v.data(), v.size()); - } - return found; - } - - /** - * Positions this cursor at the given key. - * - * @param key - * @param op - * @throws lmdb::error on failure - */ - template - bool find(const K& key, const MDB_cursor_op op = MDB_SET) { - lmdb::val k{&key, sizeof(K)}; - return get(k, nullptr, op); - } -}; - -//////////////////////////////////////////////////////////////////////////////// - -#endif /* LMDBXX_H */ diff --git a/include/leanstore/buffer-manager/Swip.hpp b/include/leanstore/buffer-manager/Swip.hpp index e8eb5765..8d144da7 100644 --- a/include/leanstore/buffer-manager/Swip.hpp +++ b/include/leanstore/buffer-manager/Swip.hpp @@ -27,7 +27,7 @@ class Swip { }; //! Create an empty swip. - Swip() : mPageId(0) {}; + Swip() : mPageId(0){}; //! Create an swip pointing to the buffer frame. Swip(BufferFrame* bf) : mBf(bf) { From 3c9e1a43d9c889dcc51898cbab560086340b06f5 Mon Sep 17 00:00:00 2001 From: Jian Zhang Date: Fri, 23 Aug 2024 14:13:31 +0800 Subject: [PATCH 4/4] chore: add codecov config --- benchmarks/ycsb/YcsbFlags.cpp | 2 +- codecov.yml | 25 +++ include/leanstore-c/StoreOption.h | 268 +++++++++++++++--------------- src/leanstore-c/StoreOption.cpp | 4 +- 4 files changed, 162 insertions(+), 137 deletions(-) create mode 100644 codecov.yml diff --git a/benchmarks/ycsb/YcsbFlags.cpp b/benchmarks/ycsb/YcsbFlags.cpp index e6fc0ddc..582529b3 100644 --- a/benchmarks/ycsb/YcsbFlags.cpp +++ b/benchmarks/ycsb/YcsbFlags.cpp @@ -5,7 +5,7 @@ DEFINE_string(ycsb_target, "leanstore", "Ycsb target, available: unordered_map, leanstore, rocksdb, leveldb"); DEFINE_string(ycsb_cmd, "run", "Ycsb command, available: run, load"); DEFINE_string(ycsb_workload, "a", "Ycsb workload, available: a, b, c, d, e, f"); -DEFINE_uint32(ycsb_threads, 4, "WorkerContext threads"); +DEFINE_uint32(ycsb_threads, 4, "Worker threads"); DEFINE_uint64(ycsb_mem_kb, 1, "Max memory in KB to use"); DEFINE_uint64(ycsb_run_for_seconds, 300, "Run the benchmark for x seconds"); diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000..6eda0727 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,25 @@ +coverage: + round: down + range: 30..100 + precision: 2 + status: + patch: + default: + target: 60% + threshold: 1% + base: auto + project: + default: + target: 57% + threshold: 1% + base: auto + +ignore: + - "benchmarks" + - "build" + - "dist" + - "docker" + - "docs" + - "examples" + - "scripts" + - "tests" diff --git a/include/leanstore-c/StoreOption.h b/include/leanstore-c/StoreOption.h index db6b5685..d625983c 100644 --- a/include/leanstore-c/StoreOption.h +++ b/include/leanstore-c/StoreOption.h @@ -8,194 +8,194 @@ extern "C" { #endif -//! 
The log level -typedef enum LogLevel { - kDebug = 0, - kInfo, - kWarn, - kError, -} LogLevel; + //! The log level + typedef enum LogLevel { + kDebug = 0, + kInfo, + kWarn, + kError, + } LogLevel; -//! The options for creating a new store. -typedef struct StoreOption { - // --------------------------------------------------------------------------- - // Store related options - // --------------------------------------------------------------------------- + //! The options for creating a new store. + typedef struct StoreOption { + // --------------------------------------------------------------------------- + // Store related options + // --------------------------------------------------------------------------- - //! Whether to create store from scratch. - bool mCreateFromScratch; + //! Whether to create store from scratch. + bool mCreateFromScratch; - //! The directory for all the database files. - const char* mStoreDir; + //! The directory for all the database files. + const char* mStoreDir; - // --------------------------------------------------------------------------- - // log related options - // --------------------------------------------------------------------------- + // --------------------------------------------------------------------------- + // log related options + // --------------------------------------------------------------------------- - //! The log level - LogLevel mLogLevel; + //! The log level + LogLevel mLogLevel; - // --------------------------------------------------------------------------- - // WorkerContext thread related options - // --------------------------------------------------------------------------- + // --------------------------------------------------------------------------- + // Worker thread related options + // --------------------------------------------------------------------------- - //! The number of worker threads. - uint64_t mWorkerThreads; + //! The number of worker threads. + uint64_t mWorkerThreads; - //! The WAL buffer size for each worker (bytes). - uint64_t mWalBufferSize; + //! The WAL buffer size for each worker (bytes). + uint64_t mWalBufferSize; - // --------------------------------------------------------------------------- - // Buffer pool related options - // --------------------------------------------------------------------------- + // --------------------------------------------------------------------------- + // Buffer pool related options + // --------------------------------------------------------------------------- - //! The page size (bytes). For buffer manager. - uint64_t mPageSize; + //! The page size (bytes). For buffer manager. + uint64_t mPageSize; - uint64_t mBufferFrameSize; + uint64_t mBufferFrameSize; - //! The number of partitions. For buffer manager. - uint32_t mNumPartitions; + //! The number of partitions. For buffer manager. + uint32_t mNumPartitions; - //! The buffer pool size (bytes). For buffer manager. - uint64_t mBufferPoolSize; + //! The buffer pool size (bytes). For buffer manager. + uint64_t mBufferPoolSize; - //! The free percentage of the buffer pool. In the range of [0, 100]. - uint32_t mFreePct; + //! The free percentage of the buffer pool. In the range of [0, 100]. + uint32_t mFreePct; - //! The number of page evictor threads. - uint32_t mNumBufferProviders; + //! The number of page evictor threads. + uint32_t mNumBufferProviders; - //! The async buffer - uint32_t mBufferWriteBatchSize; + //! The async buffer + uint32_t mBufferWriteBatchSize; - //! 
Whether to perform crc check for buffer frames. - bool mEnableBufferCrcCheck; + //! Whether to perform crc check for buffer frames. + bool mEnableBufferCrcCheck; - //! BufferFrame recycle batch size. Everytime a batch of buffer frames is - //! randomly picked and verified by page evictors, some of them are COOLed, - //! some of them are EVICted. - uint64_t mBufferFrameRecycleBatchSize; + //! BufferFrame recycle batch size. Everytime a batch of buffer frames is + //! randomly picked and verified by page evictors, some of them are COOLed, + //! some of them are EVICted. + uint64_t mBufferFrameRecycleBatchSize; - //! Whether to reclaim unused free page ids - bool mEnableReclaimPageIds; + //! Whether to reclaim unused free page ids + bool mEnableReclaimPageIds; - // --------------------------------------------------------------------------- - // Logging and recovery related options - // --------------------------------------------------------------------------- + // --------------------------------------------------------------------------- + // Logging and recovery related options + // --------------------------------------------------------------------------- - //! Whether to enable write-ahead log. - bool mEnableWal; + //! Whether to enable write-ahead log. + bool mEnableWal; - //! Whether to execute fsync after each WAL write. - bool mEnableWalFsync; + //! Whether to execute fsync after each WAL write. + bool mEnableWalFsync; - // --------------------------------------------------------------------------- - // Generic BTree related options - // --------------------------------------------------------------------------- + // --------------------------------------------------------------------------- + // Generic BTree related options + // --------------------------------------------------------------------------- - //! Whether to enable bulk insert. - bool mEnableBulkInsert; + //! Whether to enable bulk insert. + bool mEnableBulkInsert; - //! Whether to enable X-Merge - bool mEnableXMerge; + //! Whether to enable X-Merge + bool mEnableXMerge; - //! The number of children to merge in X-Merge - uint64_t mXMergeK; + //! The number of children to merge in X-Merge + uint64_t mXMergeK; - //! The target percentage of the number of children to merge in X-Merge - double mXMergeTargetPct; + //! The target percentage of the number of children to merge in X-Merge + double mXMergeTargetPct; - //! Whether to enable contention split. - bool mEnableContentionSplit; + //! Whether to enable contention split. + bool mEnableContentionSplit; - //! Contention split probability, as exponent of 2 - uint64_t mContentionSplitProbility; + //! Contention split probability, as exponent of 2 + uint64_t mContentionSplitProbility; - //! Contention stats sample probability, as exponent of 2 - uint64_t mContentionSplitSampleProbability; + //! Contention stats sample probability, as exponent of 2 + uint64_t mContentionSplitSampleProbability; - //! Contention percentage to trigger the split, in the range of [0, 100]. - uint64_t mContentionSplitThresholdPct; + //! Contention percentage to trigger the split, in the range of [0, 100]. + uint64_t mContentionSplitThresholdPct; - //! Whether to enable btree hints optimization. Available options: - //! 0: disabled - //! 1: serial - //! 2: AVX512 - int64_t mBTreeHints; + //! Whether to enable btree hints optimization. Available options: + //! 0: disabled + //! 1: serial + //! 2: AVX512 + int64_t mBTreeHints; - //! Whether to enable heads optimization in LowerBound search. 
- bool mEnableHeadOptimization; + //! Whether to enable heads optimization in LowerBound search. + bool mEnableHeadOptimization; - //! Whether to enable optimistic scan. Jump to next leaf directly if the - //! pointer in the parent has not changed - bool mEnableOptimisticScan; + //! Whether to enable optimistic scan. Jump to next leaf directly if the + //! pointer in the parent has not changed + bool mEnableOptimisticScan; - // --------------------------------------------------------------------------- - // Transaction related options - // --------------------------------------------------------------------------- + // --------------------------------------------------------------------------- + // Transaction related options + // --------------------------------------------------------------------------- - //! Whether to enable long running transaction. - bool mEnableLongRunningTx; + //! Whether to enable long running transaction. + bool mEnableLongRunningTx; - //! Whether to enable fat tuple. - bool mEnableFatTuple; + //! Whether to enable fat tuple. + bool mEnableFatTuple; - //! Whether to enable garbage collection. - bool mEnableGc; + //! Whether to enable garbage collection. + bool mEnableGc; - //! Whether to enable eager garbage collection. To enable eager garbage - //! collection, the garbage collection must be enabled first. Once enabled, - //! the garbage collection will be triggered after each transaction commit and - //! abort. - bool mEnableEagerGc; + //! Whether to enable eager garbage collection. To enable eager garbage + //! collection, the garbage collection must be enabled first. Once enabled, + //! the garbage collection will be triggered after each transaction commit and + //! abort. + bool mEnableEagerGc; - // --------------------------------------------------------------------------- - // Metrics related options - // --------------------------------------------------------------------------- + // --------------------------------------------------------------------------- + // Metrics related options + // --------------------------------------------------------------------------- - //! Whether to enable metrics. - bool mEnableMetrics; + //! Whether to enable metrics. + bool mEnableMetrics; - //! The metrics port. - int32_t mMetricsPort; + //! The metrics port. + int32_t mMetricsPort; - //! Whether to enable cpu counters. - bool mEnableCpuCounters; + //! Whether to enable cpu counters. + bool mEnableCpuCounters; - //! Whether to enable time measure. - bool mEnableTimeMeasure; + //! Whether to enable time measure. + bool mEnableTimeMeasure; - //! Whether to enable perf events. - bool mEnablePerfEvents; + //! Whether to enable perf events. + bool mEnablePerfEvents; -} StoreOption; + } StoreOption; -//! Create a new store option. -//! @param storeDir the directory for all the database files. The string content is deep copied to -//! the created store option. -StoreOption* CreateStoreOption(const char* storeDir); + //! Create a new store option. + //! @param storeDir the directory for all the database files. The string content is deep copied to + //! the created store option. + StoreOption* CreateStoreOption(const char* storeDir); -//! Create a new store option from an existing store option. -//! @param storeDir the existing store option. -StoreOption* CreateStoreOptionFrom(const StoreOption* storeDir); + //! Create a new store option from an existing store option. + //! @param storeDir the existing store option. 
+ StoreOption* CreateStoreOptionFrom(const StoreOption* storeDir); -//! Destroy a store option. -//! @param option the store option to destroy. -void DestroyStoreOption(const StoreOption* option); + //! Destroy a store option. + //! @param option the store option to destroy. + void DestroyStoreOption(const StoreOption* option); -//! The options for creating a new BTree. -typedef struct BTreeConfig { - //! Whether to enable write-ahead log. - bool mEnableWal; + //! The options for creating a new BTree. + typedef struct BTreeConfig { + //! Whether to enable write-ahead log. + bool mEnableWal; - //! Whether to enable bulk insert. - bool mUseBulkInsert; + //! Whether to enable bulk insert. + bool mUseBulkInsert; -} BTreeConfig; + } BTreeConfig; #ifdef __cplusplus } #endif -#endif // LEANSTORE_STORE_OPTION_H \ No newline at end of file +#endif // LEANSTORE_STORE_OPTION_H diff --git a/src/leanstore-c/StoreOption.cpp b/src/leanstore-c/StoreOption.cpp index ce4904a8..61df1a9b 100644 --- a/src/leanstore-c/StoreOption.cpp +++ b/src/leanstore-c/StoreOption.cpp @@ -11,7 +11,7 @@ static const StoreOption kDefaultStoreOption = { // log related options .mLogLevel = LogLevel::kInfo, - // WorkerContext thread related options + // Worker thread related options .mWorkerThreads = 4, .mWalBufferSize = 10 * 1024 * 1024, @@ -96,4 +96,4 @@ void DestroyStoreOption(const StoreOption* option) { } delete option; } -} \ No newline at end of file +}