From c7d58a419127c85c20231336dd027f3632210b6a Mon Sep 17 00:00:00 2001 From: RobinTF <83676088+RobinTF@users.noreply.github.com> Date: Wed, 7 Aug 2024 23:25:34 +0200 Subject: [PATCH] Rework code (again) so that generator does not get cached --- src/engine/Operation.cpp | 104 +++---- src/engine/Operation.h | 7 +- src/engine/QueryExecutionContext.h | 31 +- src/engine/QueryExecutionTree.cpp | 7 +- src/engine/Result.cpp | 180 +++-------- src/engine/Result.h | 67 +--- src/index/CompressedRelation.cpp | 11 +- src/util/Cache.h | 58 +--- src/util/CacheableGenerator.h | 261 ++-------------- src/util/ConcurrentCache.h | 64 ++-- src/util/IteratorWrapper.h | 39 --- test/CMakeLists.txt | 2 - test/CacheTest.cpp | 184 ----------- test/CacheableGeneratorTest.cpp | 408 +------------------------ test/ConcurrentCacheTest.cpp | 42 ++- test/ExportQueryExecutionTreesTest.cpp | 55 ++-- test/IteratorWrapperTest.cpp | 52 ---- test/SparqlDataTypesTest.cpp | 17 +- 18 files changed, 247 insertions(+), 1342 deletions(-) delete mode 100644 src/util/IteratorWrapper.h delete mode 100644 test/IteratorWrapperTest.cpp diff --git a/src/engine/Operation.cpp b/src/engine/Operation.cpp index c8dab08d9e..61d9660e15 100644 --- a/src/engine/Operation.cpp +++ b/src/engine/Operation.cpp @@ -127,57 +127,33 @@ ProtoResult Operation::runComputation(ad_utility::Timer& timer, // _____________________________________________________________________________ CacheValue Operation::runComputationAndTransformToCache( ad_utility::Timer& timer, ComputationMode computationMode, - const std::string& cacheKey) { + const std::string& cacheKey, bool pinned) { auto& cache = _executionContext->getQueryTreeCache(); - CacheableResult result{runComputation(timer, computationMode), - cache.getMaxSizeSingleEntry().getBytes()}; - if (!result.isDataEvaluated()) { - result.setOnSizeChanged( - [&cache, cacheKey, runtimeInfo = getRuntimeInfoPointer()]( - std::optional duration) { - cache.recomputeSize(cacheKey); - if 
(duration.has_value()) { - runtimeInfo->totalTime_ += duration.value(); - } - }); - } - return CacheValue{std::move(result), runtimeInfo()}; -} - -// _____________________________________________________________________________ -Result Operation::extractFromCache( - std::shared_ptr result, bool freshlyInserted, - bool isRoot, ComputationMode computationMode) { - if (result->isDataEvaluated()) { - auto resultNumRows = result->idTable().size(); - auto resultNumCols = result->idTable().numColumns(); - LOG(DEBUG) << "Computed result of size " << resultNumRows << " x " - << resultNumCols << std::endl; - } - - if (result->isDataEvaluated()) { - return Result::createResultWithFullyEvaluatedIdTable(std::move(result)); - } - - if (freshlyInserted) { - return Result::createResultAsMasterConsumer( - std::move(result), - isRoot ? std::function{[this]() { signalQueryUpdate(); }} - : std::function{}); - } - // TODO timer does not make sense here. - ad_utility::Timer timer{ad_utility::Timer::Started}; - return Result::createResultWithFallback( - std::move(result), - [this, timer = std::move(timer), computationMode]() mutable { - return runComputation(timer, computationMode); + auto result = Result::fromProtoResult( + runComputation(timer, computationMode), + [&cache](const IdTable& idTable) { + return cache.getMaxSizeSingleEntry() >= CacheValue::getSize(idTable); }, - [this, isRoot](auto duration) { + [this, &cache, cacheKey, pinned](Result aggregatedResult) { + cache.tryInsertIfNotPresent( + pinned, cacheKey, + CacheValue{std::move(aggregatedResult), runtimeInfo()}); + }); + /* + TODO incorporate time calculations and query updates. 
runtimeInfo().totalTime_ += duration; if (isRoot) { signalQueryUpdate(); } - }); + */ + if (result.isDataEvaluated()) { + auto resultNumRows = result.idTable().size(); + auto resultNumCols = result.idTable().numColumns(); + LOG(DEBUG) << "Computed result of size " << resultNumRows << " x " + << resultNumCols << std::endl; + } + + return CacheValue{std::move(result), runtimeInfo()}; } // ________________________________________________________________________ @@ -212,46 +188,34 @@ std::shared_ptr Operation::getResult( updateRuntimeInformationOnFailure(timer.msecs()); } }); - bool actuallyComputed = false; - auto cacheSetup = [this, &timer, computationMode, &actuallyComputed, - &cacheKey]() { - actuallyComputed = true; - return runComputationAndTransformToCache(timer, computationMode, - cacheKey); + auto cacheSetup = [this, &timer, computationMode, &cacheKey, pinResult]() { + return runComputationAndTransformToCache(timer, computationMode, cacheKey, + pinResult); }; - using ad_utility::CachePolicy; + auto suitedForCache = [](const CacheValue& cacheValue) { + return cacheValue.resultTable().isDataEvaluated(); + }; - CachePolicy cachePolicy = computationMode == ComputationMode::ONLY_IF_CACHED - ? CachePolicy::neverCompute - : CachePolicy::computeOnDemand; + bool onlyReadFromCache = computationMode == ComputationMode::ONLY_IF_CACHED; auto result = - pinResult ? cache.computeOncePinned(cacheKey, cacheSetup, cachePolicy) - : cache.computeOnce(cacheKey, cacheSetup, cachePolicy); + pinResult ? 
cache.computeOncePinned(cacheKey, cacheSetup, + onlyReadFromCache, suitedForCache) + : cache.computeOnce(cacheKey, cacheSetup, onlyReadFromCache, + suitedForCache); if (result._resultPointer == nullptr) { - AD_CORRECTNESS_CHECK(cachePolicy == CachePolicy::neverCompute); + AD_CORRECTNESS_CHECK(onlyReadFromCache); return nullptr; } - if (!result._resultPointer->resultTable().isDataEvaluated() && - computationMode == ComputationMode::FULLY_MATERIALIZED) { - AD_CORRECTNESS_CHECK(!actuallyComputed); - result = pinResult ? cache.computeOncePinned(cacheKey, cacheSetup, - CachePolicy::alwaysCompute) - : cache.computeOnce(cacheKey, cacheSetup, - CachePolicy::alwaysCompute); - } - updateRuntimeInformationOnSuccess( result, result._resultPointer->resultTable().isDataEvaluated() ? timer.msecs() : result._resultPointer->runtimeInfo().totalTime_); - return std::make_shared( - extractFromCache(result._resultPointer->resultTablePtr(), - actuallyComputed, isRoot, computationMode)); + return result._resultPointer->resultTablePtr(); } catch (ad_utility::CancellationException& e) { e.setOperation(getDescriptor()); runtimeInfo().status_ = RuntimeInformation::Status::cancelled; diff --git a/src/engine/Operation.h b/src/engine/Operation.h index 242355831e..2eb70d647e 100644 --- a/src/engine/Operation.h +++ b/src/engine/Operation.h @@ -265,11 +265,8 @@ class Operation { CacheValue runComputationAndTransformToCache(ad_utility::Timer& timer, ComputationMode computationMode, - const std::string& cacheKey); - - Result extractFromCache(std::shared_ptr result, - bool freshlyInserted, bool isRoot, - ComputationMode computationMode); + const std::string& cacheKey, + bool pinned); // Create and store the complete runtime information for this operation after // it has either been successfully computed or read from the cache. 
diff --git a/src/engine/QueryExecutionContext.h b/src/engine/QueryExecutionContext.h index b7b54af6bf..0c17ea684b 100644 --- a/src/engine/QueryExecutionContext.h +++ b/src/engine/QueryExecutionContext.h @@ -22,13 +22,12 @@ class CacheValue { private: - std::shared_ptr resultTable_; + std::shared_ptr result_; RuntimeInformation runtimeInfo_; public: - explicit CacheValue(CacheableResult resultTable, - RuntimeInformation runtimeInfo) - : resultTable_{std::make_shared(std::move(resultTable))}, + explicit CacheValue(Result result, RuntimeInformation runtimeInfo) + : result_{std::make_shared(std::move(result))}, runtimeInfo_{std::move(runtimeInfo)} {} CacheValue(CacheValue&&) = default; @@ -36,34 +35,26 @@ class CacheValue { CacheValue& operator=(CacheValue&&) = default; CacheValue& operator=(const CacheValue&) = delete; - const CacheableResult& resultTable() const noexcept { return *resultTable_; } + const Result& resultTable() const noexcept { return *result_; } - std::shared_ptr resultTablePtr() const noexcept { - return resultTable_; + std::shared_ptr resultTablePtr() const noexcept { + return result_; } const RuntimeInformation& runtimeInfo() const noexcept { return runtimeInfo_; } - ~CacheValue() { - if (resultTable_ && !resultTable_->isDataEvaluated()) { - // Clear listeners - try { - resultTable_->setOnSizeChanged({}); - } catch (...) { - // Should never happen. The listeners only throw assertion errors - // if the result is evaluated. - std::exit(1); - } - } + static ad_utility::MemorySize getSize(const IdTable& idTable) { + return ad_utility::MemorySize::bytes(idTable.size() * idTable.numColumns() * + sizeof(Id)); } // Calculates the `MemorySize` taken up by an instance of `CacheValue`. 
struct SizeGetter { ad_utility::MemorySize operator()(const CacheValue& cacheValue) const { - if (const auto& tablePtr = cacheValue.resultTable_; tablePtr) { - return tablePtr->getCurrentSize(); + if (const auto& resultPtr = cacheValue.result_; resultPtr) { + return getSize(resultPtr->idTable()); } else { return 0_B; } diff --git a/src/engine/QueryExecutionTree.cpp b/src/engine/QueryExecutionTree.cpp index dc018c8430..12392aeb6e 100644 --- a/src/engine/QueryExecutionTree.cpp +++ b/src/engine/QueryExecutionTree.cpp @@ -119,12 +119,7 @@ void QueryExecutionTree::readFromCache() { auto& cache = qec_->getQueryTreeCache(); auto res = cache.getIfContained(getCacheKey()); if (res.has_value()) { - auto resultTable = res->_resultPointer->resultTablePtr(); - if (resultTable->isDataEvaluated()) { - cachedResult_ = std::make_shared( - Result::createResultWithFullyEvaluatedIdTable( - std::move(resultTable))); - } + cachedResult_ = res->_resultPointer->resultTablePtr(); } } diff --git a/src/engine/Result.cpp b/src/engine/Result.cpp index fe70f4479a..f530944f67 100644 --- a/src/engine/Result.cpp +++ b/src/engine/Result.cpp @@ -7,8 +7,8 @@ #include "engine/Result.h" #include "engine/LocalVocab.h" +#include "util/CacheableGenerator.h" #include "util/Exception.h" -#include "util/IteratorWrapper.h" #include "util/Log.h" #include "util/Timer.h" @@ -209,69 +209,61 @@ const IdTable& ProtoResult::idTable() const { return storage_.idTable(); } bool ProtoResult::isDataEvaluated() const noexcept { return storage_.isDataEvaluated(); } -// _____________________________________________________________________________ -CacheableResult::CacheableResult(ProtoResult protoResult, - uint64_t maxElementSize) - : storage_{StorageType{ - protoResult.isDataEvaluated() - ? 
decltype(StorageType::data_){std::move( - protoResult.storage_.idTable())} - : decltype(StorageType::data_){ad_utility::CacheableGenerator< - IdTable, SizeCalculator>{ - std::move(protoResult.storage_.idTables())}}, - std::move(protoResult.storage_.sortedBy_), - std::move(protoResult.storage_.localVocab_), - }} { - if (!storage_.isDataEvaluated()) { - storage_.idTables().setMaxSize(maxElementSize); - } -} - -// _____________________________________________________________________________ -void CacheableResult::setOnSizeChanged( - std::function duration)> - onSizeChanged) { - storage_.idTables().setOnSizeChanged(std::move(onSizeChanged)); -} - -// _____________________________________________________________________________ -const IdTable& CacheableResult::idTable() const { return storage_.idTable(); } - -// _____________________________________________________________________________ -const ad_utility::CacheableGenerator& -CacheableResult::idTables() const { - return storage_.idTables(); -} // _____________________________________________________________________________ -bool CacheableResult::isDataEvaluated() const noexcept { - return storage_.isDataEvaluated(); -} - -// _____________________________________________________________________________ -ad_utility::MemorySize CacheableResult::getCurrentSize() const { - return ad_utility::MemorySize::bytes( - storage_.isDataEvaluated() ? 
SizeCalculator{}(storage_.idTable()) - : storage_.idTables().getCurrentSize()); -} - -// _____________________________________________________________________________ -Result::Result(std::shared_ptr idTable, - std::vector sortedBy, LocalVocabPtr localVocab) +Result::Result(IdTable idTable, std::vector sortedBy, + LocalVocabPtr localVocab) : storage_{StorageType{std::move(idTable), std::move(sortedBy), std::move(localVocab)}} {} // _____________________________________________________________________________ -Result::Result(cppcoro::generator idTables, +Result::Result(cppcoro::generator idTables, std::vector sortedBy, LocalVocabPtr localVocab) : storage_{StorageType{std::move(idTables), std::move(sortedBy), std::move(localVocab)}} {} // _____________________________________________________________________________ -const IdTable& Result::idTable() const { return *storage_.idTable(); } +Result Result::fromProtoResult(ProtoResult protoResult, + std::function fitsInCache, + std::function storeInCache) { + if (protoResult.isDataEvaluated()) { + return Result{std::move(protoResult.storage_.idTable()), + std::move(protoResult.storage_.sortedBy_), + std::move(protoResult.storage_.localVocab_)}; + } + auto sortedByCopy = protoResult.storage_.sortedBy_; + auto localVocabReference = protoResult.storage_.localVocab_; + return Result{ + ad_utility::wrapGeneratorWithCache( + std::move(protoResult.storage_.idTables()), + [fitsInCache = std::move(fitsInCache)]( + std::optional& aggregate, const IdTable& newTable) { + if (aggregate.has_value()) { + aggregate.value().insertAtEnd(newTable); + } else { + aggregate.emplace(newTable.clone()); + } + return fitsInCache(aggregate.value()); + }, + [storeInCache = std::move(storeInCache), + sortedByCopy = std::move(sortedByCopy), + localVocabReference = std::move(localVocabReference)]( + std::optional idTable) mutable { + if (idTable.has_value()) { + storeInCache(Result{std::move(idTable).value(), + std::move(sortedByCopy), + 
std::move(localVocabReference)}); + } + }), + std::move(protoResult.storage_.sortedBy_), + std::move(protoResult.storage_.localVocab_)}; +} + +// _____________________________________________________________________________ +const IdTable& Result::idTable() const { return storage_.idTable(); } // _____________________________________________________________________________ -cppcoro::generator& Result::idTables() const { +cppcoro::generator& Result::idTables() const { return storage_.idTables(); } @@ -312,89 +304,3 @@ string Result::asDebugString() const { } return std::move(os).str(); } - -// _____________________________________________________________________________ -Result Result::createResultWithFullyEvaluatedIdTable( - std::shared_ptr cacheableResult) { - AD_CONTRACT_CHECK(cacheableResult->isDataEvaluated()); - auto sortedBy = cacheableResult->storage_.sortedBy_; - auto localVocab = cacheableResult->storage_.localVocab_; - const IdTable* tablePointer = &cacheableResult->idTable(); - return Result{ - std::shared_ptr{std::move(cacheableResult), tablePointer}, - std::move(sortedBy), std::move(localVocab)}; -} - -// _____________________________________________________________________________ -Result Result::createResultWithFallback( - std::shared_ptr original, - std::function fallback, - std::function onIteration) { - AD_CONTRACT_CHECK(!original->isDataEvaluated()); - auto generator = [](std::shared_ptr sharedResult, - std::function fallback, - auto onIteration) -> cppcoro::generator { - size_t index = 0; - try { - for (auto&& idTable : sharedResult->storage_.idTables()) { - co_yield *idTable; - index++; - } - co_return; - } catch (const ad_utility::IteratorExpired&) { - // co_yield is not allowed here, so simply ignore this and allow control - // flow to take over - } catch (...) { - throw; - } - ProtoResult freshResult = fallback(); - // If data is evaluated this means that this process is not deterministic - // or that there's a wrong callback used here. 
- AD_CORRECTNESS_CHECK(!freshResult.isDataEvaluated()); - auto start = std::chrono::steady_clock::now(); - for (auto&& idTable : freshResult.storage_.idTables()) { - auto stop = std::chrono::steady_clock::now(); - if (onIteration) { - onIteration(std::chrono::duration_cast( - stop - start)); - } - if (index > 0) { - index--; - continue; - } - co_yield idTable; - start = std::chrono::steady_clock::now(); - } - auto stop = std::chrono::steady_clock::now(); - if (onIteration) { - onIteration( - std::chrono::duration_cast(stop - start)); - } - }; - return Result{ - generator(original, std::move(fallback), std::move(onIteration)), - original->storage_.sortedBy_, original->storage_.localVocab_}; -} - -// _____________________________________________________________________________ -Result Result::createResultAsMasterConsumer( - std::shared_ptr original, - std::function onIteration) { - AD_CONTRACT_CHECK(!original->isDataEvaluated()); - auto generator = [](auto original, - auto onIteration) -> cppcoro::generator { - using ad_utility::IteratorWrapper; - auto& generator = original->storage_.idTables(); - for (std::shared_ptr idTable : - IteratorWrapper{generator, true}) { - if (onIteration) { - onIteration(); - } - co_yield *idTable; - } - }; - auto sortedBy = original->storage_.sortedBy_; - auto localVocab = original->storage_.localVocab_; - return Result{generator(std::move(original), std::move(onIteration)), - std::move(sortedBy), std::move(localVocab)}; -} diff --git a/src/engine/Result.h b/src/engine/Result.h index 25173c6375..a7cf837320 100644 --- a/src/engine/Result.h +++ b/src/engine/Result.h @@ -20,7 +20,6 @@ template class ResultStorage { friend class ProtoResult; - friend class CacheableResult; friend class Result; using Data = std::variant; @@ -67,7 +66,6 @@ class ResultStorage { }; class ProtoResult { - friend class CacheableResult; friend class Result; using StorageType = ResultStorage>; StorageType storage_; @@ -94,7 +92,6 @@ class ProtoResult { explicit 
SharedLocalVocabWrapper(LocalVocabPtr localVocab) : localVocab_{std::move(localVocab)} {} friend ProtoResult; - friend class CacheableResult; friend class Result; public: @@ -170,59 +167,21 @@ class ProtoResult { bool isDataEvaluated() const noexcept; }; -class CacheableResult { - friend class Result; - - struct SizeCalculator { - uint64_t operator()(const IdTable& idTable) const { - return idTable.size() * idTable.numColumns() * sizeof(Id); - } - }; - - using StorageType = - ResultStorage>; - StorageType storage_; - - public: - CacheableResult(const CacheableResult& other) = delete; - CacheableResult& operator=(const CacheableResult& other) = delete; - - CacheableResult(CacheableResult&& other) = default; - CacheableResult& operator=(CacheableResult&& other) = default; - - CacheableResult(ProtoResult protoResult, uint64_t maxElementSize); - - void setOnSizeChanged( - std::function)> - onSizeChanged); - - const IdTable& idTable() const; - - const ad_utility::CacheableGenerator& idTables() - const; - - bool isDataEvaluated() const noexcept; - - ad_utility::MemorySize getCurrentSize() const; -}; - // The result of an `Operation`. This is the class QLever uses for all // intermediate or final results when processing a SPARQL query. The actual data // is always a table and contained in the member `idTable()`. 
class Result { private: - using StorageType = ResultStorage, - cppcoro::generator>; + using StorageType = ResultStorage>; mutable StorageType storage_; using LocalVocabPtr = std::shared_ptr; using SharedLocalVocabWrapper = ProtoResult::SharedLocalVocabWrapper; - Result(std::shared_ptr idTable, - std::vector sortedBy, LocalVocabPtr localVocab); - Result(cppcoro::generator idTables, + Result(IdTable idTable, std::vector sortedBy, + LocalVocabPtr localVocab); + Result(cppcoro::generator idTables, std::vector sortedBy, LocalVocabPtr localVocab); public: @@ -234,11 +193,15 @@ class Result { Result(Result&& other) = default; Result& operator=(Result&& other) = default; + static Result fromProtoResult(ProtoResult protoResult, + std::function fitsInCache, + std::function storeInCache); + // Const access to the underlying `IdTable`. const IdTable& idTable() const; // Access to the underlying `IdTable`s. - cppcoro::generator& idTables() const; + cppcoro::generator& idTables() const; // Const access to the columns by which the `idTable()` is sorted. const std::vector& sortedBy() const { @@ -295,16 +258,4 @@ class Result { // The first rows of the result and its total size (for debugging). 
string asDebugString() const; - - static Result createResultWithFullyEvaluatedIdTable( - std::shared_ptr cacheableResult); - - static Result createResultWithFallback( - std::shared_ptr original, - std::function fallback, - std::function onIteration); - - static Result createResultAsMasterConsumer( - std::shared_ptr original, - std::function onIteration); }; diff --git a/src/index/CompressedRelation.cpp b/src/index/CompressedRelation.cpp index 482e8d9ffb..493eef57c4 100644 --- a/src/index/CompressedRelation.cpp +++ b/src/index/CompressedRelation.cpp @@ -395,11 +395,12 @@ DecompressedBlock CompressedRelationReader::readPossiblyIncompleteBlock( auto cacheKey = blockMetadata.offsetsAndCompressedSize_.at(0).offsetInFile_; auto sharedResultFromCache = blockCache_ - .computeOnce(cacheKey, - [&]() { - return readAndDecompressBlock(blockMetadata, - allColumns); - }) + .computeOnce( + cacheKey, + [&]() { + return readAndDecompressBlock(blockMetadata, allColumns); + }, + false, [](const auto&) { return true; }) ._resultPointer; const DecompressedBlock& block = *sharedResultFromCache; diff --git a/src/util/Cache.h b/src/util/Cache.h index 2de3a307a2..23750eea16 100644 --- a/src/util/Cache.h +++ b/src/util/Cache.h @@ -6,8 +6,6 @@ #pragma once -#include - #include #include #include @@ -170,8 +168,7 @@ class FlexibleCache { return {}; } Score s = _scoreCalculator(*valPtr); - _totalSizeNonPinned += sizeOfNewEntry; - _sizeMap.emplace(key, sizeOfNewEntry); + _totalSizeNonPinned += _valueSizeGetter(*valPtr); auto handle = _entries.insert(std::move(s), Entry(key, std::move(valPtr))); _accessMap[key] = handle; // The first value is the value part of the key-value pair in the priority @@ -201,8 +198,7 @@ class FlexibleCache { // Make room for the new entry. 
makeRoomIfFits(sizeOfNewEntry); _pinnedMap[key] = valPtr; - _totalSizePinned += sizeOfNewEntry; - _sizeMap.emplace(key, sizeOfNewEntry); + _totalSizePinned += _valueSizeGetter(*valPtr); return valPtr; } @@ -230,33 +226,6 @@ class FlexibleCache { return _maxSizeSingleEntry; } - void recomputeSize(const Key& key) { - // Pinned entries must not be dynamic in nature - AD_CONTRACT_CHECK(!containsPinned(key)); - if (!containsNonPinned(key)) { - return; - } - auto newSize = _valueSizeGetter(*(*this)[key]); - auto& sizeInMap = _sizeMap.at(key); - // Entry has grown too big to completely keep within the cache or we can't - // fit it in the cache - if (_maxSizeSingleEntry < newSize || - _maxSize - std::min(_totalSizePinned, _maxSize) < newSize) { - erase(key); - return; - } - - // `MemorySize` type does not allow for negative values, but they are safe - // here, so we do it in size_t instead and convert back. - _totalSizeNonPinned = - MemorySize::bytes(_totalSizeNonPinned.getBytes() - - sizeInMap.getBytes() + newSize.getBytes()); - sizeInMap = newSize; - if (_totalSizePinned <= _maxSize) { - makeRoomIfFits(0_B); - } - } - //! Checks if there is an entry with the given key. 
bool contains(const Key& key) const { return containsPinned(key) || containsNonPinned(key); @@ -281,7 +250,7 @@ class FlexibleCache { const ValuePtr valuePtr = handle.value().value(); // adapt the sizes of the pinned and non-pinned part of the cache - auto sz = _sizeMap.at(key); + auto sz = _valueSizeGetter(*valuePtr); _totalSizeNonPinned -= sz; _totalSizePinned += sz; // Move the entry to the _pinnedMap and remove it from the non-pinned data @@ -297,8 +266,7 @@ class FlexibleCache { void erase(const Key& key) { const auto pinnedIt = _pinnedMap.find(key); if (pinnedIt != _pinnedMap.end()) { - _totalSizePinned -= _sizeMap.at(key); - _sizeMap.erase(key); + _totalSizePinned -= _valueSizeGetter(*pinnedIt->second); _pinnedMap.erase(pinnedIt); return; } @@ -309,8 +277,7 @@ class FlexibleCache { return; } // the entry exists in the non-pinned part of the cache, erase it. - _totalSizeNonPinned -= _sizeMap.at(key); - _sizeMap.erase(key); + _totalSizeNonPinned -= _valueSizeGetter(*mapIt->second); _entries.erase(std::move(mapIt->second)); _accessMap.erase(mapIt); } @@ -417,8 +384,8 @@ class FlexibleCache { void removeOneEntry() { AD_CONTRACT_CHECK(!_entries.empty()); auto handle = _entries.pop(); - _totalSizeNonPinned -= _sizeMap.at(handle.value().key()); - _sizeMap.erase(handle.value().key()); + _totalSizeNonPinned = + _totalSizeNonPinned - _valueSizeGetter(*handle.value().value()); _accessMap.erase(handle.value().key()); } size_t _maxNumEntries; @@ -434,17 +401,6 @@ class FlexibleCache { ValueSizeGetter _valueSizeGetter; PinnedMap _pinnedMap; AccessMap _accessMap; - SizeMap _sizeMap; - - FRIEND_TEST(LRUCacheTest, - verifyCacheSizeIsCorrectlyTrackedWhenChangedWhenErased); - - FRIEND_TEST(LRUCacheTest, - verifyCacheSizeIsCorrectlyTrackedWhenChangedWhenErasedPinned); - FRIEND_TEST(LRUCacheTest, verifyCacheSizeIsCorrectlyRecomputed); - FRIEND_TEST(LRUCacheTest, - verifyNonPinnedEntriesAreRemovedToMakeRoomForResize); - FRIEND_TEST(LRUCacheTest, 
verifyRecomputeIsNoOpForNonExistentElement); }; // Partial instantiation of FlexibleCache using the heap-based priority queue diff --git a/src/util/CacheableGenerator.h b/src/util/CacheableGenerator.h index c84aabf0cc..32f456a727 100644 --- a/src/util/CacheableGenerator.h +++ b/src/util/CacheableGenerator.h @@ -4,255 +4,32 @@ #pragma once -#include -#include #include -#include -#include -#include "util/Exception.h" #include "util/Generator.h" -#include "util/Synchronized.h" -#include "util/Timer.h" -#include "util/UniqueCleanup.h" +#include "util/TypeTraits.h" namespace ad_utility { -/// Custom exception type that indicates the consumer took too long to consume -/// the generator. -class IteratorExpired : public std::exception {}; - -/// Lambda-like type that always returns 1 to indicate size 1 for every element -/// in the `CacheableGenerator`. template -struct DefaultSizeCounter { - uint64_t operator()(const std::remove_reference_t&) const { return 1; } -}; - -/// Range-like type that allows multiple consumers to consume the same -/// single-consumption generator asynchronously. 
-template &> - SizeCounter = DefaultSizeCounter> -class CacheableGenerator { - using GenIterator = typename cppcoro::generator::iterator; - - enum class MasterIteratorState { NOT_STARTED, MASTER_STARTED, MASTER_DONE }; - - class ComputationStorage { - friend CacheableGenerator; - mutable std::shared_mutex mutex_; - std::condition_variable_any conditionVariable_; - cppcoro::generator generator_; - std::optional generatorIterator_{}; - std::vector> cachedValues_{}; - MasterIteratorState masterState_ = MasterIteratorState::NOT_STARTED; - SizeCounter sizeCounter_{}; - std::atomic currentSize_ = 0; - uint64_t maxSize_ = std::numeric_limits::max(); - std::function)> - onSizeChanged_{}; - - public: - explicit ComputationStorage(cppcoro::generator generator) - : generator_{std::move(generator)} {} - ComputationStorage(ComputationStorage&& other) = delete; - ComputationStorage(const ComputationStorage& other) = delete; - ComputationStorage& operator=(ComputationStorage&& other) = delete; - ComputationStorage& operator=(const ComputationStorage& other) = delete; - - private: - void advanceTo(size_t index, bool isMaster) { - std::unique_lock lock{mutex_}; - AD_CONTRACT_CHECK(index <= cachedValues_.size()); - // Make sure master iterator does exist and we're not blocking - // indefinitely - if (isMaster) { - AD_CORRECTNESS_CHECK(masterState_ != MasterIteratorState::MASTER_DONE); - masterState_ = MasterIteratorState::MASTER_STARTED; - } else { - AD_CORRECTNESS_CHECK(masterState_ != MasterIteratorState::NOT_STARTED); - } - if (index < cachedValues_.size()) { - if (!cachedValues_.at(index)) { - throw IteratorExpired{}; - } - return; - } - if (generatorIterator_.has_value() && - generatorIterator_.value() == generator_.end()) { - return; - } - if (masterState_ == MasterIteratorState::MASTER_STARTED && !isMaster) { - conditionVariable_.wait(lock, [this, index]() { - return (generatorIterator_.has_value() && - generatorIterator_.value() == generator_.end()) || - index < 
cachedValues_.size(); - }); - return; - } - Timer timer{Timer::Started}; - if (generatorIterator_.has_value()) { - AD_CONTRACT_CHECK(generatorIterator_.value() != generator_.end()); - ++generatorIterator_.value(); - } else { - generatorIterator_ = generator_.begin(); - } - timer.stop(); - if (generatorIterator_.value() != generator_.end()) { - auto pointer = - std::make_shared(std::move(*generatorIterator_.value())); - currentSize_.fetch_add(sizeCounter_(*pointer)); - cachedValues_.push_back(std::move(pointer)); - if (onSizeChanged_) { - onSizeChanged_(std::chrono::milliseconds{timer.msecs()}); - } - tryShrinkCacheIfNeccessary(); - } - if (isMaster) { - lock.unlock(); - conditionVariable_.notify_all(); - } - } - - std::shared_ptr getCachedValue(size_t index) const { - std::shared_lock lock{mutex_}; - if (!cachedValues_.at(index)) { - throw IteratorExpired{}; - } - return cachedValues_.at(index); - } - - // Needs to be public in order to compile with gcc 11 & 12 - public: - bool isDone(size_t index) noexcept { - std::shared_lock lock{mutex_}; - return index >= cachedValues_.size() && generatorIterator_.has_value() && - generatorIterator_.value() == generator_.end(); - } - - private: - void clearMaster() { - std::unique_lock lock{mutex_}; - AD_CORRECTNESS_CHECK(masterState_ != MasterIteratorState::MASTER_DONE); - masterState_ = MasterIteratorState::MASTER_DONE; - lock.unlock(); - conditionVariable_.notify_all(); - } - - void setOnSizeChanged( - std::function)> - onSizeChanged) noexcept { - std::unique_lock lock{mutex_}; - onSizeChanged_ = std::move(onSizeChanged); - } - - void tryShrinkCacheIfNeccessary() { - if (currentSize_ <= maxSize_) { - return; - } - size_t maxBound = cachedValues_.size() - 1; - for (size_t i = 0; i < maxBound; i++) { - auto& pointer = cachedValues_.at(i); - if (pointer) { - currentSize_.fetch_add(sizeCounter_(*pointer)); - pointer.reset(); - if (onSizeChanged_) { - onSizeChanged_(std::nullopt); - } - if (currentSize_ <= maxSize_ || i >= 
maxBound - 1) { - break; - } - } - } - } - - void setMaxSize(uint64_t maxSize) { - std::unique_lock lock{mutex_}; - maxSize_ = maxSize; - } - }; - - std::shared_ptr computationStorage_; - - public: - explicit CacheableGenerator(cppcoro::generator generator) - : computationStorage_{ - std::make_shared(std::move(generator))} {} - - CacheableGenerator(CacheableGenerator&& other) noexcept = default; - CacheableGenerator(const CacheableGenerator& other) noexcept = delete; - CacheableGenerator& operator=(CacheableGenerator&& other) noexcept = default; - CacheableGenerator& operator=(const CacheableGenerator& other) noexcept = - delete; - - class IteratorSentinel {}; - - class Iterator { - size_t currentIndex_ = 0; - unique_cleanup::UniqueCleanup> storage_; - bool isMaster_; - - auto storage() const { - auto pointer = storage_->lock(); - AD_CORRECTNESS_CHECK(pointer); - return pointer; - } - - public: - explicit Iterator(std::weak_ptr storage, bool isMaster) - : storage_{std::move(storage), - [isMaster](auto&& storage) { - if (isMaster) { - auto pointer = storage.lock(); - AD_CORRECTNESS_CHECK(pointer); - pointer->clearMaster(); - } - }}, - isMaster_{isMaster} { - this->storage()->advanceTo(currentIndex_, isMaster); - } - - friend bool operator==(const Iterator& it, IteratorSentinel) noexcept { - return it.storage()->isDone(it.currentIndex_); - } - - friend bool operator==(IteratorSentinel s, const Iterator& it) noexcept { - return (it == s); - } - - Iterator& operator++() { - ++currentIndex_; - storage()->advanceTo(currentIndex_, isMaster_); - return *this; - } - - // Need to provide post-increment operator to implement the 'Range' concept. 
- void operator++(int) { (void)operator++(); } - - std::shared_ptr operator*() const { - return storage()->getCachedValue(currentIndex_); - } - }; - - Iterator begin(bool isMaster = false) const { - return Iterator{computationStorage_, isMaster}; +cppcoro::generator wrapGeneratorWithCache( + cppcoro::generator generator, + InvocableWithExactReturnType&, const T&> auto + aggregator, + InvocableWithExactReturnType> auto onFullyCached) { + std::optional aggregatedData{}; + bool aggregate = true; + for (auto&& element : generator) { + if (aggregate) { + aggregate = aggregator(aggregatedData, element); + if (!aggregate) { + aggregatedData.reset(); + } + } + co_yield AD_FWD(element); } - - IteratorSentinel end() const noexcept { return IteratorSentinel{}; } - - void setOnSizeChanged( - std::function)> - onSizeChanged) noexcept { - computationStorage_->setOnSizeChanged(std::move(onSizeChanged)); - } - - uint64_t getCurrentSize() const { - return computationStorage_->currentSize_.load(); - } - - void setMaxSize(uint64_t maxSize) { - computationStorage_->setMaxSize(maxSize); + if (aggregate) { + onFullyCached(std::move(aggregatedData)); } -}; +} }; // namespace ad_utility diff --git a/src/util/ConcurrentCache.h b/src/util/ConcurrentCache.h index 88837063a3..bc61265c2d 100644 --- a/src/util/ConcurrentCache.h +++ b/src/util/ConcurrentCache.h @@ -43,12 +43,6 @@ enum struct CacheStatus { notInCacheAndNotComputed }; -enum class CachePolicy { - neverCompute, - computeOnDemand, - alwaysCompute, -}; - // Convert a `CacheStatus` to a human-readable string. We mostly use it for // JSON exports, so we use a hyphenated format. 
constexpr std::string_view toString(CacheStatus status) { @@ -131,7 +125,7 @@ class ResultInProgress { std::unique_lock uniqueLock(_mutex); _conditionVariable.wait(uniqueLock, [this] { return _status != Status::IN_PROGRESS; }); - if (_status == ResultInProgress::Status::ABORTED) { + if (_status == Status::ABORTED) { throw WaitedForResultWhichThenFailedException{}; } return _result; @@ -185,13 +179,19 @@ class ConcurrentCache { * it is contained in the cache. Otherwise `nullptr` with a cache status of * `notInCacheNotComputed` will be returned. * @return A shared_ptr to the computation result. + * @param suitedForCache Predicate function that will be applied to newly + * computed value to check if it is suited for caching. + * @return A `ResultAndCacheStatus` shared_ptr to the computation result. * */ ResultAndCacheStatus computeOnce( const Key& key, const InvocableWithConvertibleReturnType auto& computeFunction, - CachePolicy cachePolicy = CachePolicy::computeOnDemand) { - return computeOnceImpl(false, key, computeFunction, cachePolicy); + bool onlyReadFromCache, + const InvocableWithConvertibleReturnType auto& + suitedForCache) { + return computeOnceImpl(false, key, computeFunction, onlyReadFromCache, + suitedForCache); } /// Similar to computeOnce, with the following addition: After the call @@ -199,12 +199,23 @@ class ConcurrentCache { ResultAndCacheStatus computeOncePinned( const Key& key, const InvocableWithConvertibleReturnType auto& computeFunction, - CachePolicy cachePolicy = CachePolicy::computeOnDemand) { - return computeOnceImpl(true, key, computeFunction, cachePolicy); + bool onlyReadFromCache, + const InvocableWithConvertibleReturnType auto& + suitedForCache) { + return computeOnceImpl(true, key, computeFunction, onlyReadFromCache, + suitedForCache); } - void recomputeSize(const Key& key) { - _cacheAndInProgressMap.wlock()->_cache.recomputeSize(key); + void tryInsertIfNotPresent(bool pinned, const Key& key, Value value) { + auto lockPtr = 
_cacheAndInProgressMap.wlock(); + if (pinned) { + if (!lockPtr->_cache.containsAndMakePinnedIfExists(key)) { + lockPtr->_cache.insertPinned(key, + std::make_shared(std::move(value))); + } + } else if (!lockPtr->_cache.contains(key)) { + lockPtr->_cache.insert(key, std::make_shared(std::move(value))); + } } /// Clear the cache (but not the pinned entries) @@ -324,7 +335,9 @@ class ConcurrentCache { ResultAndCacheStatus computeOnceImpl( bool pinned, const Key& key, const InvocableWithConvertibleReturnType auto& computeFunction, - CachePolicy cachePolicy) { + bool onlyReadFromCache, + const InvocableWithConvertibleReturnType auto& + suitedForCache) { using std::make_shared; bool mustCompute; shared_ptr resultInProgress; @@ -341,10 +354,9 @@ class ConcurrentCache { if (contained) { // the result is in the cache, simply return it. return {cache[key], cacheStatus}; - } else if (cachePolicy == CachePolicy::neverCompute) { + } else if (onlyReadFromCache) { return {nullptr, CacheStatus::notInCacheAndNotComputed}; - } else if (lockPtr->_inProgress.contains(key) && - cachePolicy == CachePolicy::computeOnDemand) { + } else if (lockPtr->_inProgress.contains(key)) { // the result is not cached, but someone else is computing it. // it is important, that we do not immediately call getResult() since // this call blocks and we currently hold a lock. @@ -368,7 +380,11 @@ class ConcurrentCache { try { // The actual computation shared_ptr result = make_shared(computeFunction()); - moveFromInProgressToCache(key, result); + if (suitedForCache(*result)) { + moveFromInProgressToCache(key, result); + } else { + _cacheAndInProgressMap.wlock()->_inProgress.erase(key); + } // Signal other threads who are waiting for the results. resultInProgress->finish(result); // result was not cached @@ -384,7 +400,17 @@ class ConcurrentCache { // someone else is computing the result, wait till it is finished and // return the result, we do not count this case as "cached" as we had to // wait. 
- return {resultInProgress->getResult(), CacheStatus::computed}; + auto resultPointer = resultInProgress->getResult(); + if (resultPointer) { + return {std::move(resultPointer), CacheStatus::computed}; + } + // TODO there's a small chance this will hang indefinitely if + // other processes keep submitting non-cacheable entries before this + // thread can acquire the lock to compute the entry on its own. + + // Retry if computed entry unsuited for caching + return computeOnceImpl(pinned, key, computeFunction, false, + suitedForCache); } } diff --git a/src/util/IteratorWrapper.h b/src/util/IteratorWrapper.h deleted file mode 100644 index 2d177fb7b3..0000000000 --- a/src/util/IteratorWrapper.h +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024, University of Freiburg, -// Chair of Algorithms and Data Structures. -// Author: Robin Textor-Falconi - -#pragma once - -#include - -#include "Exception.h" - -namespace ad_utility { - -/// Helper class allowing to use range-like datastructures with arguments for -/// their begin() member function within range-based for loops like this: -/// -/// This calls something.begin(1, 2, 3): -/// for (auto elem : IteratorWrapper{something, 1, 2, 3}) {} -template -class IteratorWrapper { - bool used_ = false; - OriginalIterable& iterable_; - std::tuple args_; - - public: - explicit IteratorWrapper(OriginalIterable& iterator, Args... args) - : iterable_{iterator}, args_{std::move(args)...} {} - - auto begin() { - AD_CONTRACT_CHECK(!used_); - used_ = true; - return std::apply( - [this](auto... 
args) { return iterable_.begin(std::move(args)...); }, - std::move(args_)); - } - - auto end() { return iterable_.end(); } -}; - -}; // namespace ad_utility diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 9e9c48dc96..cc859dbcf5 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -395,8 +395,6 @@ addLinkAndDiscoverTest(FsstCompressorTest fsst) addLinkAndDiscoverTest(CopyableSynchronizationTest) -addLinkAndDiscoverTest(IteratorWrapperTest) - addLinkAndDiscoverTest(CacheableGeneratorTest) addLinkAndDiscoverTest(FilterTest) diff --git a/test/CacheTest.cpp b/test/CacheTest.cpp index fbdab252e5..de6491481c 100644 --- a/test/CacheTest.cpp +++ b/test/CacheTest.cpp @@ -13,11 +13,6 @@ using std::string; using namespace ad_utility::memory_literals; -using Vec = std::vector; -[[maybe_unused]] auto vectorSizeGetter = [](const auto& pointer) { - return pointer->size() * sizeof(int) * 1_B; -}; - // first some simple Tests for the general cache interface TEST(FlexibleCacheTest, Simple) { auto accessUpdater = [](const auto& s, [[maybe_unused]] const auto& v) { @@ -143,183 +138,4 @@ TEST(LRUCacheTest, testDecreasingCapacity) { ASSERT_FALSE(cache["3"]); ASSERT_FALSE(cache["4"]); } - -// _____________________________________________________________________________ -TEST(LRUCacheTest, verifyCacheSizeIsCorrectlyTrackedWhenChangedWhenErased) { - LRUCache>, decltype(vectorSizeGetter)> - cache{1}; - - auto vecA = std::make_shared(); - - cache.insert(0, vecA); - - ASSERT_EQ(cache._totalSizeNonPinned, 0_B); - vecA->push_back(0); - - // Cache does was not notified about the size change - ASSERT_EQ(cache._totalSizeNonPinned, 0_B); - - cache.erase(0); - - // Cache should not underflow - ASSERT_EQ(cache._totalSizeNonPinned, 0_B); - - cache.insert(0, vecA); - - ASSERT_EQ(cache._totalSizeNonPinned, 4_B); - vecA->clear(); - - // Cache does was not notified about the size change - ASSERT_EQ(cache._totalSizeNonPinned, 4_B); - - cache.erase(0); - - // Cache correctly remove 
size, even though the vector is empty by now. - ASSERT_EQ(cache._totalSizeNonPinned, 0_B); -} - -// _____________________________________________________________________________ -TEST(LRUCacheTest, - verifyCacheSizeIsCorrectlyTrackedWhenChangedWhenErasedPinned) { - LRUCache>, decltype(vectorSizeGetter)> - cache{1}; - - auto vecA = std::make_shared(); - - cache.insertPinned(0, vecA); - - ASSERT_EQ(cache._totalSizePinned, 0_B); - vecA->push_back(0); - - // Cache does was not notified about the size change - ASSERT_EQ(cache._totalSizePinned, 0_B); - - cache.erase(0); - - // Cache should not underflow - ASSERT_EQ(cache._totalSizePinned, 0_B); - - cache.insertPinned(0, vecA); - - ASSERT_EQ(cache._totalSizePinned, 4_B); - vecA->clear(); - - // Cache does was not notified about the size change - ASSERT_EQ(cache._totalSizePinned, 4_B); - - cache.erase(0); - - // Cache correctly remove size, even though the vector is empty by now. - ASSERT_EQ(cache._totalSizePinned, 0_B); -} - -// _____________________________________________________________________________ -TEST(LRUCacheTest, verifyCacheSizeIsCorrectlyRecomputed) { - LRUCache>, decltype(vectorSizeGetter)> - cache{3, 12_B, 8_B}; - - auto vecA = std::make_shared(0); - auto vecB = std::make_shared(1); - - cache.insert(0, vecA); - cache.insert(1, vecB); - - ASSERT_EQ(cache._totalSizeNonPinned, 4_B); - - vecA->resize(1); - vecB->resize(2); - - // Cache does was not notified about the size change - ASSERT_EQ(cache._totalSizeNonPinned, 4_B); - - cache.recomputeSize(0); - - ASSERT_EQ(cache._totalSizeNonPinned, 8_B); - ASSERT_TRUE(cache.contains(0)); - ASSERT_TRUE(cache.contains(1)); - - vecA->resize(2); - - cache.recomputeSize(0); - - ASSERT_EQ(cache._totalSizeNonPinned, 12_B); - ASSERT_TRUE(cache.contains(0)); - ASSERT_TRUE(cache.contains(1)); - - cache.recomputeSize(1); - - ASSERT_EQ(cache._totalSizeNonPinned, 8_B); - ASSERT_FALSE(cache.contains(0)); - ASSERT_TRUE(cache.contains(1)); - - vecB->resize(3); - 
cache.recomputeSize(1); - - ASSERT_EQ(cache._totalSizeNonPinned, 0_B); - ASSERT_FALSE(cache.contains(0)); - ASSERT_FALSE(cache.contains(1)); -} - -// _____________________________________________________________________________ -TEST(LRUCacheTest, verifyNonPinnedEntriesAreRemovedToMakeRoomForResize) { - LRUCache>, decltype(vectorSizeGetter)> - cache{3, 8_B, 4_B}; - - auto vecA = std::make_shared(1); - auto vecB = std::make_shared(1); - auto vecC = std::make_shared(0); - - cache.insertPinned(0, vecA); - cache.insert(1, vecB); - cache.insert(2, vecC); - - vecC->resize(1); - - cache.recomputeSize(2); - ASSERT_TRUE(cache.contains(0)); - ASSERT_FALSE(cache.contains(1)); - ASSERT_TRUE(cache.contains(2)); -} - -// _____________________________________________________________________________ -TEST(LRUCacheTest, verifyRecomputeIsNoOpForNonExistentElement) { - LRUCache> cache{1}; - cache.insert("1", "a"); - - cache.recomputeSize("2"); - - EXPECT_TRUE(cache.contains("1")); - EXPECT_FALSE(cache.contains("2")); -} - -TEST(LRUCacheTest, verifyRecomputeDoesNoticeExceedingSizeOnShrink) { - LRUCache>, decltype(vectorSizeGetter)> - cache{3, 32_B, 16_B}; - - auto vecA = std::make_shared(2); - auto vecB = std::make_shared(1); - auto vecC = std::make_shared(4); - - cache.insert(0, vecA); - cache.insert(1, vecB); - cache.insert(2, vecC); - - cache.setMaxSizeSingleEntry(8_B); - vecC->resize(3); - cache.recomputeSize(2); - - EXPECT_TRUE(cache.contains(0)); - EXPECT_TRUE(cache.contains(1)); - EXPECT_FALSE(cache.contains(2)); -} - -// _____________________________________________________________________________ -TEST(LRUCacheTest, verifyRecomputeDoesErrorOutWhenPinned) { - LRUCache cache{3, 12_B, - 8_B}; - - cache.insertPinned(0, 0); - - EXPECT_THROW(cache.recomputeSize(0), ad_utility::Exception); -} } // namespace ad_utility diff --git a/test/CacheableGeneratorTest.cpp b/test/CacheableGeneratorTest.cpp index 4ad86d1b2a..dc07ecfe60 100644 --- a/test/CacheableGeneratorTest.cpp +++ 
b/test/CacheableGeneratorTest.cpp @@ -4,13 +4,10 @@ #include -#include - #include "util/CacheableGenerator.h" #include "util/Generator.h" -#include "util/jthread.h" -using ad_utility::CacheableGenerator; +using ad_utility::wrapGeneratorWithCache; using cppcoro::generator; using namespace std::chrono_literals; @@ -20,394 +17,17 @@ generator testGenerator(uint32_t range) { } } -// _____________________________________________________________________________ -TEST(CacheableGenerator, allowsMultiConsumption) { - CacheableGenerator generator{testGenerator(3)}; - - auto iterator1 = generator.begin(true); - - ASSERT_NE(iterator1, generator.end()); - EXPECT_EQ(**iterator1, 0); - ++iterator1; - - ASSERT_NE(iterator1, generator.end()); - EXPECT_EQ(**iterator1, 1); - ++iterator1; - - ASSERT_NE(iterator1, generator.end()); - EXPECT_EQ(**iterator1, 2); - ++iterator1; - - EXPECT_EQ(iterator1, generator.end()); - - auto iterator2 = generator.begin(false); - - ASSERT_NE(iterator2, generator.end()); - EXPECT_EQ(**iterator2, 0); - ++iterator2; - - ASSERT_NE(iterator2, generator.end()); - EXPECT_EQ(**iterator2, 1); - ++iterator2; - - ASSERT_NE(iterator2, generator.end()); - EXPECT_EQ(**iterator2, 2); - ++iterator2; - EXPECT_EQ(iterator2, generator.end()); -} - -// _____________________________________________________________________________ -TEST(CacheableGenerator, masterBlocksSlaves) { - CacheableGenerator generator{testGenerator(3)}; - - // Verify slave is not blocked indefinitely if master has not been started yet - EXPECT_THROW(generator.begin(false), ad_utility::Exception); - - auto masterIterator = generator.begin(true); - std::mutex counterMutex; - std::condition_variable cv; - std::atomic_int counter = 0; - uint32_t proceedStage = 0; - - ad_utility::JThread thread1{[&]() { - auto iterator = generator.begin(false); - - ASSERT_NE(iterator, generator.end()); - { - std::lock_guard guard{counterMutex}; - EXPECT_EQ(counter, 0); - proceedStage = 1; - } - cv.notify_all(); - - 
EXPECT_EQ(**iterator, 0); - ++iterator; - - ASSERT_NE(iterator, generator.end()); - { - std::lock_guard guard{counterMutex}; - EXPECT_EQ(counter, 1); - proceedStage = 2; - } - cv.notify_all(); - - EXPECT_EQ(**iterator, 1); - ++iterator; - - ASSERT_NE(iterator, generator.end()); - { - std::lock_guard guard{counterMutex}; - EXPECT_EQ(counter, 2); - proceedStage = 3; - } - cv.notify_all(); - - EXPECT_EQ(**iterator, 2); - ++iterator; - - EXPECT_EQ(iterator, generator.end()); - { - std::lock_guard guard{counterMutex}; - EXPECT_EQ(counter, 3); - } - }}; - - ad_utility::JThread thread2{[&]() { - auto iterator = generator.begin(false); - - ASSERT_NE(iterator, generator.end()); - EXPECT_GE(counter, 0); - - EXPECT_EQ(**iterator, 0); - ++iterator; - - ASSERT_NE(iterator, generator.end()); - EXPECT_GE(counter, 1); - - EXPECT_EQ(**iterator, 1); - ++iterator; - - ASSERT_NE(iterator, generator.end()); - EXPECT_GE(counter, 2); - - EXPECT_EQ(**iterator, 2); - ++iterator; - - EXPECT_EQ(iterator, generator.end()); - EXPECT_GE(counter, 3); - }}; - - EXPECT_EQ(**masterIterator, 0); - - { - std::unique_lock guard{counterMutex}; - cv.wait(guard, [&]() { return proceedStage == 1; }); - ++counter; - ++masterIterator; - } - ASSERT_NE(masterIterator, generator.end()); - - EXPECT_EQ(**masterIterator, 1); - { - std::unique_lock guard{counterMutex}; - cv.wait(guard, [&]() { return proceedStage == 2; }); - ++counter; - ++masterIterator; - } - ASSERT_NE(masterIterator, generator.end()); - - EXPECT_EQ(**masterIterator, 2); - { - std::unique_lock guard{counterMutex}; - cv.wait(guard, [&]() { return proceedStage == 3; }); - ++counter; - ++masterIterator; - } - EXPECT_EQ(masterIterator, generator.end()); -} - -// _____________________________________________________________________________ -TEST(CacheableGenerator, verifyExhaustedMasterCausesFreeForAll) { - CacheableGenerator generator{testGenerator(3)}; - - (void)generator.begin(true); - - auto iterator1 = generator.begin(false); - auto iterator2 = 
generator.begin(false); - - ASSERT_NE(iterator1, generator.end()); - ASSERT_NE(iterator2, generator.end()); - - EXPECT_EQ(**iterator1, 0); - EXPECT_EQ(**iterator2, 0); - - ++iterator1; - ASSERT_NE(iterator1, generator.end()); - EXPECT_EQ(**iterator1, 1); - - ++iterator2; - ASSERT_NE(iterator2, generator.end()); - EXPECT_EQ(**iterator2, 1); - - ++iterator2; - ASSERT_NE(iterator2, generator.end()); - EXPECT_EQ(**iterator2, 2); - - ++iterator1; - ASSERT_NE(iterator1, generator.end()); - EXPECT_EQ(**iterator1, 2); - - ++iterator1; - EXPECT_EQ(iterator1, generator.end()); - - ++iterator2; - EXPECT_EQ(iterator2, generator.end()); -} - -// _____________________________________________________________________________ -TEST(CacheableGenerator, verifyOnSizeChangedIsCalledWithCorrectTimingInfo) { - auto timedGenerator = []() -> generator { - while (true) { -#ifndef _QLEVER_NO_TIMING_TESTS - std::this_thread::sleep_for(2ms); -#endif - co_yield 0; - } - }(); - - uint32_t callCounter = 0; - - CacheableGenerator generator{std::move(timedGenerator)}; - - generator.setOnSizeChanged([&](auto duration) { -#ifndef _QLEVER_NO_TIMING_TESTS - using ::testing::AllOf; - using ::testing::Le; - using ::testing::Ge; - EXPECT_THAT(duration, AllOf(Le(3ms), Ge(1ms))); -#endif - ++callCounter; - }); - - { - auto masterIterator = generator.begin(true); - EXPECT_EQ(callCounter, 1); - ASSERT_NE(masterIterator, generator.end()); - - ++masterIterator; - - EXPECT_EQ(callCounter, 2); - ASSERT_NE(masterIterator, generator.end()); - } - - { - auto slaveIterator1 = generator.begin(); - EXPECT_EQ(callCounter, 2); - ASSERT_NE(slaveIterator1, generator.end()); - - auto slaveIterator2 = generator.begin(); - EXPECT_EQ(callCounter, 2); - ASSERT_NE(slaveIterator2, generator.end()); - - ++slaveIterator2; - - EXPECT_EQ(callCounter, 2); - ASSERT_NE(slaveIterator2, generator.end()); - - ++slaveIterator2; - - EXPECT_EQ(callCounter, 3); - ASSERT_NE(slaveIterator2, generator.end()); - - ++slaveIterator1; - - 
EXPECT_EQ(callCounter, 3); - ASSERT_NE(slaveIterator1, generator.end()); - - ++slaveIterator1; - - EXPECT_EQ(callCounter, 3); - ASSERT_NE(slaveIterator1, generator.end()); - - ++slaveIterator1; - - EXPECT_EQ(callCounter, 4); - ASSERT_NE(slaveIterator1, generator.end()); - } - - auto slaveIterator3 = generator.begin(); - EXPECT_EQ(callCounter, 4); - ASSERT_NE(slaveIterator3, generator.end()); - - ++slaveIterator3; - - EXPECT_EQ(callCounter, 4); - ASSERT_NE(slaveIterator3, generator.end()); - - ++slaveIterator3; - - EXPECT_EQ(callCounter, 4); - ASSERT_NE(slaveIterator3, generator.end()); - - ++slaveIterator3; - - EXPECT_EQ(callCounter, 4); - ASSERT_NE(slaveIterator3, generator.end()); - - ++slaveIterator3; - - EXPECT_EQ(callCounter, 5); - ASSERT_NE(slaveIterator3, generator.end()); -} - -// _____________________________________________________________________________ -TEST(CacheableGenerator, verifyOnSizeChangedIsCalledAndRespectsShrink) { - CacheableGenerator generator{testGenerator(3)}; - uint32_t callCounter = 0; - generator.setOnSizeChanged([&](auto) { ++callCounter; }); - - auto iterator = generator.begin(true); - EXPECT_EQ(callCounter, 1); - ASSERT_NE(iterator, generator.end()); - - auto slaveIterator1 = generator.begin(); - EXPECT_EQ(callCounter, 1); - ASSERT_NE(slaveIterator1, generator.end()); - EXPECT_EQ(**slaveIterator1, 0); - - ++iterator; - EXPECT_EQ(callCounter, 2); - ASSERT_NE(iterator, generator.end()); - - generator.setMaxSize(1); - - ++slaveIterator1; - EXPECT_EQ(callCounter, 2); - ASSERT_NE(slaveIterator1, generator.end()); - EXPECT_EQ(**slaveIterator1, 1); - - auto slaveIterator2 = generator.begin(); - EXPECT_EQ(callCounter, 2); - ASSERT_NE(slaveIterator2, generator.end()); - EXPECT_EQ(**slaveIterator2, 0); - - ++iterator; - EXPECT_EQ(callCounter, 5); - ASSERT_NE(iterator, generator.end()); - EXPECT_EQ(**iterator, 2); - - ++iterator; - EXPECT_EQ(callCounter, 5); - EXPECT_EQ(iterator, generator.end()); - - ++slaveIterator1; - 
ASSERT_NE(slaveIterator1, generator.end()); - EXPECT_EQ(**slaveIterator1, 2); - - EXPECT_THROW(++slaveIterator2, ad_utility::IteratorExpired); -} - -// _____________________________________________________________________________ -TEST(CacheableGenerator, verifyShrinkKeepsSingleElement) { - CacheableGenerator generator{testGenerator(3)}; - uint32_t callCounter = 0; - generator.setOnSizeChanged([&](auto) { ++callCounter; }); - - auto iterator = generator.begin(true); - EXPECT_EQ(callCounter, 1); - ASSERT_NE(iterator, generator.end()); - - auto slaveIterator = generator.begin(); - EXPECT_EQ(callCounter, 1); - ASSERT_NE(slaveIterator, generator.end()); - - ++iterator; - EXPECT_EQ(callCounter, 2); - ASSERT_NE(iterator, generator.end()); - - generator.setMaxSize(0); - - ++slaveIterator; - EXPECT_EQ(callCounter, 2); - ASSERT_NE(slaveIterator, generator.end()); - - ++iterator; - EXPECT_EQ(callCounter, 5); - ASSERT_NE(iterator, generator.end()); - EXPECT_EQ(**iterator, 2); - - ++iterator; - EXPECT_EQ(callCounter, 5); - EXPECT_EQ(iterator, generator.end()); - - ++slaveIterator; - ASSERT_NE(slaveIterator, generator.end()); - EXPECT_EQ(**slaveIterator, 2); -} - -// _____________________________________________________________________________ -TEST(CacheableGenerator, verifySlavesCantBlockMasterIterator) { - CacheableGenerator generator{testGenerator(3)}; - generator.setMaxSize(1); - - auto masterIterator = generator.begin(true); - ASSERT_NE(masterIterator, generator.end()); - EXPECT_EQ(**masterIterator, 0); - - auto slaveIterator = generator.begin(false); - ASSERT_NE(slaveIterator, generator.end()); - EXPECT_EQ(**slaveIterator, 0); - - ++masterIterator; - ASSERT_NE(masterIterator, generator.end()); - EXPECT_EQ(**masterIterator, 1); - - ++masterIterator; - ASSERT_NE(masterIterator, generator.end()); - EXPECT_EQ(**masterIterator, 2); - - EXPECT_THROW(**slaveIterator, ad_utility::IteratorExpired); - - ++masterIterator; - EXPECT_EQ(masterIterator, generator.end()); 
+TEST(CacheableGenerator, placeholder) { + auto test = wrapGeneratorWithCache( + testGenerator(10), + [](std::optional& optionalValue, const uint32_t& newValue) { + if (optionalValue.has_value()) { + optionalValue.value() += newValue; + } else { + optionalValue.emplace(newValue); + } + return true; + }, + [](const std::optional&) {}); + EXPECT_EQ(1, 1); } diff --git a/test/ConcurrentCacheTest.cpp b/test/ConcurrentCacheTest.cpp index 1ab691d610..8e4c023e1a 100644 --- a/test/ConcurrentCacheTest.cpp +++ b/test/ConcurrentCacheTest.cpp @@ -73,12 +73,16 @@ using SimpleConcurrentLruCache = ad_utility::ConcurrentCache>>; +namespace { +auto returnTrue = [](const auto&) { return true; }; +} // namespace + TEST(ConcurrentCache, sequentialComputation) { SimpleConcurrentLruCache a{3ul}; ad_utility::Timer t{ad_utility::Timer::Started}; // Fake computation that takes 5ms and returns value "3", which is then // stored under key 3. - auto result = a.computeOnce(3, waiting_function("3"s, 5)); + auto result = a.computeOnce(3, waiting_function("3"s, 5), false, returnTrue); ASSERT_EQ("3"s, *result._resultPointer); ASSERT_EQ(result._cacheStatus, ad_utility::CacheStatus::computed); ASSERT_GE(t.msecs(), 5ms); @@ -90,7 +94,7 @@ TEST(ConcurrentCache, sequentialComputation) { t.reset(); t.start(); // takes 0 msecs to compute, as the request is served from the cache. - auto result2 = a.computeOnce(3, waiting_function("3"s, 5)); + auto result2 = a.computeOnce(3, waiting_function("3"s, 5), false, returnTrue); // computing result again: still yields "3", was cached and takes 0 // milliseconds (result is read from cache) ASSERT_EQ("3"s, *result2._resultPointer); @@ -107,7 +111,8 @@ TEST(ConcurrentCache, sequentialPinnedComputation) { ad_utility::Timer t{ad_utility::Timer::Started}; // Fake computation that takes 5ms and returns value "3", which is then // stored under key 3. 
- auto result = a.computeOncePinned(3, waiting_function("3"s, 5)); + auto result = + a.computeOncePinned(3, waiting_function("3"s, 5), false, returnTrue); ASSERT_EQ("3"s, *result._resultPointer); ASSERT_EQ(result._cacheStatus, ad_utility::CacheStatus::computed); ASSERT_GE(t.msecs(), 5ms); @@ -120,7 +125,7 @@ TEST(ConcurrentCache, sequentialPinnedComputation) { t.start(); // takes 0 msecs to compute, as the request is served from the cache. // we don't request a pin, but the original computation was pinned - auto result2 = a.computeOnce(3, waiting_function("3"s, 5)); + auto result2 = a.computeOnce(3, waiting_function("3"s, 5), false, returnTrue); // computing result again: still yields "3", was cached and takes 0 // milliseconds (result is read from cache) ASSERT_EQ("3"s, *result2._resultPointer); @@ -137,7 +142,7 @@ TEST(ConcurrentCache, sequentialPinnedUpgradeComputation) { ad_utility::Timer t{ad_utility::Timer::Started}; // Fake computation that takes 5ms and returns value "3", which is then // stored under key 3. - auto result = a.computeOnce(3, waiting_function("3"s, 5)); + auto result = a.computeOnce(3, waiting_function("3"s, 5), false, returnTrue); ASSERT_EQ("3"s, *result._resultPointer); ASSERT_EQ(result._cacheStatus, ad_utility::CacheStatus::computed); ASSERT_GE(t.msecs(), 5ms); @@ -151,7 +156,8 @@ TEST(ConcurrentCache, sequentialPinnedUpgradeComputation) { // takes 0 msecs to compute, as the request is served from the cache. // request a pin, the result should be read from the cache and upgraded // to a pinned result. 
- auto result2 = a.computeOncePinned(3, waiting_function("3"s, 5)); + auto result2 = + a.computeOncePinned(3, waiting_function("3"s, 5), false, returnTrue); // computing result again: still yields "3", was cached and takes 0 // milliseconds (result is read from cache) ASSERT_EQ("3"s, *result2._resultPointer); @@ -167,7 +173,8 @@ TEST(ConcurrentCache, concurrentComputation) { auto a = SimpleConcurrentLruCache(3ul); StartStopSignal signal; auto compute = [&a, &signal]() { - return a.computeOnce(3, waiting_function("3"s, 5, &signal)); + return a.computeOnce(3, waiting_function("3"s, 5, &signal), false, + returnTrue); }; auto resultFuture = std::async(std::launch::async, compute); signal.hasStartedSignal_.wait(); @@ -195,7 +202,8 @@ TEST(ConcurrentCache, concurrentPinnedComputation) { auto a = SimpleConcurrentLruCache(3ul); StartStopSignal signal; auto compute = [&a, &signal]() { - return a.computeOncePinned(3, waiting_function("3"s, 5, &signal)); + return a.computeOncePinned(3, waiting_function("3"s, 5, &signal), false, + returnTrue); }; auto resultFuture = std::async(std::launch::async, compute); signal.hasStartedSignal_.wait(); @@ -225,7 +233,8 @@ TEST(ConcurrentCache, concurrentPinnedUpgradeComputation) { auto a = SimpleConcurrentLruCache(3ul); StartStopSignal signal; auto compute = [&a, &signal]() { - return a.computeOnce(3, waiting_function("3"s, 5, &signal)); + return a.computeOnce(3, waiting_function("3"s, 5, &signal), false, + returnTrue); }; auto resultFuture = std::async(std::launch::async, compute); signal.hasStartedSignal_.wait(); @@ -240,7 +249,8 @@ TEST(ConcurrentCache, concurrentPinnedUpgradeComputation) { // this call waits for the background task to compute, and then fetches the // result. After this call completes, nothing is in progress and the result // is cached. 
- auto result = a.computeOncePinned(3, waiting_function("3"s, 5)); + auto result = + a.computeOncePinned(3, waiting_function("3"s, 5), false, returnTrue); ASSERT_EQ(0ul, a.numNonPinnedEntries()); ASSERT_EQ(1ul, a.numPinnedEntries()); ASSERT_TRUE(a.getStorage().wlock()->_inProgress.empty()); @@ -255,10 +265,12 @@ TEST(ConcurrentCache, abort) { auto a = SimpleConcurrentLruCache(3ul); StartStopSignal signal; auto compute = [&a, &signal]() { - return a.computeOnce(3, waiting_function("3"s, 5, &signal)); + return a.computeOnce(3, waiting_function("3"s, 5, &signal), false, + returnTrue); }; auto computeWithError = [&a, &signal]() { - return a.computeOnce(3, wait_and_throw_function(5, &signal)); + return a.computeOnce(3, wait_and_throw_function(5, &signal), false, + returnTrue); }; auto fut = std::async(std::launch::async, computeWithError); signal.hasStartedSignal_.wait(); @@ -279,10 +291,12 @@ TEST(ConcurrentCache, abortPinned) { auto a = SimpleConcurrentLruCache(3ul); StartStopSignal signal; auto compute = [&]() { - return a.computeOncePinned(3, waiting_function("3"s, 5, &signal)); + return a.computeOncePinned(3, waiting_function("3"s, 5, &signal), false, + returnTrue); }; auto computeWithError = [&a, &signal]() { - return a.computeOncePinned(3, wait_and_throw_function(5, &signal)); + return a.computeOncePinned(3, wait_and_throw_function(5, &signal), false, + returnTrue); }; auto fut = std::async(std::launch::async, computeWithError); signal.hasStartedSignal_.wait(); diff --git a/test/ExportQueryExecutionTreesTest.cpp b/test/ExportQueryExecutionTreesTest.cpp index 70ab494589..06b5ebff89 100644 --- a/test/ExportQueryExecutionTreesTest.cpp +++ b/test/ExportQueryExecutionTreesTest.cpp @@ -1076,10 +1076,9 @@ TEST(ExportQueryExecutionTrees, getIdTablesReturnsSingletonIterator) { idTable.push_back({Id::makeFromInt(42)}); idTable.push_back({Id::makeFromInt(1337)}); - Result result = Result::createResultWithFullyEvaluatedIdTable( - std::make_shared( - 
ProtoResult{std::move(idTable), {}, LocalVocab{}}, - std::numeric_limits::max())); + Result result = Result::fromProtoResult( + ProtoResult{std::move(idTable), {}, LocalVocab{}}, + [](const auto&) { return false; }, [](auto) {}); auto generator = ExportQueryExecutionTrees::getIdTables(result); auto iterator = generator.begin(); @@ -1109,11 +1108,9 @@ TEST(ExportQueryExecutionTrees, getIdTablesMirrorsGenerator) { co_yield std::move(idTable2); }(); - Result result = Result::createResultAsMasterConsumer( - std::make_shared( - ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, - std::numeric_limits::max()), - []() {}); + Result result = Result::fromProtoResult( + ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, + [](const auto&) { return false; }, [](auto) {}); auto generator = ExportQueryExecutionTrees::getIdTables(result); auto iterator = generator.begin(); @@ -1144,11 +1141,9 @@ TEST(ExportQueryExecutionTrees, ensureCorrectSlicingOfSingleIdTable) { co_yield std::move(idTable1); }(); - Result result = Result::createResultAsMasterConsumer( - std::make_shared( - ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, - std::numeric_limits::max()), - []() {}); + Result result = Result::fromProtoResult( + ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, + [](const auto&) { return false; }, [](auto) {}); auto generator = ExportQueryExecutionTrees::getRowIndices( LimitOffsetClause{._limit = 1, ._offset = 1}, result); @@ -1178,11 +1173,9 @@ TEST(ExportQueryExecutionTrees, co_yield std::move(idTable2); }(); - Result result = Result::createResultAsMasterConsumer( - std::make_shared( - ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, - std::numeric_limits::max()), - []() {}); + Result result = Result::fromProtoResult( + ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, + [](const auto&) { return false; }, [](auto) {}); auto generator = ExportQueryExecutionTrees::getRowIndices( LimitOffsetClause{._limit = std::nullopt, ._offset = 3}, 
result); @@ -1216,11 +1209,9 @@ TEST(ExportQueryExecutionTrees, co_yield std::move(idTable2); }(); - Result result = Result::createResultAsMasterConsumer( - std::make_shared<CacheableResult>( - ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, - std::numeric_limits<uint64_t>::max()), - []() {}); + Result result = Result::fromProtoResult( + ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, + [](const auto&) { return false; }, [](auto) {}); auto generator = ExportQueryExecutionTrees::getRowIndices( LimitOffsetClause{._limit = 3}, result); @@ -1258,11 +1249,9 @@ TEST(ExportQueryExecutionTrees, co_yield std::move(idTable2); }(); - Result result = Result::createResultAsMasterConsumer( - std::make_shared<CacheableResult>( - ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, - std::numeric_limits<uint64_t>::max()), - []() {}); + Result result = Result::fromProtoResult( + ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, + [](const auto&) { return false; }, [](auto) {}); auto generator = ExportQueryExecutionTrees::getRowIndices( LimitOffsetClause{._limit = 3, ._offset = 1}, result); @@ -1308,11 +1297,9 @@ TEST(ExportQueryExecutionTrees, co_yield std::move(idTable3); }(); - Result result = Result::createResultAsMasterConsumer( - std::make_shared<CacheableResult>( - ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, - std::numeric_limits<uint64_t>::max()), - []() {}); + Result result = Result::fromProtoResult( + ProtoResult{std::move(tableGenerator), {}, LocalVocab{}}, + [](const auto&) { return false; }, [](auto) {}); auto generator = ExportQueryExecutionTrees::getRowIndices( LimitOffsetClause{._limit = 5, ._offset = 2}, result); diff --git a/test/IteratorWrapperTest.cpp b/test/IteratorWrapperTest.cpp deleted file mode 100644 index 1637102eb9..0000000000 --- a/test/IteratorWrapperTest.cpp +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2024, University of Freiburg, -// Chair of Algorithms and Data Structures.
-// Author: Robin Textor-Falconi - -#include <gtest/gtest.h> - -#include <vector> - -#include "util/IteratorWrapper.h" - -using ad_utility::IteratorWrapper; - -TEST(IteratorWrapper, transparentWrapper) { - std::vector vec{1, 2, 3}; - int numIterations = 0; - for (auto value : IteratorWrapper{vec}) { - EXPECT_EQ(value, numIterations + 1); - numIterations++; - } - EXPECT_EQ(numIterations, 3); -} - -// _____________________________________________________________________________ - -struct TestIterable { - std::vector vec_{1, 2, 3}; - bool value1_ = false; - int value2_ = 0; - std::string value3_ = ""; - - auto begin(bool value1, int value2, std::string value3) { - value1_ = value1; - value2_ = value2; - value3_ = std::move(value3); - return vec_.begin(); - } - - auto end() { return vec_.end(); } -}; - -TEST(IteratorWrapper, verifyArgumentsArePassed) { - TestIterable testIterable; - int numIterations = 0; - for (auto value : IteratorWrapper{testIterable, true, 42, "Hi"}) { - EXPECT_EQ(value, numIterations + 1); - numIterations++; - } - EXPECT_EQ(numIterations, 3); - EXPECT_TRUE(testIterable.value1_); - EXPECT_EQ(testIterable.value2_, 42); - EXPECT_EQ(testIterable.value3_, "Hi"); -} diff --git a/test/SparqlDataTypesTest.cpp b/test/SparqlDataTypesTest.cpp index cd634b0cdf..d7b3602ab4 100644 --- a/test/SparqlDataTypesTest.cpp +++ b/test/SparqlDataTypesTest.cpp @@ -16,11 +16,10 @@ using enum PositionInTriple; namespace { struct ContextWrapper { Index _index{ad_utility::makeUnlimitedAllocator()}; - Result _resultTable{Result::createResultWithFullyEvaluatedIdTable( - std::make_shared<CacheableResult>( - ProtoResult{ - IdTable{ad_utility::testing::makeAllocator()}, {}, LocalVocab{}}, - std::numeric_limits<uint64_t>::max()))}; + Result _resultTable = Result::fromProtoResult( + ProtoResult{ + IdTable{ad_utility::testing::makeAllocator()}, {}, LocalVocab{}}, + [](const auto&) { return false; }, [](auto) {}); // TODO `VariableToColumnMap` VariableToColumnMap _hashMap{}; @@ -30,11 +29,9 @@ struct ContextWrapper { } void
setIdTable(IdTable&& table) { - _resultTable = Result::createResultWithFullyEvaluatedIdTable( - std::make_shared<CacheableResult>( - ProtoResult{ - std::move(table), {}, _resultTable.getSharedLocalVocab()}, - std::numeric_limits<uint64_t>::max())); + _resultTable = Result::fromProtoResult( + ProtoResult{std::move(table), {}, _resultTable.getSharedLocalVocab()}, + [](const auto&) { return false; }, [](auto) {}); } };