Skip to content

Commit

Permalink
Merge pull request #6 from camargo2019/fix/dividing-cache-by-database
Browse files Browse the repository at this point in the history
🩹 fix: dividing cache by database
  • Loading branch information
camargo2019 authored Sep 10, 2024
2 parents 6c00fa5 + db42ce6 commit b07d414
Show file tree
Hide file tree
Showing 2 changed files with 43 additions and 33 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/run-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -26,4 +26,4 @@ jobs:
- name: Build
run: |
clang++ -o cmr_cache main.cpp -I./vendor/yaml -I/usr/local/include/boost -L/usr/local/lib -lboost_system -lpthread -Wmissing-declarations
clang++ -o cmr_cache main.cpp -I./vendor/yaml -I/usr/local/include/boost -L/usr/local/lib -lboost_system -lpthread -std=c++17
74 changes: 42 additions & 32 deletions core/cache/cache.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,37 +22,41 @@
#include "cache.h"

// Cache constructor: restores persisted databases from the "data/"
// directory. After the split-by-database change, each database lives in
// its own "data/<name>.dat" file (previously everything was in a single
// "data/databases.dat"). Each file is a raw binary stream laid out as:
//   [size_t dbCount] then, per database:
//     [size_t keyLen][key bytes]                  -- database name
//     [size_t entryCount] then, per entry:
//       [size_t itemKeyLen][itemKey bytes]        -- entry key
//       [CacheStruct payload]                     -- via CacheStruct::deserialize
// NOTE(review): each per-database file still carries the dbCount/key
// header of the old single-file format — confirm this matches what
// Cache::save() writes so load and save stay in sync.
Cache::Cache(){
    // On a fresh install "data/" may not exist yet; directory_iterator
    // would throw std::filesystem::filesystem_error in that case.
    if (!std::filesystem::exists("data")) return;

    for (const auto& row: std::filesystem::directory_iterator("data")){
        // Only database snapshots are loaded; ignore unrelated files.
        if (row.path().extension() != ".dat") continue;

        std::ifstream file(row.path(), std::ios::binary);
        if (file.is_open()){
            size_t cacheSize;
            file.read(reinterpret_cast<char*>(&cacheSize), sizeof(cacheSize));

            for (size_t i = 0; i < cacheSize; ++i) {
                // Database name: length-prefixed raw bytes.
                std::string key;
                size_t keySize;
                file.read(reinterpret_cast<char*>(&keySize), sizeof(keySize));
                key.resize(keySize);
                file.read(&key[0], keySize);

                std::unordered_map<std::string, CacheStruct> map;

                size_t mapSize;
                file.read(reinterpret_cast<char*>(&mapSize), sizeof(mapSize));

                for (size_t f = 0; f < mapSize; ++f) {
                    // Entry key: length-prefixed raw bytes.
                    std::string itemKey;
                    size_t itemKeySize;
                    file.read(reinterpret_cast<char*>(&itemKeySize), sizeof(itemKeySize));
                    itemKey.resize(itemKeySize);
                    file.read(&itemKey[0], itemKeySize);

                    // Entry payload decodes itself from the stream.
                    CacheStruct cacheStruct;
                    cacheStruct.deserialize(file);

                    map[itemKey] = cacheStruct;
                }

                // Publish the fully-loaded database only after all of its
                // entries have been read.
                cache_[key] = map;
            }
        }
    }
}
Expand Down Expand Up @@ -101,15 +105,19 @@ std::vector<std::string> Cache::keys(std::string db){
}

void Cache::save(){
std::ofstream file("data/databases.dat");
std::filesystem::create_directory("data");

size_t cacheSize = cache_.size();
file.write(reinterpret_cast<const char*>(&cacheSize), sizeof(cacheSize));

for (const auto& row: cache_) {
const std::string& key = row.first;
const std::unordered_map<std::string, CacheStruct>& map = row.second;

std::string filename = "data/" + key + ".dat";
std::ofstream file(filename, std::ios::binary);

file.write(reinterpret_cast<const char*>(&cacheSize), sizeof(cacheSize));

size_t keySize = key.size();
file.write(reinterpret_cast<const char*>(&keySize), sizeof(keySize));
file.write(key.data(), keySize);
Expand All @@ -127,6 +135,8 @@ void Cache::save(){

cachedata.serialize(file);
}

file.close();
}
}

Expand Down

0 comments on commit b07d414

Please sign in to comment.