diff --git a/.github/workflows/rust-test.yml b/.github/workflows/rust-test.yml index 3665882..e7bb359 100644 --- a/.github/workflows/rust-test.yml +++ b/.github/workflows/rust-test.yml @@ -35,6 +35,10 @@ jobs: ports: # Maps tcp port 5432 on service container to the host - 5432:5432 + redis: + image: redis + ports: + - 6379:6379 steps: - uses: actions/checkout@v4 @@ -57,4 +61,5 @@ jobs: export POSTGRES_USER="postgres" export POSTGRES_PASSWORD="postgres" export POSTGRES_DBNAME="test" + export REDIS_URL="redis://redis:6379" cargo test --verbose diff --git a/Cargo.lock b/Cargo.lock index ca3f11b..60d1061 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -444,6 +444,7 @@ dependencies = [ "async-trait", "chrono", "deadpool-postgres", + "deadpool-redis", "deadpool-sqlite", "encoding_rs", "log", @@ -530,6 +531,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "deadpool-redis" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfae6799b68a735270e4344ee3e834365f707c72da09c9a8bb89b45cc3351395" +dependencies = [ + "deadpool 0.12.1", + "redis 0.27.5", +] + [[package]] name = "deadpool-runtime" version = "0.1.4" @@ -1777,6 +1788,27 @@ dependencies = [ "url", ] +[[package]] +name = "redis" +version = "0.27.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81cccf17a692ce51b86564334614d72dcae1def0fd5ecebc9f02956da74352b5" +dependencies = [ + "arc-swap", + "async-trait", + "bytes", + "combine", + "futures-util", + "itoa", + "num-bigint", + "percent-encoding", + "pin-project-lite", + "ryu", + "tokio", + "tokio-util", + "url", +] + [[package]] name = "redox_syscall" version = "0.4.1" @@ -2093,7 +2125,7 @@ dependencies = [ "ppp", "quick-xml", "rdkafka", - "redis", + "redis 0.25.4", "regex", "roxmltree", "rustls-pemfile", diff --git a/common/Cargo.toml b/common/Cargo.toml index bdf2997..e3a46d5 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -23,6 +23,7 @@ chrono = { version = "0.4.26", 
default-features = false, features = ["clock"] } encoding_rs = "0.8.32" deadpool-postgres = "0.14.0" deadpool-sqlite = "0.5.0" +deadpool-redis = "0.18.0" openssl = "0.10.66" postgres-openssl = "0.5.0" strum = { version = "0.26.1", features = ["derive"] } diff --git a/common/src/database/mod.rs b/common/src/database/mod.rs index 26a340c..eff7369 100644 --- a/common/src/database/mod.rs +++ b/common/src/database/mod.rs @@ -7,6 +7,7 @@ use crate::{ bookmark::BookmarkData, database::postgres::PostgresDatabase, database::sqlite::SQLiteDatabase, + database::redis::RedisDatabase, heartbeat::{HeartbeatData, HeartbeatsCache}, settings::Settings, subscription::{ @@ -21,6 +22,8 @@ use self::schema::{Migration, Version}; pub mod postgres; pub mod schema; pub mod sqlite; +pub mod redis; +pub mod redisdomain; pub type Db = Arc; @@ -40,6 +43,12 @@ pub async fn db_from_settings(settings: &Settings) -> Result { schema::postgres::register_migrations(&mut db); Ok(Arc::new(db)) } + crate::settings::Database::Redis(redis) => { + let db = RedisDatabase::new(redis.connection_url()) + .await + .context("Failed to initialize Redis client")?; + Ok(Arc::new(db)) + } } } diff --git a/common/src/database/redis.rs b/common/src/database/redis.rs new file mode 100644 index 0000000..2f11f11 --- /dev/null +++ b/common/src/database/redis.rs @@ -0,0 +1,794 @@ +// Some of the following code is inspired from +// https://github.com/SkylerLipthay/schemamama_postgres. 
As stated by its +// license (MIT), we include below its copyright notice and permission notice: +// +// The MIT License (MIT) +// +// Copyright (c) 2024 Axoflow +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+// +// +#![allow(unused_imports)] +use anyhow::{anyhow, ensure, Context, Error, Result}; +use async_trait::async_trait; +use deadpool_redis::redis::{self, RedisError}; +use deadpool_redis::redis::{pipe, AsyncCommands, Pipeline}; +use deadpool_redis::{Config, Connection, Pool, Runtime}; +use log::warn; +use serde::de::value; +use uuid::Uuid; +use std::borrow::Borrow; +use std::collections::btree_map::Entry::Vacant; +use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::str::FromStr; +use std::sync::Arc; +use std::time::SystemTime; + +use super::redisdomain::RedisDomain; +use crate::bookmark::{self, BookmarkData}; +use crate::database::Database; +use crate::heartbeat::{self, HeartbeatData, HeartbeatKey, HeartbeatValue, HeartbeatsCache}; +use crate::subscription::{ + self, ContentFormat, InternalVersion, PrincsFilter, SubscriptionData, SubscriptionMachine, SubscriptionMachineState, SubscriptionStatsCounters, SubscriptionUuid +}; +use crate::transformers::output_files_use_path::new; + +use super::schema::{Migration, MigrationBase, Version}; + +const MIGRATION_TABLE_NAME: &str = "__schema_migrations"; + +#[async_trait] +pub trait RedisMigration: Migration { + /// Called when this migration is to be executed. + async fn up(&self, conn: &mut Connection) -> Result<()>; + + /// Called when this migration is to be reversed. 
+ async fn down(&self, conn: &mut Connection) -> Result<()>; + + fn to_base(&self) -> Arc { + Arc::new(MigrationBase::new(self.version(), self.description())) + } +} + +enum MachineStatusFilter { + Alive, + Active, + Dead, +} + +impl MachineStatusFilter { + fn is_match(&self, last_seen: &i64, last_event_seen: &Option, start_time: i64) -> bool { + match self { + MachineStatusFilter::Alive => { + *last_seen > start_time && last_event_seen.map_or(true, |event_time| event_time <= start_time) + }, + MachineStatusFilter::Active => { + last_event_seen.map_or(false, |event_time| event_time > start_time) + }, + MachineStatusFilter::Dead => { + *last_seen <= start_time && last_event_seen.map_or(true, |event_time| event_time <= start_time) + } + } + } +} + +fn get_value_or_default( + fields: &HashMap, + key: RedisDomain, +) -> String { + fields.get(&key).cloned().unwrap_or_else(|| RedisDomain::Any.to_string()) +} + +#[allow(unused)] +pub struct RedisDatabase { + pool: Pool, + migrations: BTreeMap>, +} + +impl RedisDatabase { + pub async fn new(connection_url: &str) -> Result { + let config = Config::from_url(connection_url); + let pool = config.create_pool(Some(Runtime::Tokio1))?; + let db = RedisDatabase { + pool, + migrations: BTreeMap::new(), + }; + + Ok(db) + } + + /// Register a migration. If a migration with the same version is already registered, a warning + /// is logged and the registration fails. 
+ pub fn register_migration(&mut self, migration: Arc) { + let version = migration.version(); + if let Vacant(e) = self.migrations.entry(version) { + e.insert(migration); + } else { + warn!("Migration with version {:?} is already registered", version); + } + } + + async fn get_heartbeats_by_field( + &self, + fields: HashMap + ) -> Result> { + + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + + let key = format!("{}:{}:{}", RedisDomain::Heartbeat, + get_value_or_default(&fields, RedisDomain::Subscription), + get_value_or_default(&fields, RedisDomain::Machine)); + + let keys = list_keys(&mut conn, &key).await?; + let mut heartbeats = Vec::::new(); + + let mut subscriptions_cache = HashMap::::new(); + + for key in keys { + let heartbeat_data : HashMap = conn.hgetall(&key).await.context("Failed to get heartbeat data")?; + if !heartbeat_data.is_empty() { + + // cache subs + let subscription_uuid = heartbeat_data[RedisDomain::Subscription.as_str()].clone(); + let cached_data = subscriptions_cache.get(&subscription_uuid).cloned(); + + let subscription_data_opt = if cached_data.is_none() { + let fetched_data = self.get_subscription_by_identifier(&subscription_uuid).await?; + if let Some(fetched_subscription) = fetched_data.clone() { + subscriptions_cache.insert(subscription_uuid.clone(), fetched_subscription); + } + fetched_data + } else { + cached_data + }; + + if subscription_data_opt.is_none() { + return Ok(Vec::::new()); + } + + let subscription_data = subscription_data_opt.ok_or_else(|| { + anyhow::anyhow!("Subscription data not found for UUID: {}", subscription_uuid) + })?; + + let expected_ip = fields.get(&RedisDomain::Ip); + if expected_ip.is_some() && heartbeat_data.get(RedisDomain::Ip.as_str()) != expected_ip { + continue; + } + + let hb = HeartbeatData::new( + heartbeat_data[RedisDomain::Machine.as_str()].clone(), + heartbeat_data[RedisDomain::Ip.as_str()].clone(), + subscription_data, + 
heartbeat_data.get(RedisDomain::FistSeen.as_str()) + .and_then(|value| value.parse::().ok()) + .context(format!("Failed to parse integer for field '{}'", RedisDomain::FistSeen))?, + heartbeat_data.get(RedisDomain::LastSeen.as_str()) + .and_then(|value| value.parse::().ok()) + .context(format!("Failed to parse integer for field '{}'", RedisDomain::LastSeen))?, + heartbeat_data.get(RedisDomain::LastEventSeen.as_str()) + .and_then(|value| value.parse::().ok()), + ); + heartbeats.push(hb); + } else { + log::warn!("No heartbeat found for key: {}", key); + } + } + + Ok(heartbeats) + } + +} + +async fn list_keys(con: &mut Connection, key: &str) -> Result> +{ + let res = con.keys(key).await.context("Unable to list keys")?; + Ok(res) +} + +async fn list_keys_with_fallback(con: &mut Connection, key: &str, fallback: &str) -> Result> +{ + let keys:Vec = con.keys(key).await?; + if keys.is_empty() { + let fallback_keys: Vec = con.keys(fallback).await?; + return Ok(fallback_keys); + } + + Ok(keys) +} + +async fn set_heartbeat_inner(conn: &mut Connection, subscription: &str, machine: &str, value: &HeartbeatValue) -> Result<()> { + let redis_key = format!("{}:{}:{}", RedisDomain::Heartbeat, subscription.to_uppercase(), machine); + let key_exists = conn.exists(&redis_key).await.unwrap_or(true); + let mut pipe = Pipeline::new(); + pipe.hset(&redis_key, RedisDomain::Subscription, subscription.to_uppercase()); + pipe.hset(&redis_key, RedisDomain::Machine, machine); + pipe.hset(&redis_key, RedisDomain::Ip, value.ip.clone()); + if !key_exists { + pipe.hset(&redis_key, RedisDomain::FistSeen, value.last_seen); + } + pipe.hset(&redis_key, RedisDomain::LastSeen, value.last_seen); + + if let Some(last_event_seen) = value.last_event_seen { + pipe.hset(&redis_key, RedisDomain::LastEventSeen, last_event_seen); + } + + let _ : Vec = pipe.query_async(conn.as_mut()).await.context("Failed to set heartbeat data")?; + Ok(()) +} + +async fn set_heartbeat(conn: &mut Connection, key: &HeartbeatKey, value:
&HeartbeatValue) -> Result<()> { + set_heartbeat_inner(conn, &key.subscription, &key.machine, value).await +} + +#[allow(unused)] +#[async_trait] +impl Database for RedisDatabase { + async fn get_bookmark(&self, machine: &str, subscription: &str) -> Result> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + let key = format!("{}:{}:{}", RedisDomain::BookMark, subscription.to_uppercase(), machine); + let bookmark_data : HashMap = conn.hgetall(&key).await.context("Failed to get bookmark data")?; + Ok(bookmark_data.get(RedisDomain::BookMark.as_str()).cloned()) + } + + async fn get_bookmarks(&self, subscription: &str) -> Result> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + let key = format!("{}:{}:{}", RedisDomain::BookMark, subscription.to_uppercase(), RedisDomain::Any); + let keys = list_keys(&mut conn, &key).await?; + let mut bookmarks = Vec::::new(); + + for key in keys { + let bookmark_data : HashMap = conn.hgetall(&key).await.context("Failed to get bookmark data")?; + if !bookmark_data.is_empty() { + bookmarks.push(BookmarkData { + subscription: bookmark_data[RedisDomain::Subscription.as_str()].clone(), + machine: bookmark_data[RedisDomain::Machine.as_str()].clone(), + bookmark: bookmark_data[RedisDomain::BookMark.as_str()].clone(), + }); + } else { + log::warn!("No bookmark found for key: {}", key); + } + } + + Ok(bookmarks) + } + + async fn store_bookmark( + &self, + machine: &str, + subscription: &str, + bookmark: &str, + ) -> Result<()> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + let key = format!("{}:{}:{}", RedisDomain::BookMark, subscription.to_uppercase(), machine); + + let mut pipe = Pipeline::new(); + pipe.hset(&key, RedisDomain::Subscription, subscription.to_uppercase()); + pipe.hset(&key, RedisDomain::Machine, machine); + pipe.hset(&key, RedisDomain::BookMark, bookmark); + + let _: Vec = pipe.query_async(&mut
conn).await.context("Failed to store bookmark data")?; + + Ok(()) + } + + async fn delete_bookmarks( + &self, + machine: Option<&str>, + subscription: Option<&str>, + ) -> Result<()> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + let compose_key = |subscription: &str, machine: &str| -> String { + format!("{}:{}:{}", RedisDomain::BookMark, subscription.to_uppercase(), machine) + }; + let key : String = match (subscription, machine) { + (Some(subscription), Some(machine)) => { + compose_key(subscription, machine) + }, + (Some(subscription), None) => { + compose_key(subscription, RedisDomain::Any.as_str()) + }, + (None, Some(machine)) => { + compose_key(RedisDomain::Any.as_str(), machine) + }, + (None, None) => { + compose_key(RedisDomain::Any.as_str(), RedisDomain::Any.as_str()) + } + }; + + let keys = list_keys(&mut conn, &key).await?; + let mut pipe = Pipeline::new(); + for key in keys.iter() { + pipe.del(key); + } + let _ : Vec = pipe.query_async(&mut conn).await.context("Failed to delete bookmark data")?; + + Ok(()) + } + + async fn get_heartbeats_by_machine( + &self, + machine: &str, + subscription: Option<&str>, + ) -> Result> { + let mut fields = HashMap::::from([ + (RedisDomain::Machine, machine.to_string()), + ]); + if let Some(subs) = subscription { + fields.insert(RedisDomain::Subscription, subs.to_string()); + } + self.get_heartbeats_by_field(fields).await + } + + async fn get_heartbeats_by_ip( + &self, + ip: &str, + subscription: Option<&str>, + ) -> Result> { + let mut fields = HashMap::::from([ + (RedisDomain::Ip, ip.to_string()), + ]); + if let Some(subs) = subscription { + fields.insert(RedisDomain::Subscription, subs.to_string()); + } + self.get_heartbeats_by_field(fields).await + } + + async fn get_heartbeats(&self) -> Result> { + let fields = HashMap::::new(); + self.get_heartbeats_by_field(fields).await + } + + async fn get_heartbeats_by_subscription( + &self, + subscription: &str, + ) -> Result> { + let 
fields = HashMap::::from([ + (RedisDomain::Subscription, subscription.to_string()), + ]); + self.get_heartbeats_by_field(fields).await + } + + async fn store_heartbeat( + &self, + machine: &str, + ip: String, + subscription: &str, + is_event: bool, + ) -> Result<()> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + let now = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH)? + .as_secs(); + + let hbv = HeartbeatValue{ + ip, + last_seen: now, + last_event_seen: if is_event { Some(now) } else { None }, + }; + + set_heartbeat_inner(&mut conn, subscription, machine, &hbv).await + } + + async fn store_heartbeats(&self, heartbeats: &HeartbeatsCache) -> Result<()> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + for (key, value) in heartbeats.iter() { + let ip:String = value.ip.clone(); + set_heartbeat(&mut conn, key, value).await?; + } + Ok(()) + } + + async fn get_subscriptions(&self) -> Result> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + + let key = format!("{}:{}:{}", RedisDomain::Subscription, RedisDomain::Any, RedisDomain::Any); + + let keys = list_keys(&mut conn, &key).await?; + + let mut subscriptions = Vec::new(); + + for key in keys { + let subscription_json: Option = conn.get(&key).await.context("Failed to get subscription data")?; + + if let Some(subscription_json) = subscription_json { + match serde_json::from_str::(&subscription_json) { + Ok(subscription) => subscriptions.push(subscription), + Err(err) => { + log::warn!("Failed to deserialize subscription data for key {}: {}", key, err); + } + } + } else { + log::warn!("No subscription found for key: {}", key); + } + } + + Ok(subscriptions) + } + + async fn get_subscription_by_identifier( + &self, + identifier: &str, + ) -> Result> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + let first_pass_key = format!("{}:{}:{}", 
RedisDomain::Subscription, identifier, RedisDomain::Any); + let second_pass_key = format!("{}:{}:{}", RedisDomain::Subscription, RedisDomain::Any, identifier); + + let keys = list_keys_with_fallback(&mut conn, &first_pass_key, &second_pass_key).await?; + + if !keys.is_empty() { + let result: Option = conn.get(&keys[0]).await.context("Failed to get subscription data")?; + if result.is_some() { + let subscription: SubscriptionData = serde_json::from_str(&result.unwrap()).context("Failed to deserialize subscription data")?; + return Ok(Some(subscription)); + } + } + Ok(None) + } + + async fn store_subscription(&self, subscription: &SubscriptionData) -> Result<()> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + + let key_filter = format!("{}:{}:{}",RedisDomain::Subscription, subscription.uuid().to_string().to_uppercase(), RedisDomain::Any); + let keys = list_keys(&mut conn, &key_filter).await?; + if !keys.is_empty() { + let _:() = conn.del(keys).await?; + } + + let key = format!("{}:{}:{}", RedisDomain::Subscription, subscription.uuid().to_string().to_uppercase(), subscription.name()); + let value = serde_json::to_string(subscription).context("Failed to serialize subscription data")?; + let _ : String = conn.set(key, value).await.context("Failed to store subscription data")?; + Ok(()) + } + + async fn delete_subscription(&self, uuid: &str) -> Result<()> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + let first_pass_key = format!("{}:{}:{}", RedisDomain::Subscription, uuid.to_uppercase(), RedisDomain::Any); + let second_pass_key = format!("{}:{}:{}", RedisDomain::Subscription, RedisDomain::Any, uuid); + + let keys = list_keys_with_fallback(&mut conn, &first_pass_key, &second_pass_key).await?; + + self.delete_bookmarks(None, Some(uuid)).await.context("Failed to delete subscription related bookmark data")?; + if !keys.is_empty() { + let _: () = conn.del(keys).await.context("Failed to delete
subscription data")?; + } + Ok(()) + } + + /// Fails if `setup_schema` hasn't previously been called or if the query otherwise fails. + async fn current_version(&self) -> Result> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + let key = MIGRATION_TABLE_NAME; + let versions:Vec = conn.zrange(key, -1, -1).await.context("There is no version info stored in DB.")?; + let last_version = versions.last().and_then(|v| v.parse::().ok()); + Ok(last_version) + } + + /// Fails if `setup_schema` hasn't previously been called or if the query otherwise fails. + async fn migrated_versions(&self) -> Result> { + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + let key = MIGRATION_TABLE_NAME; + let versions:Vec = conn.zrange(key, 0, -1).await.context("There is no version info stored in DB.")?; + let result : BTreeSet = versions.into_iter().map(|v| v.parse::().context(format!("Failed to parse version: {}", v))).collect::>()?; + Ok(result) + } + + /// Fails if `setup_schema` hasn't previously been called or if the migration otherwise fails. + async fn apply_migration(&self, version: Version) -> Result<()> { + let migration = self + .migrations + .get(&version) + .ok_or_else(|| anyhow!("Could not retrieve migration with version {}", version))? + .clone(); + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + migration.up(&mut conn).await?; + let key = MIGRATION_TABLE_NAME; + let version = migration.version(); + let added_count: i64 = conn.zadd(key, version, version).await.context(format!("Unable to add version: {}", version))?; + if added_count > 0 { + println!("Successfully added version {} to sorted set", version); + } else { + println!("Version {} was not added (it may already exist)", version); + } + Ok(()) + } + + /// Fails if `setup_schema` hasn't previously been called or if the migration otherwise fails. 
+ async fn revert_migration(&self, version: Version) -> Result<()> { + let migration = self + .migrations + .get(&version) + .ok_or_else(|| anyhow!("Could not retrieve migration with version {}", version))? + .clone(); + let mut conn = self.pool.get().await.context("Failed to get Redis connection")?; + migration.down(&mut conn).await?; + let key = MIGRATION_TABLE_NAME; + let version = migration.version(); + let removed_count: i64 = conn.zrem(key, version).await.context("Failed to remove version")?; + if removed_count > 0 { + println!("Successfully removed version: {}", version); + } else { + println!("Version {} not found in the sorted set.", version); + } + Ok(()) + } + + /// Create the tables required to keep track of schema state. If the tables already + /// exist, this function has no operation. + async fn setup_schema(&self) -> Result<()> { + Ok(()) + } + + async fn migrations(&self) -> BTreeMap> { + let mut base_migrations = BTreeMap::new(); + for (version, migration) in self.migrations.iter() { + base_migrations.insert(*version, migration.to_base()); + } + base_migrations + } + + async fn get_stats( + &self, + subscription: &str, + start_time: i64, + ) -> Result { + let fields = HashMap::::from([ + (RedisDomain::Subscription, subscription.to_string()), + ]); + let heartbeats = self.get_heartbeats_by_field(fields).await?; + + let total_machines_count = i64::try_from(heartbeats.len())?; + let mut alive_machines_count = 0; + let mut active_machines_count = 0; + let mut dead_machines_count = 0; + + for hb in heartbeats.iter() { + match hb { + HeartbeatData{last_seen, last_event_seen, ..} if MachineStatusFilter::Alive.is_match(last_seen, last_event_seen, start_time) => { + alive_machines_count += 1; + }, + HeartbeatData{last_seen, last_event_seen, ..} if MachineStatusFilter::Active.is_match(last_seen, last_event_seen, start_time) => { + active_machines_count += 1; + }, + HeartbeatData{last_seen, last_event_seen, ..} if 
MachineStatusFilter::Dead.is_match(last_seen, last_event_seen, start_time) => { + dead_machines_count += 1; + }, + _ => {}, + }; + } + + Ok(SubscriptionStatsCounters::new( + total_machines_count, + alive_machines_count, + active_machines_count, + dead_machines_count, + )) + } + + async fn get_machines( + &self, + subscription: &str, + start_time: i64, + stat_type: Option, + ) -> Result> { + let fields = HashMap::::from([ + (RedisDomain::Subscription, subscription.to_string()), + ]); + let heartbeats = self.get_heartbeats_by_field(fields).await?; + let mut result = Vec::::new(); + + for hb in heartbeats.iter() { + + match stat_type { + None => {}, + Some(SubscriptionMachineState::Active) => { + if !MachineStatusFilter::Active.is_match(&hb.last_seen, &hb.last_event_seen, start_time) { + continue; + } + }, + Some(SubscriptionMachineState::Alive) => { + if !MachineStatusFilter::Alive.is_match(&hb.last_seen, &hb.last_event_seen, start_time) { + continue; + } + }, + Some(SubscriptionMachineState::Dead) => { + if !MachineStatusFilter::Dead.is_match(&hb.last_seen, &hb.last_event_seen, start_time) { + continue; + } + }, + } + result.push(SubscriptionMachine::new(hb.machine().to_string(), hb.ip().to_string())); + } + + Ok(result) + } +} + + +#[cfg(test)] +mod tests { + + use tempfile::TempPath; + use std::env; + + use crate::{ + database::schema::{self, Migrator}, + migration, + }; + + use super::*; + use std::{fs, future::IntoFuture, net::Shutdown, process::{Child, Command, Stdio}}; + use std::path::PathBuf; + use tempfile::NamedTempFile; + use std::io::{self, BufRead, Write}; + use std::fs::{File, OpenOptions}; + use std::time::Duration; + use tokio::{net::TcpListener, sync::{oneshot, Notify}, time::sleep}; + use tokio::sync::Mutex; + use std::net::SocketAddr; + use serial_test::serial; + + #[allow(unused)] + async fn cleanup_db(db: &RedisDatabase) -> Result<()> { + let mut con = db.pool.get().await?; + let _ : () = deadpool_redis::redis::cmd("FLUSHALL").query_async(&mut 
con).await?; + Ok(()) + } + + async fn drop_migrations_table(db: &RedisDatabase) -> Result<()> { + let mut conn = db.pool.get().await.context("Failed to get Redis connection")?; + let key = MIGRATION_TABLE_NAME; + let _:() = conn.del(key).await?; + Ok(()) + } + + async fn redis_db() -> Result { + let connection_string = env::var("REDIS_URL").unwrap_or("redis://127.0.0.1:6379".to_string()); + RedisDatabase::new(connection_string.as_str()).await + } + + async fn db_with_migrations() -> Result> { + let mut db = redis_db().await?; + schema::redis::register_migrations(&mut db); + drop_migrations_table(&db).await?; + Ok(Arc::new(db)) + } + + #[tokio::test] + #[serial] + async fn test_open_and_close() -> Result<()> { + redis_db() + .await + .expect("Could not connect to database"); + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_bookmarks() -> Result<()> { + crate::database::tests::test_bookmarks(db_with_migrations().await?).await?; + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_heartbeats() -> Result<()> { + crate::database::tests::test_heartbeats(db_with_migrations().await?).await?; + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_heartbeats_cache() -> Result<()> { + crate::database::tests::test_heartbeats_cache(db_with_migrations().await?).await?; + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_subscriptions() -> Result<()> { + crate::database::tests::test_subscriptions(db_with_migrations().await?).await?; + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_stats() -> Result<()> { + crate::database::tests::test_stats_and_machines(db_with_migrations().await?).await?; + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_current_version_empty() -> Result<()> { + let db = db_with_migrations().await?; + let res = db.current_version().await?; + assert_eq!(res, None); + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_current_version() -> Result<()> { + let db = redis_db().await?; + let mut con = 
db.pool.get().await?; + let members = vec![(1.0, 1),(2.0, 2),(3.0, 3)]; + let _:() = con.zadd_multiple(MIGRATION_TABLE_NAME, &members).await?; + let res = db.current_version().await?; + assert_eq!(res, Some(3)); + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_migrated_versions() -> Result<()> { + let db = redis_db().await?; + let mut con = db.pool.get().await?; + let members = vec![(1.0, 1),(2.0, 2),(3.0, 3)]; + let _:() = con.zadd_multiple(MIGRATION_TABLE_NAME, &members).await?; + let res = db.migrated_versions().await?; + assert_eq!(res, BTreeSet::::from_iter(vec![1,2,3])); + Ok(()) + } + + struct CreateUsers; + migration!(CreateUsers, 1, "create users table"); + + #[async_trait] + impl RedisMigration for CreateUsers { + async fn up(&self, conn: &mut Connection) -> Result<()> { + let key = format!("{}", RedisDomain::Users); + let _:() = conn.set(key, "").await?; + Ok(()) + } + + async fn down(&self, conn: &mut Connection) -> Result<()> { + let key = format!("{}", RedisDomain::Users); + let _:() = conn.del(key).await?; + Ok(()) + } + } + + #[tokio::test] + #[serial] + async fn test_register() -> Result<()> { + let mut db = redis_db() + .await + .expect("Could not connect to database"); + + drop_migrations_table(&db).await?; + db.register_migration(Arc::new(CreateUsers)); + + db.setup_schema().await.expect("Could not setup schema"); + + let db_arc = Arc::new(db); + + let migrator = Migrator::new(db_arc.clone()); + + migrator.up(None, false).await.unwrap(); + + assert_eq!(db_arc.current_version().await.unwrap(), Some(1)); + + migrator.down(None, false).await.unwrap(); + + assert_eq!(db_arc.current_version().await.unwrap(), None); + Ok(()) + } + +} diff --git a/common/src/database/redisdomain.rs b/common/src/database/redisdomain.rs new file mode 100644 index 0000000..5603eae --- /dev/null +++ b/common/src/database/redisdomain.rs @@ -0,0 +1,45 @@ +use deadpool_redis::redis::{self, ToRedisArgs}; +use std::fmt; + +#[derive(Debug, Eq, Hash, PartialEq)] +pub 
enum RedisDomain { + Users, + Subscription, + Machine, + Heartbeat, + BookMark, + Ip, + FistSeen, + LastSeen, + LastEventSeen, + Any, +} + +impl fmt::Display for RedisDomain { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.as_str()) + } +} + +impl ToRedisArgs for RedisDomain { + fn write_redis_args<W: ?Sized + redis::RedisWrite>(&self, out: &mut W) { + out.write_arg(self.as_str().as_bytes()); + } +} + +impl RedisDomain { + pub fn as_str(&self) -> &str { + match self { + RedisDomain::Users => "users", + RedisDomain::Subscription => "subscription", + RedisDomain::Machine => "machine", + RedisDomain::Heartbeat => "heartbeat", + RedisDomain::BookMark => "bookmark", + RedisDomain::Ip => "ip", + RedisDomain::FistSeen => "first_seen", + RedisDomain::LastSeen => "last_seen", + RedisDomain::LastEventSeen => "last_event_seen", + RedisDomain::Any => "*", + } + } +} diff --git a/common/src/database/schema/mod.rs b/common/src/database/schema/mod.rs index dcee775..c1ba456 100644 --- a/common/src/database/schema/mod.rs +++ b/common/src/database/schema/mod.rs @@ -34,6 +34,7 @@ use super::Database; pub mod postgres; pub mod sqlite; +pub mod redis; /// The version type alias used to uniquely reference migrations. 
pub type Version = i64; diff --git a/common/src/database/schema/redis/_001_create_subscriptions_table.rs b/common/src/database/schema/redis/_001_create_subscriptions_table.rs new file mode 100644 index 0000000..df5ead9 --- /dev/null +++ b/common/src/database/schema/redis/_001_create_subscriptions_table.rs @@ -0,0 +1,26 @@ +use anyhow::{Context, Result}; +use async_trait::async_trait; +use redis::AsyncCommands; +use crate::database::redis::RedisMigration; +use crate::database::redisdomain::RedisDomain; +use crate::migration; +use deadpool_redis::*; + +pub(super) struct CreateSubscriptionsTable; +migration!(CreateSubscriptionsTable, 1, "create subscriptions table"); + +#[async_trait] +impl RedisMigration for CreateSubscriptionsTable { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, conn: &mut Connection) -> Result<()> { + let key = format!("{}:{}:{}", RedisDomain::Subscription, RedisDomain::Any, RedisDomain::Any); + let subs : Vec<String> = conn.keys(key).await.context("Unable to list keys")?; + if !subs.is_empty() { + let _: () = conn.del(subs).await.context("Failed to delete subscription data")?; + } + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_002_create_bookmarks_table.rs b/common/src/database/schema/redis/_002_create_bookmarks_table.rs new file mode 100644 index 0000000..a54c8c9 --- /dev/null +++ b/common/src/database/schema/redis/_002_create_bookmarks_table.rs @@ -0,0 +1,26 @@ +use anyhow::{Context, Result}; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::database::redisdomain::RedisDomain; +use crate::migration; +use deadpool_redis::*; +use redis::AsyncCommands; + +pub(super) struct CreateBookmarksTable; +migration!(CreateBookmarksTable, 2, "create bookmarks table"); + +#[async_trait] +impl RedisMigration for CreateBookmarksTable { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, conn: &mut Connection) -> 
Result<()> { + let key = format!("{}:{}:{}", RedisDomain::BookMark, RedisDomain::Any, RedisDomain::Any); + let bms : Vec<String> = conn.keys(key).await.context("Unable to list keys")?; + if !bms.is_empty() { + let _: () = conn.del(bms).await.context("Failed to delete bookmark data")?; + } + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_003_create_heartbeats_table.rs b/common/src/database/schema/redis/_003_create_heartbeats_table.rs new file mode 100644 index 0000000..bee8661 --- /dev/null +++ b/common/src/database/schema/redis/_003_create_heartbeats_table.rs @@ -0,0 +1,26 @@ +use anyhow::{Context, Result}; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::database::redisdomain::RedisDomain; +use crate::migration; +use deadpool_redis::*; +use redis::AsyncCommands; + +pub(super) struct CreateHeartbeatsTable; +migration!(CreateHeartbeatsTable, 3, "create heartbeats table"); + +#[async_trait] +impl RedisMigration for CreateHeartbeatsTable { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, conn: &mut Connection) -> Result<()> { + let key = format!("{}:{}:{}", RedisDomain::Heartbeat, RedisDomain::Any, RedisDomain::Any); + let hbs : Vec<String> = conn.keys(key).await.context("Unable to list keys")?; + if !hbs.is_empty() { + let _: () = conn.del(hbs).await.context("Failed to delete heartbeat data")?; + } + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_004_add_last_event_seen_field_in_heartbeats_table.rs b/common/src/database/schema/redis/_004_add_last_event_seen_field_in_heartbeats_table.rs new file mode 100644 index 0000000..6565075 --- /dev/null +++ b/common/src/database/schema/redis/_004_add_last_event_seen_field_in_heartbeats_table.rs @@ -0,0 +1,23 @@ +use anyhow::Result; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::migration; +use deadpool_redis::*; + +pub(super) struct AddLastEventSeenFieldInHeartbeatsTable; +migration!( + 
AddLastEventSeenFieldInHeartbeatsTable, + 4, + "add last_event_seen field in heartbeats table" +); + +#[async_trait] +impl RedisMigration for AddLastEventSeenFieldInHeartbeatsTable { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_005_add_uri_field_in_subscriptions_table.rs b/common/src/database/schema/redis/_005_add_uri_field_in_subscriptions_table.rs new file mode 100644 index 0000000..68ea372 --- /dev/null +++ b/common/src/database/schema/redis/_005_add_uri_field_in_subscriptions_table.rs @@ -0,0 +1,23 @@ +use anyhow::Result; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::migration; +use deadpool_redis::*; + +pub(super) struct AddUriFieldInSubscriptionsTable; +migration!( + AddUriFieldInSubscriptionsTable, + 5, + "add uri field in subscriptions table" +); + +#[async_trait] +impl RedisMigration for AddUriFieldInSubscriptionsTable { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_006_add_content_format_field_in_subscriptions_table.rs b/common/src/database/schema/redis/_006_add_content_format_field_in_subscriptions_table.rs new file mode 100644 index 0000000..db83a8e --- /dev/null +++ b/common/src/database/schema/redis/_006_add_content_format_field_in_subscriptions_table.rs @@ -0,0 +1,23 @@ +use anyhow::Result; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::migration; +use deadpool_redis::*; + +pub(super) struct AddContentFormatFieldInSubscriptionsTable; +migration!( + AddContentFormatFieldInSubscriptionsTable, + 6, + "add content_format field in subscriptions table" +); + +#[async_trait] +impl RedisMigration for AddContentFormatFieldInSubscriptionsTable { + async fn up(&self, 
_conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_007_add_ignore_channel_error_field_in_subscriptions_table.rs b/common/src/database/schema/redis/_007_add_ignore_channel_error_field_in_subscriptions_table.rs new file mode 100644 index 0000000..faeb244 --- /dev/null +++ b/common/src/database/schema/redis/_007_add_ignore_channel_error_field_in_subscriptions_table.rs @@ -0,0 +1,23 @@ +use anyhow::Result; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::migration; +use deadpool_redis::*; + +pub(super) struct AddIgnoreChannelErrorFieldInSubscriptionsTable; +migration!( + AddIgnoreChannelErrorFieldInSubscriptionsTable, + 7, + "add ignore_channel_error field in subscriptions table" +); + +#[async_trait] +impl RedisMigration for AddIgnoreChannelErrorFieldInSubscriptionsTable { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_008_add_princs_filter_fields_in_subscriptions_table.rs b/common/src/database/schema/redis/_008_add_princs_filter_fields_in_subscriptions_table.rs new file mode 100644 index 0000000..035d8a3 --- /dev/null +++ b/common/src/database/schema/redis/_008_add_princs_filter_fields_in_subscriptions_table.rs @@ -0,0 +1,23 @@ +use anyhow::Result; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::migration; +use deadpool_redis::*; + +pub(super) struct AddPrincsFilterFieldsInSubscriptionsTable; +migration!( + AddPrincsFilterFieldsInSubscriptionsTable, + 8, + "add princs_filter fields in subscriptions table" +); + +#[async_trait] +impl RedisMigration for AddPrincsFilterFieldsInSubscriptionsTable { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, _conn: &mut 
Connection) -> Result<()> { + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_009_alter_outputs_format.rs b/common/src/database/schema/redis/_009_alter_outputs_format.rs new file mode 100644 index 0000000..a7c1434 --- /dev/null +++ b/common/src/database/schema/redis/_009_alter_outputs_format.rs @@ -0,0 +1,19 @@ +use anyhow::Result; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::migration; +use deadpool_redis::*; + +pub(super) struct AlterOutputsFormat; +migration!(AlterOutputsFormat, 9, "alter outputs format"); + +#[async_trait] +impl RedisMigration for AlterOutputsFormat { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_010_add_revision_field_in_subscriptions_table.rs b/common/src/database/schema/redis/_010_add_revision_field_in_subscriptions_table.rs new file mode 100644 index 0000000..a594732 --- /dev/null +++ b/common/src/database/schema/redis/_010_add_revision_field_in_subscriptions_table.rs @@ -0,0 +1,23 @@ +use anyhow::Result; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::migration; +use deadpool_redis::*; + +pub(super) struct AddRevisionFieldInSubscriptionsTable; +migration!( + AddRevisionFieldInSubscriptionsTable, + 10, + "add revision field in subscriptions table" +); + +#[async_trait] +impl RedisMigration for AddRevisionFieldInSubscriptionsTable { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_011_add_locale_fields_in_subscriptions_table.rs b/common/src/database/schema/redis/_011_add_locale_fields_in_subscriptions_table.rs new file mode 100644 index 0000000..55c78ac --- /dev/null +++ 
b/common/src/database/schema/redis/_011_add_locale_fields_in_subscriptions_table.rs @@ -0,0 +1,23 @@ +use anyhow::Result; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::migration; +use deadpool_redis::*; + +pub(super) struct AddLocaleFieldsInSubscriptionsTable; +migration!( + AddLocaleFieldsInSubscriptionsTable, + 11, + "add locale fields in subscriptions table" +); + +#[async_trait] +impl RedisMigration for AddLocaleFieldsInSubscriptionsTable { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_012_alter_outputs_files_config.rs b/common/src/database/schema/redis/_012_alter_outputs_files_config.rs new file mode 100644 index 0000000..2bcef09 --- /dev/null +++ b/common/src/database/schema/redis/_012_alter_outputs_files_config.rs @@ -0,0 +1,19 @@ +use anyhow::Result; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::migration; +use deadpool_redis::*; + +pub(super) struct AlterOutputsFilesConfig; +migration!(AlterOutputsFilesConfig, 12, "alter outputs files config"); + +#[async_trait] +impl RedisMigration for AlterOutputsFilesConfig { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } +} diff --git a/common/src/database/schema/redis/_013_add_max_elements_field_in_subscriptions_table.rs b/common/src/database/schema/redis/_013_add_max_elements_field_in_subscriptions_table.rs new file mode 100644 index 0000000..ad25329 --- /dev/null +++ b/common/src/database/schema/redis/_013_add_max_elements_field_in_subscriptions_table.rs @@ -0,0 +1,47 @@ +use anyhow::{Context, Result}; +use async_trait::async_trait; +use crate::database::redis::RedisMigration; +use crate::database::redisdomain::RedisDomain; +use crate::migration; +use deadpool_redis::*; +use 
redis::AsyncCommands; + +pub(super) struct AddMaxElementsFieldInSubscriptionsTable; +migration!( + AddMaxElementsFieldInSubscriptionsTable, + 13, + "add max_elements field in subscriptions table" +); + +#[async_trait] +impl RedisMigration for AddMaxElementsFieldInSubscriptionsTable { + async fn up(&self, _conn: &mut Connection) -> Result<()> { + Ok(()) + } + + async fn down(&self, conn: &mut Connection) -> Result<()> { + let key = format!("{}:{}:{}", RedisDomain::Heartbeat, RedisDomain::Any, RedisDomain::Any); + let hbs : Vec<String> = conn.keys(key).await.context("Unable to list keys")?; + if !hbs.is_empty() { + let _: () = conn.del(hbs).await.context("Failed to delete heartbeat data")?; + } + Ok(()) + } + // async fn up(&self, tx: &mut Transaction) -> Result<()> { + // tx.execute( + // "ALTER TABLE subscriptions ADD COLUMN IF NOT EXISTS max_elements INT4;", + // &[], + // ) + // .await?; + // Ok(()) + // } + + // async fn down(&self, tx: &mut Transaction) -> Result<()> { + // tx.execute( + // "ALTER TABLE subscriptions DROP COLUMN IF EXISTS max_elements", + // &[], + // ) + // .await?; + // Ok(()) + // } +} diff --git a/common/src/database/schema/redis/mod.rs b/common/src/database/schema/redis/mod.rs new file mode 100644 index 0000000..0c255ad --- /dev/null +++ b/common/src/database/schema/redis/mod.rs @@ -0,0 +1,49 @@ +use std::sync::Arc; + +use crate::database::redis::RedisDatabase; + +use self::{ + _001_create_subscriptions_table::CreateSubscriptionsTable, + _002_create_bookmarks_table::CreateBookmarksTable, + _003_create_heartbeats_table::CreateHeartbeatsTable, + _004_add_last_event_seen_field_in_heartbeats_table::AddLastEventSeenFieldInHeartbeatsTable, + _005_add_uri_field_in_subscriptions_table::AddUriFieldInSubscriptionsTable, + _006_add_content_format_field_in_subscriptions_table::AddContentFormatFieldInSubscriptionsTable, + _007_add_ignore_channel_error_field_in_subscriptions_table::AddIgnoreChannelErrorFieldInSubscriptionsTable, + 
_008_add_princs_filter_fields_in_subscriptions_table::AddPrincsFilterFieldsInSubscriptionsTable, + _009_alter_outputs_format::AlterOutputsFormat, + _010_add_revision_field_in_subscriptions_table::AddRevisionFieldInSubscriptionsTable, + _011_add_locale_fields_in_subscriptions_table::AddLocaleFieldsInSubscriptionsTable, + _012_alter_outputs_files_config::AlterOutputsFilesConfig, + _013_add_max_elements_field_in_subscriptions_table::AddMaxElementsFieldInSubscriptionsTable, +}; + +mod _001_create_subscriptions_table; +mod _002_create_bookmarks_table; +mod _003_create_heartbeats_table; +mod _004_add_last_event_seen_field_in_heartbeats_table; +mod _005_add_uri_field_in_subscriptions_table; +mod _006_add_content_format_field_in_subscriptions_table; +mod _007_add_ignore_channel_error_field_in_subscriptions_table; +mod _008_add_princs_filter_fields_in_subscriptions_table; +mod _009_alter_outputs_format; +mod _010_add_revision_field_in_subscriptions_table; +mod _011_add_locale_fields_in_subscriptions_table; +mod _012_alter_outputs_files_config; +mod _013_add_max_elements_field_in_subscriptions_table; + +pub fn register_migrations(redis_db: &mut RedisDatabase) { + redis_db.register_migration(Arc::new(CreateSubscriptionsTable)); + redis_db.register_migration(Arc::new(CreateBookmarksTable)); + redis_db.register_migration(Arc::new(CreateHeartbeatsTable)); + redis_db.register_migration(Arc::new(AddLastEventSeenFieldInHeartbeatsTable)); + redis_db.register_migration(Arc::new(AddUriFieldInSubscriptionsTable)); + redis_db.register_migration(Arc::new(AddContentFormatFieldInSubscriptionsTable)); + redis_db.register_migration(Arc::new(AddIgnoreChannelErrorFieldInSubscriptionsTable)); + redis_db.register_migration(Arc::new(AddPrincsFilterFieldsInSubscriptionsTable)); + redis_db.register_migration(Arc::new(AlterOutputsFormat)); + redis_db.register_migration(Arc::new(AddRevisionFieldInSubscriptionsTable)); + redis_db.register_migration(Arc::new(AddLocaleFieldsInSubscriptionsTable)); + 
redis_db.register_migration(Arc::new(AlterOutputsFilesConfig)); + redis_db.register_migration(Arc::new(AddMaxElementsFieldInSubscriptionsTable)); +} diff --git a/common/src/heartbeat.rs b/common/src/heartbeat.rs index e4a79a9..e31fb94 100644 --- a/common/src/heartbeat.rs +++ b/common/src/heartbeat.rs @@ -13,9 +13,9 @@ pub struct HeartbeatData { #[serde(serialize_with = "utils::serialize_timestamp")] first_seen: Timestamp, #[serde(serialize_with = "utils::serialize_timestamp")] - last_seen: Timestamp, + pub last_seen: Timestamp, #[serde(serialize_with = "utils::serialize_option_timestamp")] - last_event_seen: Option, + pub last_event_seen: Option, } fn serialize_subscription_data( diff --git a/common/src/settings.rs b/common/src/settings.rs index a3391cf..921e0c4 100644 --- a/common/src/settings.rs +++ b/common/src/settings.rs @@ -18,6 +18,7 @@ pub enum Authentication { pub enum Database { SQLite(SQLite), Postgres(Postgres), + Redis(Redis), } #[derive(Debug, Deserialize, Clone)] @@ -102,6 +103,18 @@ impl Kerberos { } } +#[derive(Debug, Deserialize, Clone)] +#[serde(deny_unknown_fields)] +pub struct Redis { + connection_url: String, +} + +impl Redis { + pub fn connection_url(&self) -> &str { + &self.connection_url + } +} + #[derive(Debug, Deserialize, Clone)] #[serde(deny_unknown_fields)] pub struct SQLite { diff --git a/common/src/subscription.rs b/common/src/subscription.rs index 81d00ad..7dffa29 100644 --- a/common/src/subscription.rs +++ b/common/src/subscription.rs @@ -205,7 +205,7 @@ impl SubscriptionOutputFormat { } } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub enum PrincsFilterOperation { Only, Except, @@ -234,7 +234,7 @@ impl PrincsFilterOperation { } } -#[derive(Debug, Clone, Eq, PartialEq)] +#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] pub struct PrincsFilter { operation: Option, princs: HashSet, @@ -323,7 +323,7 @@ impl PrincsFilter { } } -#[derive(Debug, Clone, Eq, 
PartialEq, Hash)] +#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] pub enum ContentFormat { Raw, RenderedText, @@ -352,7 +352,7 @@ impl FromStr for ContentFormat { } } -#[derive(Debug, PartialEq, Clone, Eq, Hash, Copy, Serialize)] +#[derive(Debug, PartialEq, Clone, Eq, Hash, Copy, Serialize, Deserialize)] pub struct SubscriptionUuid(pub Uuid); impl Display for SubscriptionUuid { @@ -364,7 +364,7 @@ impl Display for SubscriptionUuid { // We use the newtype pattern so that the compiler can check that // we don't use one instead of the other -#[derive(Debug, PartialEq, Clone, Eq, Hash, Copy)] +#[derive(Debug, PartialEq, Clone, Eq, Hash, Copy, Serialize, Deserialize)] pub struct InternalVersion(pub Uuid); impl Display for InternalVersion { @@ -387,7 +387,7 @@ impl Display for PublicVersion { /// of the subscription is updated and clients are expected to update /// their configuration. /// Every elements must implement the Hash trait -#[derive(Debug, PartialEq, Clone, Eq, Hash)] +#[derive(Debug, PartialEq, Clone, Eq, Hash, Serialize, Deserialize)] pub struct SubscriptionParameters { pub name: String, pub query: String, @@ -404,7 +404,7 @@ pub struct SubscriptionParameters { pub data_locale: Option, } -#[derive(Debug, PartialEq, Clone, Eq)] +#[derive(Debug, PartialEq, Clone, Eq, Serialize, Deserialize)] pub struct SubscriptionData { // Unique identifier of the subscription uuid: SubscriptionUuid,