Cherry pick health probes #904

Merged: 4 commits, Dec 10, 2024
40 changes: 25 additions & 15 deletions Cargo.lock

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions control-plane/rest/Cargo.toml
@@ -21,6 +21,7 @@ rustls = "0.21.12"
rustls-pemfile = "1.0.3"
actix-web = { version = "4.4.0", features = ["rustls-0_21"] }
actix-service = "2.0.2"
tokio = { version = "1.41.0", features = ["sync"] }
opentelemetry = { version = "0.22.0" }
actix-web-opentelemetry = "0.17.0"
tracing = "0.1.37"
54 changes: 54 additions & 0 deletions control-plane/rest/service/src/health/core_state.rs
@@ -0,0 +1,54 @@
use crate::v0::core_grpc;
use grpc::operations::node::traits::NodeOperations;
use std::time::{Duration, Instant};
use tokio::sync::Mutex;

/// Caches the liveness of the agent-core service.
/// Intended to be wrapped in an `Arc` and shared across threads.
pub struct CachedCoreState {
state: Mutex<ServerState>,
cache_duration: Duration,
}

/// This type remembers a liveness state, and when this data was refreshed.
struct ServerState {
is_live: bool,
last_updated: Instant,
}

impl ServerState {
/// Update the state of the agent-core service, or assume it's unavailable if something
/// went wrong.
async fn update_or_assume_unavailable(&mut self) {
let new_value = core_grpc().node().probe(None).await.unwrap_or(false);
self.is_live = new_value;
self.last_updated = Instant::now();
}
}

impl CachedCoreState {
/// Create a new cache for serving readiness health checks based on agent-core health.
pub async fn new(cache_duration: Duration) -> Self {
let agent_core_is_live = core_grpc().node().probe(None).await.unwrap_or(false);

CachedCoreState {
state: Mutex::new(ServerState {
is_live: agent_core_is_live,
last_updated: Instant::now(),
}),
cache_duration,
}
}

/// Get the cached state of the agent-core service, or assume it's unavailable if something
/// went wrong.
pub async fn get_or_assume_unavailable(&self) -> bool {
let mut state = self.state.lock().await;

if state.last_updated.elapsed() >= self.cache_duration {
state.update_or_assume_unavailable().await;
}

state.is_live
}
}
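
The caching logic above is compact enough to show in isolation. The following is a sketch only, not part of this PR: it replaces the core_grpc().node().probe(None) call with a hypothetical probe() function, but keeps the same lazy-refresh pattern guarded by a tokio::sync::Mutex.

use std::time::{Duration, Instant};
use tokio::sync::Mutex;

/// Hypothetical stand-in for `core_grpc().node().probe(None)`.
async fn probe() -> bool {
    true
}

/// Same lazy-refresh pattern as `CachedCoreState`, with the gRPC call factored out.
struct CachedProbe {
    state: Mutex<(bool, Instant)>,
    cache_duration: Duration,
}

impl CachedProbe {
    async fn new(cache_duration: Duration) -> Self {
        Self {
            state: Mutex::new((probe().await, Instant::now())),
            cache_duration,
        }
    }

    /// Returns the cached value; a fresh probe runs only when the entry is older
    /// than `cache_duration`, so at most one probe happens per cache window.
    async fn get(&self) -> bool {
        let mut state = self.state.lock().await;
        if state.1.elapsed() >= self.cache_duration {
            *state = (probe().await, Instant::now());
        }
        state.0
    }
}
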
28 changes: 28 additions & 0 deletions control-plane/rest/service/src/health/handlers.rs
@@ -0,0 +1,28 @@
use crate::CachedCoreState;
use actix_web::{get, web::Data, HttpResponse, Responder};

/// Liveness probe check. Failure will result in Pod restart. 200 on success.
#[get("/live")]
async fn liveness(_cached_core_state: Data<CachedCoreState>) -> impl Responder {
HttpResponse::Ok()
.content_type("text/plain; charset=utf-8")
.insert_header(("X-Content-Type-Options", "nosniff"))
.body("live")
}

/// Readiness probe check. Failure will result in the container being removed from the
/// Kubernetes Service's target pool. 200 on success, 503 on failure.
#[get("/ready")]
async fn readiness(cached_core_state: Data<CachedCoreState>) -> HttpResponse {
if cached_core_state.get_or_assume_unavailable().await {
return HttpResponse::Ok()
.content_type("text/plain; charset=utf-8")
.insert_header(("X-Content-Type-Options", "nosniff"))
.body("ready");
}

HttpResponse::ServiceUnavailable()
.content_type("text/plain; charset=utf-8")
.insert_header(("X-Content-Type-Options", "nosniff"))
.body("not ready")
}
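
A hedged sketch of how these handlers could be exercised with actix-web's test utilities. This is not part of the PR, and it assumes the global CORE_CLIENT has already been initialised, since constructing CachedCoreState probes the agent-core through core_grpc().

#[cfg(test)]
mod tests {
    use super::{liveness, readiness};
    use crate::CachedCoreState;
    use actix_web::{test, web::Data, App};
    use std::time::Duration;

    #[actix_web::test]
    async fn live_endpoint_returns_200() {
        // Sketch assumption: CORE_CLIENT is already set so core_grpc() resolves.
        let cache = Data::new(CachedCoreState::new(Duration::from_secs(120)).await);
        let app = test::init_service(
            App::new()
                .app_data(cache)
                .service(liveness)
                .service(readiness),
        )
        .await;

        let req = test::TestRequest::get().uri("/live").to_request();
        let resp = test::call_service(&app, req).await;
        assert!(resp.status().is_success());
    }
}
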
4 changes: 4 additions & 0 deletions control-plane/rest/service/src/health/mod.rs
@@ -0,0 +1,4 @@
/// Tools for collecting the liveness state of the agent-core service.
pub mod core_state;
/// Actix request handlers for health checks.
pub mod handlers;
43 changes: 31 additions & 12 deletions control-plane/rest/service/src/main.rs
@@ -1,16 +1,25 @@
mod authentication;
mod health;
mod v0;

use crate::v0::{CORE_CLIENT, JSON_GRPC_CLIENT};
use crate::{
health::{
core_state::CachedCoreState,
handlers::{liveness, readiness},
},
v0::{CORE_CLIENT, JSON_GRPC_CLIENT},
};
use actix_service::ServiceFactory;
use actix_web::{
body::MessageBody,
dev::{ServiceRequest, ServiceResponse},
middleware, App, HttpServer,
middleware,
web::Data,
App, HttpServer,
};
use rustls::{Certificate, PrivateKey, ServerConfig};
use rustls_pemfile::{certs, rsa_private_keys};
use std::{fs::File, io::BufReader};
use std::{fs::File, io::BufReader, time::Duration};
use utils::DEFAULT_GRPC_CLIENT_ADDR;

#[derive(Debug, Parser)]
@@ -28,6 +37,10 @@ pub(crate) struct CliArgs {
#[clap(long, short = 'z', default_value = DEFAULT_GRPC_CLIENT_ADDR)]
core_grpc: Uri,

/// Set the frequency of probing the agent-core for a liveness check.
#[arg(long = "core-health-freq", value_parser = humantime::parse_duration, default_value = "2m")]
core_liveness_check_frequency: Duration,

/// The json gRPC Server URL or address to connect to the service.
#[clap(long, short = 'J')]
json_grpc: Option<Uri>,
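
The value parser for the new flag is humantime::parse_duration, so values such as "2m", "30s", or "800ms" are accepted. A small illustrative check of what the default resolves to (not part of the PR):

fn main() {
    // "2m" parses to 120 seconds, the default agent-core probe interval.
    let freq = humantime::parse_duration("2m").expect("valid humantime literal");
    assert_eq!(freq, std::time::Duration::from_secs(120));
}
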
@@ -222,24 +235,30 @@ async fn main() -> anyhow::Result<()> {
.with_tracing_tags(cli_args.tracing_tags.clone())
.init("rest-server");

// Initialize the core client to be used in rest
CORE_CLIENT
.set(CoreClient::new(cli_args.core_grpc, timeout_opts()).await)
.ok()
.expect("Expect to be initialised only once");

let cached_core_state =
Data::new(CachedCoreState::new(cli_args.core_liveness_check_frequency).await);

let app = move || {
App::new()
.app_data(cached_core_state.clone())
.service(liveness)
.service(readiness)
.wrap(RequestTracing::new())
.wrap(middleware::Logger::default())
.app_data(authentication::init(get_jwk_path()))
.configure_api(&v0::configure_api)
};

// Initialise the core client to be used in rest
CORE_CLIENT
.set(CoreClient::new(CliArgs::args().core_grpc, timeout_opts()).await)
.ok()
.expect("Expect to be initialised only once");

// Initialise the json grpc client to be used in rest
if CliArgs::args().json_grpc.is_some() {
// Initialize the json grpc client to be used in rest
if let Some(json_grpc) = CliArgs::args().json_grpc {
JSON_GRPC_CLIENT
.set(JsonGrpcClient::new(CliArgs::args().json_grpc.unwrap(), timeout_opts()).await)
.set(JsonGrpcClient::new(json_grpc, timeout_opts()).await)
.ok()
.expect("Expect to be initialised only once");
}
4 changes: 4 additions & 0 deletions deployer/src/infra/rest.rs
@@ -44,6 +44,10 @@ impl ComponentAction for Rest {
}
}

if let Some(core_health_freq) = &options.rest_core_health_freq {
binary = binary.with_args(vec!["--core-health-freq", core_health_freq]);
}

if cfg.container_exists("jaeger") {
let jaeger_config = format!("jaeger.{}:4317", cfg.get_name());
binary = binary.with_args(vec!["--jaeger", &jaeger_config])
4 changes: 4 additions & 0 deletions deployer/src/lib.rs
@@ -136,6 +136,10 @@ pub struct StartOptions {
#[clap(long, conflicts_with = "no_rest")]
pub rest_jwk: Option<String>,

/// Set the frequency of the rest-to-core health probe on the REST server.
#[arg(long)]
pub rest_core_health_freq: Option<String>,

/// Use the following image pull policy when creating containers from images.
#[clap(long, default_value = "ifnotpresent")]
pub image_pull_policy: composer::ImagePullPolicy,
18 changes: 15 additions & 3 deletions tests/bdd/common/deployer.py
@@ -1,11 +1,10 @@
import os
import subprocess
from datetime import datetime

import pytest
from dataclasses import dataclass
from datetime import datetime

import common
import pytest
from common.docker import Docker
from common.nvme import nvme_disconnect_allours_wait

@@ -36,6 +35,9 @@ class StartOptions:
io_engine_devices: [str] = ()
request_timeout: str = ""
no_min_timeouts: bool = False
rust_log: str = None
rust_log_silence: str = None
rest_core_health_freq: str = None

def args(self):
args = [
@@ -84,13 +86,17 @@ def args(self):
if self.no_min_timeouts:
args.append(f"--no-min-timeouts")

if self.rest_core_health_freq:
args.append(f"--rest-core-health-freq={self.rest_core_health_freq}")

agent_arg = "--agents=Core"
if self.ha_node_agent:
agent_arg += ",HaNode"
if self.ha_cluster_agent:
agent_arg += ",HaCluster"
if self.ha_cluster_agent_fast is not None:
args.append(f"--cluster-fast-requeue={self.ha_cluster_agent_fast}")

args.append(agent_arg)

return args
@@ -122,6 +128,9 @@ def start(
io_engine_devices=[],
request_timeout="",
no_min_timeouts=False,
rust_log: str = None,
rust_log_silence: str = None,
rest_core_health_freq: str = None,
):
options = StartOptions(
io_engines,
@@ -146,6 +155,9 @@
io_engine_devices=io_engine_devices,
request_timeout=request_timeout,
no_min_timeouts=no_min_timeouts,
rust_log=rust_log,
rust_log_silence=rust_log_silence,
rest_core_health_freq=rest_core_health_freq,
)
pytest.deployer_options = options
Deployer.start_with_opts(options)
12 changes: 12 additions & 0 deletions tests/bdd/features/health_probes/readiness_probe.feature
@@ -0,0 +1,12 @@
Feature: Readiness Probe

Background:
Given a running agent-core service
And a running REST service with the cache refresh period set to "800ms"

Scenario: The REST API /ready service should not update its readiness status more than once in the cache refresh period
Given agent-core service is available
And the REST service returns a 200 status code for an HTTP GET request to the /ready endpoint
When the agent-core service is brought down forcefully
Then the REST service return changes from 200 to 503 within double of the cache refresh period
And it keeps returning 503 at least for the cache refresh period
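
One way to read the "double of the cache refresh period" bound; this is an interpretation, not something stated in the PR: the cached "live" value can remain stale for up to one full refresh period after the agent-core goes down, and the test allows roughly one more period for its own polling granularity. A small arithmetic sketch under that assumption:

fn main() {
    use std::time::Duration;

    // Assumed from the Background: cache refresh period of 800ms.
    let refresh = Duration::from_millis(800);

    // Up to one period of stale "live" state plus up to one period of poll slack.
    let flip_deadline = 2 * refresh;
    assert_eq!(flip_deadline, Duration::from_millis(1600));
}
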