Add pool spread topology key feature #756

Closed · wants to merge 3 commits
@@ -70,7 +70,7 @@ impl DefaultBasePolicy {

/// Returns true if all the keys present in the volume's pool/node inclusion labels match the
/// pool/node labels, otherwise returns false.
pub(crate) fn qualifies_inclusion_labels(
pub(crate) fn qualifies_label_criteria(
vol_pool_inc_labels: HashMap<String, String>,
pool_labels: &HashMap<String, String>,
) -> bool {
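Note: the body of the renamed helper is collapsed in this view, so the following is only a sketch of the assumed matching logic (every key requested by the volume topology must exist in the pool/node labels; an empty requested value is assumed to mean key-only matching). Under that reading the same helper can serve both inclusion labels (key=value) and the new spread keys (key only), which would explain dropping "inclusion" from the name.

```rust
use std::collections::HashMap;

/// Sketch only - not the collapsed body from the diff.
/// Every requested key must be present in the pool/node labels; a non-empty
/// requested value must match exactly, an empty value only requires the key.
pub(crate) fn qualifies_label_criteria(
    vol_pool_inc_labels: HashMap<String, String>,
    pool_labels: &HashMap<String, String>,
) -> bool {
    vol_pool_inc_labels
        .iter()
        .all(|(key, value)| match pool_labels.get(key) {
            Some(pool_value) => value.is_empty() || pool_value == value,
            None => false,
        })
}
```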
@@ -2,7 +2,7 @@ use crate::controller::scheduling::{
nexus::GetSuitableNodesContext,
resources::{NodeItem, PoolItem},
volume::GetSuitablePoolsContext,
volume_policy::qualifies_inclusion_labels,
volume_policy::qualifies_label_criteria,
};
use std::collections::HashMap;
use stor_port::types::v0::transport::NodeTopology;
@@ -96,10 +96,8 @@ impl NodeFilters {
// We will reach this part of the code only if the volume has inclusion/exclusion labels.
match request.registry().specs().node(&item.pool.node) {
Ok(spec) => {
let inc_match = qualifies_inclusion_labels(
volume_node_topology_inclusion_labels,
spec.labels(),
);
let inc_match =
qualifies_label_criteria(volume_node_topology_inclusion_labels, spec.labels());
inc_match
}
Err(_) => false,
@@ -1,7 +1,7 @@
use crate::controller::scheduling::{
resources::{ChildItem, PoolItem},
volume::{GetSuitablePoolsContext, ReplicaResizePoolsContext},
volume_policy::qualifies_inclusion_labels,
volume_policy::qualifies_label_criteria,
};
use std::collections::HashMap;
use stor_port::types::v0::transport::{PoolStatus, PoolTopology};
@@ -78,6 +78,7 @@ impl PoolBaseFilters {
/// Should only attempt to use pools having specific creation label if topology has it.
pub(crate) fn topology(request: &GetSuitablePoolsContext, item: &PoolItem) -> bool {
let volume_pool_topology_inclusion_labels: HashMap<String, String>;
let volume_pool_topology_exclusion_labels: HashMap<String, String>;
match request.topology.clone() {
None => return true,
Some(topology) => match topology.pool {
@@ -86,8 +87,11 @@
PoolTopology::Labelled(labelled_topology) => {
// The labels in Volume Pool Topology should match the pool labels if
// present, otherwise selection of any pool is allowed.
if !labelled_topology.inclusion.is_empty() {
volume_pool_topology_inclusion_labels = labelled_topology.inclusion
if !labelled_topology.inclusion.is_empty()
|| !labelled_topology.exclusion.is_empty()
{
volume_pool_topology_inclusion_labels = labelled_topology.inclusion;
volume_pool_topology_exclusion_labels = labelled_topology.exclusion;
} else {
return true;
}
@@ -101,7 +105,11 @@
Ok(spec) => match spec.labels {
None => false,
Some(pool_labels) => {
qualifies_inclusion_labels(volume_pool_topology_inclusion_labels, &pool_labels)
qualifies_label_criteria(volume_pool_topology_inclusion_labels, &pool_labels)
&& qualifies_label_criteria(
volume_pool_topology_exclusion_labels,
&pool_labels,
)
}
},
Err(_) => false,
76 changes: 63 additions & 13 deletions control-plane/agents/src/bin/core/volume/operations.rs
@@ -35,10 +35,10 @@ use stor_port::{
volume::{PublishOperation, RepublishOperation, VolumeOperation, VolumeSpec},
},
transport::{
CreateVolume, DestroyNexus, DestroyReplica, DestroyShutdownTargets, DestroyVolume,
Protocol, PublishVolume, Replica, ReplicaId, ReplicaOwners, RepublishVolume,
ResizeVolume, SetVolumeReplica, ShareNexus, ShareVolume, ShutdownNexus,
UnpublishVolume, UnshareNexus, UnshareVolume, Volume,
CreateReplica, CreateVolume, DestroyNexus, DestroyReplica, DestroyShutdownTargets,
DestroyVolume, PoolTopology, Protocol, PublishVolume, Replica, ReplicaId,
ReplicaOwners, RepublishVolume, ResizeVolume, SetVolumeReplica, ShareNexus,
ShareVolume, ShutdownNexus, UnpublishVolume, UnshareNexus, UnshareVolume, Volume,
},
},
};
@@ -191,13 +191,6 @@ impl ResourceResize for OperationGuardArc<VolumeSpec> {
let spec = self.as_ref().clone();
let state = registry.volume_state(&request.uuid).await?;

// Pre-check - Don't allow resize if the volume has snapshots.
// NOTE: This is temporary till the fix for handling this in spdk is done.
if !registry.specs().snapshots_by_vol(self.uuid()).is_empty() {
return Err(SvcError::Internal {
details: "Volume can't be resized while it has snapshots".into(),
});
}

[Review comment (Contributor), on the removed snapshot pre-check]
Why did you remove the snapshot check?

// Pre-checks - volume state eligible for resize.
vol_status_ok_for_resize(&spec)?;
// Pre-check - Ensure pools that host replicas have enough space to resize the replicas,
@@ -942,8 +935,18 @@ impl CreateVolumeExe for CreateVolume {
for replica in candidates.candidates() {
if replicas.len() >= self.replicas as usize {
break;
} else if replicas.iter().any(|r| r.node == replica.node) {
// don't reuse the same node
} else if replicas.iter().any(|r| {
r.node == replica.node
|| spread_label_is_same(r, replica, context).unwrap_or_else(|error| {
context.volume.error(&format!(
"Failed to create replica {:?} for volume, error: {}",
replica,
error.full_string()
));
false
})
}) {
// don't reuse the same node, or a pool with the same spread (exclusion) label value
continue;
}
let replica = if replicas.is_empty() {
@@ -1001,3 +1004,50 @@ impl CreateVolumeExeVal for CreateVolumeSource<'_> {
}
}
}
// spread_label_is_same checks whether the candidate's pool labels are the same as those of an existing replica's pool.
fn spread_label_is_same(
replica: &Replica,
replica_candidate: &CreateReplica,
context: &Context,
) -> Result<bool, SvcError> {
let is_spread_label_same = match &context.volume.as_ref().topology {
None => false,
[Review comment (Contributor), on lines +1013 to +1014]
Suggested change:
-    let is_spread_label_same = match &context.volume.as_ref().topology {
-        None => false,
+    let Some(topology) = context.volume.topology.as_ref() else {
+        return Ok(false);
+    };

Some(topology) => {
match &topology.pool {
None => false,
[Review comment (Contributor), on lines +1016 to +1017]
Again, a `let ... else` here would reduce the nesting. (A sketch applying this suggestion follows the function below.)

Some(pool_topology) => match pool_topology {
PoolTopology::Labelled(labelled_topology) => {
let mut pool_exclusion_label_match = false;

// If the PoolSpreadTopologyKey is present in the volume's pool topology, fetch the
// pool details of the replica candidate and check whether its pool labels are the
// same as those of the existing replica's pool. If they are, the pool is not
// considered for replica creation.
if !labelled_topology.exclusion.is_empty() {
let candidate_pool = context
.registry
.specs()
.pool(&replica_candidate.pool_id)?
.labels
.unwrap_or_default();
[Review comment (Contributor)]
Can we not pass this information through the candidates type?

let replica_pool = context
.registry
.specs()
.pool(&replica.pool_id)?
.labels
.unwrap_or_default();
for (key, _) in labelled_topology.exclusion.iter() {
if candidate_pool[key] == replica_pool[key] {
pool_exclusion_label_match = true;
break;
}
}
}
pool_exclusion_label_match
}
},
}
}
};
Ok(is_spread_label_same)
}
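A sketch of how the reviewer's `let ... else` suggestions could be applied to the whole function above. Field and method names are taken from the diff, but this is only a possible refactor, not the merged implementation.

```rust
// Sketch: same intended behaviour as spread_label_is_same above, with the nesting
// flattened via let-else. All types/fields are assumed from the surrounding diff.
fn spread_label_is_same(
    replica: &Replica,
    replica_candidate: &CreateReplica,
    context: &Context,
) -> Result<bool, SvcError> {
    let Some(topology) = context.volume.as_ref().topology.as_ref() else {
        return Ok(false);
    };
    let Some(PoolTopology::Labelled(labelled_topology)) = topology.pool.as_ref() else {
        return Ok(false);
    };
    if labelled_topology.exclusion.is_empty() {
        return Ok(false);
    }
    let candidate_pool_labels = context
        .registry
        .specs()
        .pool(&replica_candidate.pool_id)?
        .labels
        .unwrap_or_default();
    let replica_pool_labels = context
        .registry
        .specs()
        .pool(&replica.pool_id)?
        .labels
        .unwrap_or_default();
    // Reject the candidate if, for any spread key, both pools carry the same value.
    Ok(labelled_topology.exclusion.keys().any(|key| {
        matches!(
            (candidate_pool_labels.get(key), replica_pool_labels.get(key)),
            (Some(a), Some(b)) if a == b
        )
    }))
}
```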
10 changes: 9 additions & 1 deletion control-plane/csi-driver/src/bin/controller/controller.rs
@@ -1009,6 +1009,7 @@ fn context_into_topology(context: &CreateParams) -> CreateVolumeTopology {
// labels for pool inclusion
let mut pool_inclusive_label_topology: HashMap<String, String> = HashMap::new();
let mut node_inclusive_label_topology: HashMap<String, String> = HashMap::new();
let mut pool_exclusive_label_topology: HashMap<String, String> = HashMap::new();
pool_inclusive_label_topology.insert(String::from(CREATED_BY_KEY), String::from(DSP_OPERATOR));
pool_inclusive_label_topology.extend(
context
Expand All @@ -1024,6 +1025,13 @@ fn context_into_topology(context: &CreateParams) -> CreateVolumeTopology {
.clone()
.unwrap_or_default(),
);
pool_exclusive_label_topology.extend(
context
.publish_params()
.pool_spread_topology_key()
.clone()
.unwrap_or_default(),
);
node_inclusive_label_topology.extend(
context
.publish_params()
@@ -1044,7 +1052,7 @@
inclusion: node_inclusive_label_topology,
})),
Some(PoolTopology::labelled(LabelledTopology {
exclusion: Default::default(),
exclusion: pool_exclusive_label_topology,
inclusion: pool_inclusive_label_topology,
})),
)
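For illustration only: with a StorageClass parameter such as `poolSpreadTopologyKey: zone-us`, the change above would be expected to build a pool topology roughly like the sketch below. The key-only (empty value) representation of the spread key and the `example_pool_topology` helper are assumptions, not code from the PR.

```rust
use std::collections::HashMap;

// Hypothetical sketch; PoolTopology, LabelledTopology, CREATED_BY_KEY and DSP_OPERATOR
// are the items already used in the file above.
fn example_pool_topology() -> PoolTopology {
    let mut inclusion: HashMap<String, String> = HashMap::new();
    // the CSI controller always adds the created-by label to the inclusion set
    inclusion.insert(String::from(CREATED_BY_KEY), String::from(DSP_OPERATOR));

    let mut exclusion: HashMap<String, String> = HashMap::new();
    // spread key only: replicas should land on pools whose `zone-us` values differ
    exclusion.insert(String::from("zone-us"), String::new());

    PoolTopology::labelled(LabelledTopology { exclusion, inclusion })
}
```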
18 changes: 18 additions & 0 deletions control-plane/csi-driver/src/context.rs
@@ -60,6 +60,8 @@ pub enum Parameters {
PoolAffinityTopologyLabel,
#[strum(serialize = "poolHasTopologyKey")]
PoolHasTopologyKey,
#[strum(serialize = "poolSpreadTopologyKey")]
PoolSpreadTopologyKey,
#[strum(serialize = "nodeAffinityTopologyLabel")]
NodeAffinityTopologyLabel,
#[strum(serialize = "nodeHasTopologyKey")]
@@ -180,6 +182,12 @@ impl Parameters {
) -> Result<Option<HashMap<String, String>>, tonic::Status> {
Self::parse_topology_param(value)
}
/// Parse the value for `Self::PoolSpreadTopologyKey`.
pub fn pool_spread_topology_key(
value: Option<&String>,
) -> Result<Option<HashMap<String, String>>, tonic::Status> {
Self::parse_topology_param(value)
}
/// Parse the value for `Self::NodeAffinityTopologyLabel`.
pub fn node_affinity_topology_label(
value: Option<&String>,
@@ -210,6 +218,7 @@ pub struct PublishParams {
fs_id: Option<Uuid>,
pool_affinity_topology_label: Option<HashMap<String, String>>,
pool_has_topology_key: Option<HashMap<String, String>>,
pool_spread_topology_key: Option<HashMap<String, String>>,
node_affinity_topology_label: Option<HashMap<String, String>>,
node_has_topology_key: Option<HashMap<String, String>>,
}
@@ -242,6 +251,10 @@ impl PublishParams {
pub fn pool_has_topology_key(&self) -> &Option<HashMap<String, String>> {
&self.pool_has_topology_key
}
/// Get the `Parameters::PoolSpreadTopologyKey` value.
pub fn pool_spread_topology_key(&self) -> &Option<HashMap<String, String>> {
&self.pool_spread_topology_key
}
/// Get the `Parameters::NodeAffinityTopologyLabel` value.
pub fn node_affinity_topology_label(&self) -> &Option<HashMap<String, String>> {
&self.node_affinity_topology_label
@@ -307,6 +320,10 @@ impl TryFrom<&HashMap<String, String>> for PublishParams {
let pool_has_topology_key =
Parameters::pool_has_topology_key(args.get(Parameters::PoolHasTopologyKey.as_ref()))
.map_err(|_| tonic::Status::invalid_argument("Invalid pool_has_topology_key"))?;
let pool_spread_topology_key = Parameters::pool_spread_topology_key(
args.get(Parameters::PoolSpreadTopologyKey.as_ref()),
)
.map_err(|_| tonic::Status::invalid_argument("Invalid pool_spread_topology_key"))?;
let node_affinity_topology_label = Parameters::node_affinity_topology_label(
args.get(Parameters::NodeAffinityTopologyLabel.as_ref()),
)
@@ -323,6 +340,7 @@
fs_id,
pool_affinity_topology_label,
pool_has_topology_key,
pool_spread_topology_key,
node_affinity_topology_label,
node_has_topology_key,
})
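A hypothetical usage sketch for the new parameter; the `example` function and the exact value format accepted for `poolSpreadTopologyKey` are assumptions, not part of the PR.

```rust
use std::collections::HashMap;

// `args` would normally come from the CSI CreateVolume request parameters
// (e.g. set through the StorageClass).
fn example(args: &HashMap<String, String>) -> Result<(), tonic::Status> {
    let params = PublishParams::try_from(args)?;
    if let Some(spread_keys) = params.pool_spread_topology_key() {
        // the scheduler later uses these keys to keep replicas on pools whose
        // values for each key differ from one another
        println!("pool spread keys: {:?}", spread_keys.keys().collect::<Vec<_>>());
    }
    Ok(())
}
```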
21 changes: 20 additions & 1 deletion tests/bdd/features/volume/topology/pool-topology.feature
@@ -33,6 +33,11 @@ Feature: Volume Pool Topology
# the label || node2pool3 || zone-eu=eu-west-3 || io-engine-2 ||
# "zone-eu=eu-west-3" || node3pool3 || zone-eu=eu-west-3 || io-engine-3 ||
#==============================================================================================
# "poolX" has || node1poolX || zone-eu=eu-west-4 || io-engine-1 ||
# the label || || || ||
# "zone-eu=eu-west-X" || || || ||
#==============================================================================================


Scenario Outline: Suitable pools which contain volume topology labels
Given a request for a <replica> replica volume with poolAffinityTopologyLabel as <pool_affinity_topology_label> and pool topology inclusion as <volume_pool_topology_inclusion_label>
@@ -72,4 +77,18 @@
| True | zone-eu | 1 | <= | succeed | must be | zone-eu=eu-west-3 |
| True | zone-eu | 2 | <= | succeed | must be | zone-eu=eu-west-3 |
| True | zone-eu | 3 | <= | succeed | must be | zone-eu=eu-west-3 |
| True | zone-eu | 4 | > | fail | not | zone-eu=eu-west-3 |
| True | zone-eu | 5 | > | fail | not | zone-eu=eu-west-3 |


Scenario Outline: Suitable pools which contain volume pool spread topology key
Given a request for a <replica> replica volume with poolSpreadTopologyKey as <spread_topology_key> and pool topology exclusion as <volume_pool_topology_exclusion_label>
When the desired number of replica of volume i.e. <replica> here; is <expression> number of the pools satisfying pool spread key <volume_pool_topology_exclusion_label>
Then the <replica> replica volume creation should <result> and <provisioned> provisioned on pools with labels <pool_label>
Examples:
| spread_topology_key | volume_pool_topology_exclusion_label | replica | expression | result | provisioned | pool_label |
| True | zone-us | 1 | <= | succeed | must be | zone-us=us-west-1 |
| True | zone-us | 2 | > | fail | not | zone-us=us-west-1 |
| True | zone-ap | 1 | <= | succeed | must be | zone-ap=ap-south-1 |
| True | zone-ap | 2 | > | fail | not | zone-ap=ap-south-1 |
| True | zone-eu | 1 | <= | succeed | must be | zone-eu=eu-west-3 |
| True | zone-eu | 2 | <= | succeed | must be | zone-eu=eu-west-3,zone-eu=eu-west-X |