From a9ee35ae6bb5b4a78b9a321b1f1affcf696e2579 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Mon, 4 Dec 2023 12:07:23 +0200 Subject: [PATCH 01/63] Early draft [skip ci] --- CHANGELOG.md | 12 +- src/client.rs | 260 ++++++++--- src/client/diagnostics.rs | 39 +- src/lib.rs | 11 +- src/network_event/server_event.rs | 27 +- src/replicon_core.rs | 34 +- src/replicon_core/replication_rules.rs | 8 +- src/server.rs | 415 +++++++++--------- src/server/despawn_tracker.rs | 35 +- src/server/message.rs | 93 ---- src/server/removal_tracker.rs | 32 +- src/server/replication_messages.rs | 276 ++++++++++++ .../replication_buffer.rs | 25 +- tests/replication.rs | 31 +- 14 files changed, 813 insertions(+), 485 deletions(-) delete mode 100644 src/server/message.rs create mode 100644 src/server/replication_messages.rs rename src/server/{message => replication_messages}/replication_buffer.rs (92%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7b383226..622b4fea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,17 +9,25 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added -- `TicksMap` with mapping from server ticks to system change ticks. +- `ServerEntityTicks` with ticks for each entity on client. ### Changed +- Send all component mappings, inserts, removals and despawns over reliable channel in form of deltas and component updates over unreliable channel packed by packet size. This will significantly reduce packet loss. +- Packets in client diagnostics was replaced with init and update messages (it was actually number of messages before). +- `ClientMapping` no longer contains `tick` field. +- Replace `REPLICATION_CHANNEL_ID` with `ReplicationChannel` enum. The previous constant corresponded to the unreliable channel. - Use `EntityHashMap` instead of `HashMap` with entities as keys. - Use `Cursor<&[u8]>` instead of `Cursor`. - Replace `LastRepliconTick` with `RepliconTick` on client. 
-- `AckedTicks` now returns the map via `deref` instead of via separate method. - Fix missing reset of `RepliconTick` on server disconnect. - Rename `replicate_into_scene` into `replicate_into` and move it to `scene` module. +### Removed + +- `AckedTicks` resource. +- `TicksMap` resource. + ## [0.17.0] - 2023-11-13 ### Added diff --git a/src/client.rs b/src/client.rs index d7bcc063..71a5de5c 100644 --- a/src/client.rs +++ b/src/client.rs @@ -6,7 +6,7 @@ use bevy::{ prelude::*, utils::{hashbrown::hash_map::Entry, EntityHashMap}, }; -use bevy_renet::client_connected; +use bevy_renet::{client_connected, renet::Bytes}; use bevy_renet::{renet::RenetClient, transport::NetcodeClientPlugin, RenetClientPlugin}; use bincode::{DefaultOptions, Options}; use varint_rs::VarintReader; @@ -14,7 +14,7 @@ use varint_rs::VarintReader; use crate::replicon_core::{ replication_rules::{Mapper, Replication, ReplicationRules}, replicon_tick::RepliconTick, - REPLICATION_CHANNEL_ID, + ReplicationChannel, }; use diagnostics::ClientStats; @@ -24,6 +24,7 @@ impl Plugin for ClientPlugin { fn build(&self, app: &mut App) { app.add_plugins((RenetClientPlugin, NetcodeClientPlugin)) .init_resource::() + .init_resource::() .configure_sets( PreUpdate, ClientSet::Receive.after(NetcodeClientPlugin::update_system), @@ -41,74 +42,135 @@ impl Plugin for ClientPlugin { ) .add_systems( PostUpdate, - ( - Self::ack_sending_system - .in_set(ClientSet::Send) - .run_if(client_connected()), - Self::reset_system.run_if(resource_removed::()), - ), + Self::reset_system.run_if(resource_removed::()), ); } } impl ClientPlugin { - pub(super) fn replication_receiving_system(world: &mut World) -> bincode::Result<()> { + /// Receives and applies replication messages from the server. + /// + /// Init messages applied first to ensure the valid state. + /// + /// Then update messages will be applied only if an init message with their tick has arrived. 
+ /// If an update message received before the required init message, then they will be buffered until then. + /// Since they could arrive in any order, entities will be updated only if the received update requires a more recent tick. + /// + /// And then the buffered messages from the last run are processed. + /// + /// Sends acknowledgments back for update messages. + /// + /// See also [`ReplicationMessages`](crate::server::replication_messages::ReplicationMessages). + pub(super) fn replication_receiving_system( + world: &mut World, + mut buffered_updates: Local>, + mut retain_buffer: Local>, + ) -> bincode::Result<()> { world.resource_scope(|world, mut client: Mut| { world.resource_scope(|world, mut entity_map: Mut| { - world.resource_scope(|world, replication_rules: Mut| { - let mut stats = world.remove_resource::(); - while let Some(message) = client.receive_message(REPLICATION_CHANNEL_ID) { - apply_message( - &message, - world, - &mut entity_map, - stats.as_mut(), - &replication_rules, - )?; - } - - if let Some(stats) = stats { - world.insert_resource(stats); - } - - Ok(()) + world.resource_scope(|world, mut entity_ticks: Mut| { + world.resource_scope(|world, replication_rules: Mut| { + let mut stats = world.remove_resource::(); + while let Some(message) = + client.receive_message(ReplicationChannel::Reliable) + { + apply_init_message( + &message, + world, + &mut entity_map, + &mut entity_ticks, + stats.as_mut(), + &replication_rules, + )?; + } + + let old_buffers = buffered_updates.len(); + let replicon_tick = *world.resource::(); + while let Some(message) = + client.receive_message(ReplicationChannel::Unreliable) + { + let index = apply_update_message( + message, + world, + &mut entity_map, + &mut entity_ticks, + &mut buffered_updates, + stats.as_mut(), + &replication_rules, + replicon_tick, + )?; + + client.send_message( + ReplicationChannel::Reliable, + bincode::serialize(&index)?, + ) + } + + retain_buffer.clear(); + retain_buffer.reserve(old_buffers); 
+ for update in buffered_updates.iter().take(old_buffers) { + let retain = update.tick < replicon_tick; + if retain { + let mut cursor = Cursor::new(&*update.message); + cursor.set_position(update.position); + + apply_components( + &mut cursor, + world, + &mut entity_map, + &mut entity_ticks, + stats.as_mut(), + ComponentsKind::Update, + &replication_rules, + replicon_tick, + )?; + } + retain_buffer.push(retain); + } + let mut iter = retain_buffer.iter(); + buffered_updates.retain(|_| *iter.next().unwrap_or(&true)); + + if let Some(stats) = stats { + world.insert_resource(stats); + } + + Ok(()) + }) }) }) }) } - fn ack_sending_system(replicon_tick: Res, mut client: ResMut) { - let message = bincode::serialize(&*replicon_tick) - .unwrap_or_else(|e| panic!("client ack should be serialized: {e}")); - client.send_message(REPLICATION_CHANNEL_ID, message); - } - fn reset_system( mut replicon_tick: ResMut, mut entity_map: ResMut, + mut entity_ticks: ResMut, ) { *replicon_tick = Default::default(); entity_map.clear(); + entity_ticks.0.clear(); } } -fn apply_message( +/// Applies [`InitMessage`](crate::server::replication_messages::InitMessage). +fn apply_init_message( message: &[u8], world: &mut World, entity_map: &mut ServerEntityMap, + entity_ticks: &mut ServerEntityTicks, mut stats: Option<&mut ClientStats>, replication_rules: &ReplicationRules, ) -> bincode::Result<()> { let end_pos: u64 = message.len().try_into().unwrap(); let mut cursor = Cursor::new(message); if let Some(stats) = &mut stats { - stats.packets += 1; + stats.init_messages += 1; stats.bytes += end_pos; } - let Some(tick) = apply_tick(&mut cursor, world)? 
else { - return Ok(()); - }; + let replicon_tick = bincode::deserialize_from(&mut cursor)?; + trace!("applying {replicon_tick:?}"); + *world.resource_mut::() = replicon_tick; if cursor.position() == end_pos { return Ok(()); } @@ -122,10 +184,11 @@ fn apply_message( &mut cursor, world, entity_map, + entity_ticks, stats.as_deref_mut(), - ComponentsKind::Change, + ComponentsKind::Insert, replication_rules, - tick, + replicon_tick, )?; if cursor.position() == end_pos { return Ok(()); @@ -135,10 +198,11 @@ fn apply_message( &mut cursor, world, entity_map, + entity_ticks, stats.as_deref_mut(), ComponentsKind::Removal, replication_rules, - tick, + replicon_tick, )?; if cursor.position() == end_pos { return Ok(()); @@ -148,32 +212,61 @@ fn apply_message( &mut cursor, world, entity_map, + entity_ticks, replication_rules, - tick, + replicon_tick, stats, )?; Ok(()) } -/// Deserializes server tick and applies it to [`LastTick`] if it is newer. +/// Applies [`UpdateMessage`](crate::server::replication_messages::UpdateMessage). /// -/// Returns the tick if [`LastTick`] has been updated. -fn apply_tick( - cursor: &mut Cursor<&[u8]>, +/// If the update message can't be applied yet (because the init message with the +/// corresponding tick hasn't arrived), it will be buffered. +/// +/// Returns update index to be used for acknowledgment. 
+#[allow(clippy::too_many_arguments)] +fn apply_update_message( + message: Bytes, world: &mut World, -) -> bincode::Result> { - let tick = bincode::deserialize_from(cursor)?; - - let mut replicon_tick = world.resource_mut::(); - if *replicon_tick < tick { - trace!("applying {tick:?} over {replicon_tick:?}"); - *replicon_tick = tick; - Ok(Some(tick)) - } else { - trace!("discarding {tick:?}, which is older then {replicon_tick:?}"); - Ok(None) + entity_map: &mut ServerEntityMap, + entity_ticks: &mut ServerEntityTicks, + buffered_updates: &mut Vec, + mut stats: Option<&mut ClientStats>, + replication_rules: &ReplicationRules, + replicon_tick: RepliconTick, +) -> bincode::Result { + let end_pos: u64 = message.len().try_into().unwrap(); + let mut cursor = Cursor::new(&*message); + if let Some(stats) = &mut stats { + stats.update_messages += 1; + stats.bytes += end_pos; + } + + let (tick, update_index) = bincode::deserialize_from(&mut cursor)?; + if tick < replicon_tick { + buffered_updates.push(BufferedUpdate { + tick, + position: cursor.position(), + message, + }); + return Ok(update_index); } + + apply_components( + &mut cursor, + world, + entity_map, + entity_ticks, + stats, + ComponentsKind::Update, + replication_rules, + tick, + )?; + + Ok(update_index) } /// Applies received server mappings from client's pre-spawned entities. @@ -212,19 +305,36 @@ fn apply_entity_mappings( } /// Deserializes replicated components of `components_kind` and applies them to the `world`. 
+#[allow(clippy::too_many_arguments)] fn apply_components( cursor: &mut Cursor<&[u8]>, world: &mut World, entity_map: &mut ServerEntityMap, + entity_ticks: &mut ServerEntityTicks, mut stats: Option<&mut ClientStats>, components_kind: ComponentsKind, replication_rules: &ReplicationRules, - tick: RepliconTick, + replicon_tick: RepliconTick, ) -> bincode::Result<()> { let entities_count: u16 = bincode::deserialize_from(&mut *cursor)?; for _ in 0..entities_count { let entity = deserialize_entity(cursor)?; let mut entity = entity_map.get_by_server_or_spawn(world, entity); + match components_kind { + ComponentsKind::Update => { + let Some(tick) = entity_ticks.0.get_mut(&entity.id()) else { + continue; // Update arrived arrive after a despawn from init message. + }; + if *tick >= replicon_tick { + continue; // Update for this entity is outdated. + } + *tick = replicon_tick; + } + ComponentsKind::Insert | ComponentsKind::Removal => { + entity_ticks.0.insert(entity.id(), replicon_tick); + } + } + let components_count: u8 = bincode::deserialize_from(&mut *cursor)?; if let Some(stats) = &mut stats { stats.entities_changed += 1; @@ -235,10 +345,10 @@ fn apply_components( // SAFETY: server and client have identical `ReplicationRules` and server always sends valid IDs. let replication_info = unsafe { replication_rules.get_info_unchecked(replication_id) }; match components_kind { - ComponentsKind::Change => { - (replication_info.deserialize)(&mut entity, entity_map, cursor, tick)? + ComponentsKind::Insert | ComponentsKind::Update => { + (replication_info.deserialize)(&mut entity, entity_map, cursor, replicon_tick)? 
} - ComponentsKind::Removal => (replication_info.remove)(&mut entity, tick), + ComponentsKind::Removal => (replication_info.remove)(&mut entity, replicon_tick), } } } @@ -251,8 +361,9 @@ fn apply_despawns( cursor: &mut Cursor<&[u8]>, world: &mut World, entity_map: &mut ServerEntityMap, + entity_ticks: &mut ServerEntityTicks, replication_rules: &ReplicationRules, - tick: RepliconTick, + replicon_tick: RepliconTick, stats: Option<&mut ClientStats>, ) -> bincode::Result<()> { let entities_count: u16 = bincode::deserialize_from(&mut *cursor)?; @@ -268,7 +379,8 @@ fn apply_despawns( .remove_by_server(server_entity) .and_then(|entity| world.get_entity_mut(entity)) { - (replication_rules.despawn_fn)(client_entity, tick); + entity_ticks.0.remove(&client_entity.id()); + (replication_rules.despawn_fn)(client_entity, replicon_tick); } } @@ -277,7 +389,7 @@ fn apply_despawns( /// Deserializes `entity` from compressed index and generation. /// -/// For details see [`ReplicationBuffer::write_entity`](crate::server::replication_buffer::ReplicationBuffer::write_entity). +/// For details see [`ReplicationBuffer::write_entity`](crate::server::replication_message::replication_buffer::write_entity). fn deserialize_entity(cursor: &mut Cursor<&[u8]>) -> bincode::Result { let flagged_index: u64 = cursor.read_u64_varint()?; let has_generation = (flagged_index & 1) > 0; @@ -296,7 +408,8 @@ fn deserialize_entity(cursor: &mut Cursor<&[u8]>) -> bincode::Result { /// /// Parameter for [`apply_components`]. enum ComponentsKind { - Change, + Insert, + Update, Removal, } @@ -314,8 +427,6 @@ pub enum ClientSet { } /// Maps server entities to client entities and vice versa. -/// -/// Used only on client. #[derive(Default, Resource)] pub struct ServerEntityMap { server_to_client: EntityHashMap, @@ -399,3 +510,20 @@ impl Mapper for ClientMapper<'_> { }) } } + +/// Last received tick for each entity. +/// +/// Used to avoid applying old updates. 
+#[derive(Default, Deref, Resource)] +pub struct ServerEntityTicks(EntityHashMap); + +/// Caches buffer with deserialized tick that we received earlier +/// then the required init message with this tick. +pub(super) struct BufferedUpdate { + /// Required tick to wait for. + tick: RepliconTick, + /// Position of the message data. + position: u64, + /// Update data. + message: Bytes, +} diff --git a/src/client/diagnostics.rs b/src/client/diagnostics.rs index 025d9ef0..0c506c02 100644 --- a/src/client/diagnostics.rs +++ b/src/client/diagnostics.rs @@ -19,8 +19,10 @@ pub struct ClientStats { pub mappings: u32, /// Incremented per entity despawn. pub despawns: u32, - /// Replication packets received. - pub packets: u32, + /// Init messages received. + pub init_messages: u32, + /// Update messages received. + pub update_messages: u32, /// Replication bytes received in packet payloads (without internal Renet data). pub bytes: u64, } @@ -58,7 +60,7 @@ impl Plugin for ClientDiagnosticsPlugin { Self::DIAGNOSTIC_HISTORY_LEN, )) .register_diagnostic(Diagnostic::new( - Self::PACKETS, + Self::INIT_MESSAGES, "packets per second", Self::DIAGNOSTIC_HISTORY_LEN, )) @@ -81,8 +83,12 @@ impl ClientDiagnosticsPlugin { pub const MAPPINGS: DiagnosticId = DiagnosticId::from_u128(61564098061172206743744706749187); /// How many despawns per second from replication. pub const DESPAWNS: DiagnosticId = DiagnosticId::from_u128(11043323327351349675112378115868); - /// How many replication packets processed per second. - pub const PACKETS: DiagnosticId = DiagnosticId::from_u128(40094818756895929689855772983865); + /// How many init messages processed per second. + pub const INIT_MESSAGES: DiagnosticId = + DiagnosticId::from_u128(40094818756895929689855772983865); + /// How many update messages processed per second. + pub const UPDATE_MESSAGES: DiagnosticId = + DiagnosticId::from_u128(40094818756895929689855772983865); /// How many bytes of replication packets payloads per second. 
pub const BYTES: DiagnosticId = DiagnosticId::from_u128(87998088176776397493423835383418); @@ -91,41 +97,42 @@ impl ClientDiagnosticsPlugin { fn diagnostic_system(mut stats: ResMut, mut diagnostics: Diagnostics) { diagnostics.add_measurement(Self::ENTITY_CHANGES, || { - if stats.packets == 0 { + if stats.init_messages == 0 { 0_f64 } else { - stats.entities_changed as f64 / stats.packets as f64 + stats.entities_changed as f64 / stats.init_messages as f64 } }); diagnostics.add_measurement(Self::COMPONENT_CHANGES, || { - if stats.packets == 0 { + if stats.init_messages == 0 { 0_f64 } else { - stats.components_changed as f64 / stats.packets as f64 + stats.components_changed as f64 / stats.init_messages as f64 } }); diagnostics.add_measurement(Self::MAPPINGS, || { - if stats.packets == 0 { + if stats.init_messages == 0 { 0_f64 } else { - stats.mappings as f64 / stats.packets as f64 + stats.mappings as f64 / stats.init_messages as f64 } }); diagnostics.add_measurement(Self::DESPAWNS, || { - if stats.packets == 0 { + if stats.init_messages == 0 { 0_f64 } else { - stats.despawns as f64 / stats.packets as f64 + stats.despawns as f64 / stats.init_messages as f64 } }); diagnostics.add_measurement(Self::BYTES, || { - if stats.packets == 0 { + if stats.init_messages == 0 { 0_f64 } else { - stats.bytes as f64 / stats.packets as f64 + stats.bytes as f64 / stats.init_messages as f64 } }); - diagnostics.add_measurement(Self::PACKETS, || stats.packets as f64); + diagnostics.add_measurement(Self::INIT_MESSAGES, || stats.init_messages as f64); + diagnostics.add_measurement(Self::UPDATE_MESSAGES, || stats.init_messages as f64); *stats = ClientStats::default(); } } diff --git a/src/lib.rs b/src/lib.rs index 6ab1ac39..66e19302 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -117,7 +117,7 @@ fn deserialize_transform( entity: &mut EntityWorldMut, _entity_map: &mut ServerEntityMap, cursor: &mut Cursor<&[u8]>, - _tick: RepliconTick, + _replicon_tick: RepliconTick, ) -> bincode::Result<()> { let 
translation: Vec3 = bincode::deserialize_from(cursor)?; entity.insert(Transform::from_translation(translation)); @@ -379,7 +379,8 @@ creation / connection systems and corresponding UI. To reduce packet size there are the following limits per replication update: -- Up to [`u16::MAX`] entities that have changed/added components with up to [`u8::MAX`] such components. +- Up to [`u16::MAX`] entities that have added components with up to [`u8::MAX`] such components. +- Up to [`u16::MAX`] entities that have changed components with up to [`u8::MAX`] such components. - Up to [`u16::MAX`] entities that have removed components with up to [`u8::MAX`] such components. - Up to [`u16::MAX`] entities that were despawned. */ @@ -395,7 +396,7 @@ pub mod prelude { pub use super::{ client::{ diagnostics::{ClientDiagnosticsPlugin, ClientStats}, - ClientMapper, ClientPlugin, ClientSet, ServerEntityMap, + ClientMapper, ClientPlugin, ClientSet, ServerEntityMap, ServerEntityTicks, }, network_event::{ client_event::{ClientEventAppExt, FromClient}, @@ -410,10 +411,10 @@ pub mod prelude { ReplicationRules, }, replicon_tick::RepliconTick, - NetworkChannels, RepliconCorePlugin, REPLICATION_CHANNEL_ID, + NetworkChannels, ReplicationChannel, RepliconCorePlugin, }, server::{ - has_authority, AckedTicks, ClientEntityMap, ClientMapping, ServerPlugin, ServerSet, + has_authority, ClientEntityMap, ClientMapping, LastChangeTick, ServerPlugin, ServerSet, TickPolicy, SERVER_ID, }, ReplicationPlugins, diff --git a/src/network_event/server_event.rs b/src/network_event/server_event.rs index ecff838e..8bcfc2d2 100644 --- a/src/network_event/server_event.rs +++ b/src/network_event/server_event.rs @@ -15,7 +15,7 @@ use crate::{ replicon_core::{ replication_rules::MapNetworkEntities, replicon_tick::RepliconTick, NetworkChannels, }, - server::{has_authority, MinRepliconTick, ServerSet, SERVER_ID}, + server::{has_authority, LastChangeTick, ServerSet, SERVER_ID}, }; /// An extension trait for [`App`] for creating 
server events. @@ -66,13 +66,13 @@ pub trait ServerEventAppExt { fn sending_reflect_system( mut server: ResMut, mut reflect_events: EventReader>, - tick: Res, + last_change_tick: Res, channel: Res>, registry: Res, ) { let registry = registry.read(); for ToClients { event, mode } in reflect_events.read() { - let message = serialize_reflect_event(*tick, &event, ®istry) + let message = serialize_reflect_event(**last_change_tick, &event, ®istry) .expect("server event should be serializable"); server_event::send(&mut server, *channel, *mode, message) @@ -185,8 +185,7 @@ impl ServerEventAppExt for App { PostUpdate, ( ( - (min_tick_update_system::, sending_system) - .run_if(resource_exists::()), + sending_system.run_if(resource_exists::()), local_resending_system::.run_if(has_authority()), ) .chain() @@ -261,32 +260,18 @@ fn receiving_and_mapping_system( mut server: ResMut, mut server_events: EventReader>, - tick: Res, + last_change_tick: Res, channel: Res>, ) { for ToClients { event, mode } in server_events.read() { let message = DefaultOptions::new() - .serialize(&(*tick, event)) + .serialize(&(**last_change_tick, event)) .expect("server event should be serializable"); send(&mut server, *channel, *mode, message); } } -/// Updates [`MinRepliconTick`] to force server to send replication message even if there were no world changes. -/// -/// Needed because events on a client won't be emitted until the client acknowledges the event tick. -/// See also [`ServerEventQueue`]. 
-fn min_tick_update_system( - mut server_events: EventReader>, - mut min_tick: ResMut, - tick: Res, -) { - if server_events.read().count() > 0 { - **min_tick = *tick; - } -} - /// Transforms [`ToClients`] events into `T` events to "emulate" /// message sending for offline mode or when server is also a player fn local_resending_system( diff --git a/src/replicon_core.rs b/src/replicon_core.rs index 682c75e4..6ade076f 100644 --- a/src/replicon_core.rs +++ b/src/replicon_core.rs @@ -1,6 +1,8 @@ pub mod replication_rules; pub mod replicon_tick; +use std::time::Duration; + use bevy::prelude::*; use bevy_renet::renet::{ChannelConfig, SendType}; @@ -18,10 +20,22 @@ impl Plugin for RepliconCorePlugin { } } -/// ID of the server replication channel. +/// ID of a server replication channel. /// /// See also [`NetworkChannels`]. -pub const REPLICATION_CHANNEL_ID: u8 = 0; +#[repr(u8)] +pub enum ReplicationChannel { + /// For sending messages with entity mappings, inserts, removals and despawns. + Reliable, + /// For sending messages with component updates. + Unreliable, +} + +impl From for u8 { + fn from(value: ReplicationChannel) -> Self { + value as u8 + } +} /// A resource to configure and setup channels for [`ConnectionConfig`](bevy_renet::renet::ConnectionConfig) #[derive(Clone, Resource)] @@ -41,9 +55,19 @@ pub struct NetworkChannels { /// Stores only replication channel by default. impl Default for NetworkChannels { fn default() -> Self { + let replication_channels = vec![ + ( + SendType::ReliableOrdered { + resend_time: Duration::ZERO, + }, + None, + ), + (SendType::Unreliable, None), + ]; + Self { - server: vec![(SendType::Unreliable, None)], - client: vec![(SendType::Unreliable, None)], + server: replication_channels.clone(), + client: replication_channels, default_max_bytes: 5 * 1024 * 1024, // Value from `DefaultChannel::config()`. } } @@ -62,7 +86,7 @@ impl NetworkChannels { /// Sets maximum usage bytes for specific client channel. 
/// - /// [`REPLICATION_CHANNEL_ID`] or [`EventChannel`](crate::network_event::EventChannel) can be passed as `id`. + /// [`ReplicationChannel`] or [`EventChannel`](crate::network_event::EventChannel) can be passed as `id`. /// Without calling this function, the default value will be used. /// See also [`Self::set_default_max_bytes`]. pub fn set_server_max_bytes(&mut self, id: impl Into, max_bytes: usize) { diff --git a/src/replicon_core/replication_rules.rs b/src/replicon_core/replication_rules.rs index b2d2f71b..1aee85da 100644 --- a/src/replicon_core/replication_rules.rs +++ b/src/replicon_core/replication_rules.rs @@ -231,7 +231,7 @@ pub fn deserialize_component( entity: &mut EntityWorldMut, _entity_map: &mut ServerEntityMap, cursor: &mut Cursor<&[u8]>, - _tick: RepliconTick, + _replicon_tick: RepliconTick, ) -> bincode::Result<()> { let component: C = DefaultOptions::new().deserialize_from(cursor)?; entity.insert(component); @@ -244,7 +244,7 @@ pub fn deserialize_mapped_component, - _tick: RepliconTick, + _replicon_tick: RepliconTick, ) -> bincode::Result<()> { let mut component: C = DefaultOptions::new().deserialize_from(cursor)?; @@ -258,11 +258,11 @@ pub fn deserialize_mapped_component(entity: &mut EntityWorldMut, _tick: RepliconTick) { +pub fn remove_component(entity: &mut EntityWorldMut, _replicon_tick: RepliconTick) { entity.remove::(); } /// Default entity despawn function. 
-pub fn despawn_recursive(entity: EntityWorldMut, _tick: RepliconTick) { +pub fn despawn_recursive(entity: EntityWorldMut, _replicon_tick: RepliconTick) { entity.despawn_recursive(); } diff --git a/src/server.rs b/src/server.rs index bacf0666..3e2ac2b6 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1,18 +1,19 @@ pub(super) mod despawn_tracker; -pub(super) mod message; pub(super) mod removal_tracker; +pub(super) mod replication_messages; -use std::time::Duration; +use std::{mem, time::Duration}; use bevy::{ ecs::{ archetype::ArchetypeId, - component::{StorageType, Tick}, + component::{ComponentTicks, StorageType, Tick}, system::SystemChangeTick, }, prelude::*, + ptr::Ptr, time::common_conditions::on_timer, - utils::HashMap, + utils::{EntityHashMap, HashMap}, }; use bevy_renet::{ renet::{ClientId, RenetClient, RenetServer, ServerEvent}, @@ -21,11 +22,13 @@ use bevy_renet::{ }; use crate::replicon_core::{ - replication_rules::ReplicationRules, replicon_tick::RepliconTick, REPLICATION_CHANNEL_ID, + replication_rules::{ReplicationId, ReplicationInfo, ReplicationRules}, + replicon_tick::RepliconTick, + ReplicationChannel, }; use despawn_tracker::{DespawnTracker, DespawnTrackerPlugin}; -use message::ReplicationMessage; use removal_tracker::{RemovalTracker, RemovalTrackerPlugin}; +use replication_messages::ReplicationMessages; pub const SERVER_ID: ClientId = ClientId::from_raw(0); @@ -49,9 +52,8 @@ impl Plugin for ServerPlugin { RemovalTrackerPlugin, DespawnTrackerPlugin, )) - .init_resource::() - .init_resource::() - .init_resource::() + .init_resource::() + .init_resource::() .init_resource::() .configure_sets(PreUpdate, ServerSet::Receive.after(RenetReceive)) .configure_sets( @@ -62,7 +64,7 @@ impl Plugin for ServerPlugin { ) .add_systems( PreUpdate, - (Self::acks_receiving_system, Self::acks_cleanup_system) + (Self::acks_receiving_system, Self::disconnect_cleanup_system) .in_set(ServerSet::Receive) .run_if(resource_exists::()), ) @@ -90,7 +92,9 @@ impl Plugin for 
ServerPlugin { TickPolicy::EveryFrame => { app.add_systems( PostUpdate, - Self::increment_tick.before(Self::replication_sending_system), + Self::increment_tick + .before(Self::replication_sending_system) + .run_if(resource_exists::()), ); } TickPolicy::Manual => (), @@ -104,149 +108,130 @@ impl ServerPlugin { } /// Increments current server tick which causes the server to replicate this frame. - pub fn increment_tick(mut tick: ResMut) { - tick.increment(); - trace!("incremented {tick:?}"); + pub fn increment_tick(mut replicon_tick: ResMut) { + replicon_tick.increment(); + trace!("incremented {replicon_tick:?}"); } fn acks_receiving_system( - mut acked_ticks: ResMut, - mut ticks_map: ResMut, mut server: ResMut, - mut entity_map: ResMut, + mut clients_info: ResMut, ) { - for client_id in server.clients_id() { - while let Some(message) = server.receive_message(client_id, REPLICATION_CHANNEL_ID) { - match bincode::deserialize::(&message) { - Ok(tick) => { - let acked_tick = acked_ticks.0.entry(client_id).or_default(); - if *acked_tick < tick { - *acked_tick = tick; - entity_map.cleanup_acked(client_id, *acked_tick); - trace!("client {client_id} acknowledged {tick:?}"); + for client_info in clients_info.iter_mut() { + while let Some(message) = + server.receive_message(client_info.id, ReplicationChannel::Reliable) + { + match bincode::deserialize::(&message) { + Ok(update_index) => { + let Some((tick, entities)) = + client_info.update_entities.remove(&update_index) + else { + error!( + "received unknown update index {update_index} from client {}", + client_info.id + ); + continue; + }; + + for entity in entities { + client_info.ticks.insert(entity, tick); } + trace!( + "client {} acknowledged an update with {tick:?}", + client_info.id + ); } - Err(e) => error!("unable to deserialize tick from client {client_id}: {e}"), + Err(e) => error!( + "unable to deserialize update index from client {}: {e}", + client_info.id + ), } } } - - ticks_map.cleanup_acked(&acked_ticks) } - 
fn acks_cleanup_system( + fn disconnect_cleanup_system( mut server_events: EventReader, - mut acked_ticks: ResMut, + mut entity_map: ResMut, + mut clients_info: ResMut, ) { for event in server_events.read() { - match event { + match *event { ServerEvent::ClientDisconnected { client_id, .. } => { - acked_ticks.0.remove(client_id); + entity_map.0.remove(&client_id); + let index = clients_info + .iter() + .position(|info| info.id == client_id) + .expect("clients info should contain all connected clients"); + clients_info.remove(index); } ServerEvent::ClientConnected { client_id } => { - acked_ticks.0.entry(*client_id).or_default(); + clients_info.push(ClientInfo::new(client_id)); } } } } - #[allow(clippy::too_many_arguments)] + /// Collects [`ReplicationMessages`] and sends them. + #[allow(clippy::type_complexity)] pub(super) fn replication_sending_system( - mut messages: Local>, + mut messages: Local, change_tick: SystemChangeTick, - mut set: ParamSet<(&World, ResMut, ResMut)>, - acked_ticks: Res, + mut set: ParamSet<( + &World, + ResMut, + ResMut, + ResMut, + ResMut, + ResMut, + Query<(Entity, &mut RemovalTracker)>, + )>, replication_rules: Res, - despawn_tracker: Res, replicon_tick: Res, - min_replicon_tick: Res, - removal_trackers: Query<(Entity, &RemovalTracker)>, - entity_map: Res, ) -> bincode::Result<()> { - set.p1().0.insert(*replicon_tick, change_tick.this_run()); - - let messages = prepare_messages( - &mut messages, - &acked_ticks, - &set.p1(), - *replicon_tick, - *min_replicon_tick, - )?; - - collect_mappings(messages, &entity_map)?; - collect_changes( - messages, - set.p0(), - change_tick.this_run(), - &replication_rules, - )?; - collect_removals(messages, &removal_trackers, change_tick.this_run())?; - collect_despawns(messages, &despawn_tracker, change_tick.this_run())?; - - for messages in messages { - messages.send(&mut set.p2()); - } + let clients_info = mem::take(&mut set.p1().0); // Take ownership to avoid borrowing issues. 
+ messages.prepare(clients_info, *replicon_tick, change_tick.this_run())?; + + collect_mappings(&mut messages, &mut set.p2())?; + collect_changes(&mut messages, set.p0(), &change_tick, &replication_rules)?; + collect_removals(&mut messages, &mut set.p6(), &change_tick)?; + collect_despawns(&mut messages, &mut set.p4(), &change_tick)?; + + let last_change_tick = *set.p5(); + let (last_change_tick, clients_info) = + messages.send(&mut set.p3(), last_change_tick, *replicon_tick)?; + + // Return borrowed data back. + **set.p1() = clients_info; + *set.p5() = last_change_tick; Ok(()) } fn reset_system( mut replicon_tick: ResMut, - mut acked_ticks: ResMut, - mut ticks_map: ResMut, + mut entity_map: ResMut, + mut clients_info: ResMut, ) { *replicon_tick = Default::default(); - acked_ticks.0.clear(); - ticks_map.0.clear(); + entity_map.0.clear(); + clients_info.clear(); } } -/// Initializes message for each client and returns it as mutable slice. +/// Collects and writes any new entity mappings happened in this tick. /// -/// Reuses already allocated messages. -/// Creates new messages if number of clients is bigger then the number of allocated messages. -/// If there are more messages than the number of clients, then the extra messages remain untouched -/// and the returned slice will not include them. 
-fn prepare_messages<'a>( - messages: &'a mut Vec, - acked_ticks: &AckedTicks, - ticks_map: &TicksMap, - replicon_tick: RepliconTick, - min_replicon_tick: MinRepliconTick, -) -> bincode::Result<&'a mut [ReplicationMessage]> { - messages.reserve(acked_ticks.len()); - for (index, (&client_id, &acked_tick)) in acked_ticks.iter().enumerate() { - let system_tick = *ticks_map.get(&acked_tick).unwrap_or(&Tick::new(0)); - - let send_empty = acked_tick < *min_replicon_tick; - if let Some(message) = messages.get_mut(index) { - message.reset(replicon_tick, client_id, system_tick, send_empty)?; - } else { - messages.push(ReplicationMessage::new( - replicon_tick, - client_id, - system_tick, - send_empty, - )?); - } - } - - Ok(&mut messages[..acked_ticks.len()]) -} - -/// Collect and write any new entity mappings into messages since last acknowledged tick. -/// -/// Mappings will be processed first, so all referenced entities after it will behave correctly. +/// On deserialization mappings should be processed first, so all referenced entities after it will behave correctly. fn collect_mappings( - messages: &mut [ReplicationMessage], - entity_map: &ClientEntityMap, + messages: &mut ReplicationMessages, + entity_map: &mut ClientEntityMap, ) -> bincode::Result<()> { - for message in &mut *messages { + for (message, _, client_info) in messages.iter_mut_with_info() { message.start_array(); - if let Some(mappings) = entity_map.get(&message.client_id) { - for mapping in mappings { - message.write_client_mapping(mapping)?; + if let Some(mappings) = entity_map.0.get_mut(&client_info.id) { + for mapping in mappings.drain(..) { + message.write_client_mapping(&mapping)?; } } @@ -255,15 +240,16 @@ fn collect_mappings( Ok(()) } -/// Collect component changes into messages based on last acknowledged tick. +/// Collects component insertions from this tick into init messages and changes into update messages since the last entity tick. 
fn collect_changes( - messages: &mut [ReplicationMessage], + messages: &mut ReplicationMessages, world: &World, - system_tick: Tick, + change_tick: &SystemChangeTick, replication_rules: &ReplicationRules, ) -> bincode::Result<()> { - for message in &mut *messages { - message.start_array(); + for (init_message, update_message) in messages.iter_mut() { + init_message.start_array(); + update_message.start_array(); } for archetype in world @@ -280,8 +266,9 @@ fn collect_changes( .expect("archetype should be valid"); for archetype_entity in archetype.entities() { - for message in &mut *messages { - message.start_entity_data(archetype_entity.entity()); + for (init_message, update_message) in messages.iter_mut() { + init_message.start_entity_data(archetype_entity.entity()); + update_message.start_entity_data(archetype_entity.entity()) } for component_id in archetype.components() { @@ -310,15 +297,15 @@ fn collect_changes( let component = unsafe { column.get_data_unchecked(archetype_entity.table_row()) }; - for message in &mut *messages { - if ticks.is_changed(message.system_tick, system_tick) { - message.write_component( - replication_info, - replication_id, - component, - )?; - } - } + collect_component_change( + messages, + archetype_entity.entity(), + ticks, + change_tick, + replication_info, + replication_id, + component, + )?; } StorageType::SparseSet => { let sparse_set = world @@ -335,47 +322,82 @@ fn collect_changes( .get(entity) .unwrap_or_else(|| panic!("{entity:?} should have {component_id:?}")); - for message in &mut *messages { - if ticks.is_changed(message.system_tick, system_tick) { - message.write_component( - replication_info, - replication_id, - component, - )?; - } - } + collect_component_change( + messages, + entity, + ticks, + change_tick, + replication_info, + replication_id, + component, + )?; } } } - for message in &mut *messages { - message.end_entity_data()?; + for (init_message, update_message) in messages.iter_mut() { + 
init_message.end_entity_data()?; + update_message.register_entity(); + update_message.end_entity_data()?; } } } - for message in &mut *messages { - message.end_array()?; + for (init_message, update_message) in messages.iter_mut() { + init_message.end_array()?; + update_message.end_array()?; } Ok(()) } -/// Collect component removals into messages based on last acknowledged tick. +/// Collects the component if it has been changed. +/// +/// If the component has been changed in this tick, it will be collected into init buffer and last entity tick will be bumped. +/// Otherwise if the component has been changed since the last entity tick for a client - it will be collected into update message. +fn collect_component_change( + messages: &mut ReplicationMessages, + entity: Entity, + ticks: ComponentTicks, + change_tick: &SystemChangeTick, + replication_info: &ReplicationInfo, + replication_id: ReplicationId, + component: Ptr, +) -> bincode::Result<()> { + for (init_message, update_message, client_info) in messages.iter_mut_with_info() { + if ticks.is_added(change_tick.last_run(), change_tick.this_run()) { + client_info.ticks.insert(entity, change_tick.this_run()); + init_message.write_component(replication_info, replication_id, component)?; + } else { + let tick = *client_info + .ticks + .get(&entity) + .expect("entity should present after adding component"); + if ticks.is_changed(tick, change_tick.this_run()) { + update_message.write_component(replication_info, replication_id, component)?; + } + } + } + + Ok(()) +} + +/// Collects component removals from this tick into init messages. 
fn collect_removals( - messages: &mut [ReplicationMessage], - removal_trackers: &Query<(Entity, &RemovalTracker)>, - system_tick: Tick, + messages: &mut ReplicationMessages, + removal_trackers: &mut Query<(Entity, &mut RemovalTracker)>, + change_tick: &SystemChangeTick, ) -> bincode::Result<()> { - for message in &mut *messages { + for (message, _) in messages.iter_mut() { message.start_array(); } - for (entity, removal_tracker) in removal_trackers { - for message in &mut *messages { + for (entity, mut removal_tracker) in removal_trackers { + for (message, _, client_info) in messages.iter_mut_with_info() { message.start_entity_data(entity); - for (&replication_id, &tick) in &removal_tracker.0 { - if tick.is_newer_than(message.system_tick, system_tick) { + for (replication_id, tick) in removal_tracker.drain() { + if tick.is_newer_than(change_tick.last_run(), change_tick.this_run()) { + client_info.ticks.insert(entity, change_tick.this_run()); message.write_replication_id(replication_id)?; } } @@ -383,32 +405,33 @@ fn collect_removals( } } - for message in &mut *messages { + for (message, _) in messages.iter_mut() { message.end_array()?; } Ok(()) } -/// Collect entity despawns into messages based on last acknowledged tick. +/// Collect entity despawns from this tick into init messages. fn collect_despawns( - messages: &mut [ReplicationMessage], - despawn_tracker: &DespawnTracker, - system_tick: Tick, + messages: &mut ReplicationMessages, + despawn_tracker: &mut DespawnTracker, + change_tick: &SystemChangeTick, ) -> bincode::Result<()> { - for message in &mut *messages { + for (message, _) in messages.iter_mut() { message.start_array(); } - for &(entity, tick) in &despawn_tracker.0 { - for message in &mut *messages { - if tick.is_newer_than(message.system_tick, system_tick) { + for (entity, tick) in despawn_tracker.drain(..) 
{ + for (message, _, client_info) in messages.iter_mut_with_info() { + if tick.is_newer_than(change_tick.last_run(), change_tick.this_run()) { + client_info.ticks.remove(&entity); message.write_entity(entity)?; } } } - for message in &mut *messages { + for (message, _) in messages.iter_mut() { message.end_array()?; } @@ -444,36 +467,47 @@ pub enum TickPolicy { Manual, } -/// Stores mapping from server ticks to system change ticks. -/// -/// Used only on server. -#[derive(Default, Deref, Resource)] -pub struct TicksMap(HashMap); - -impl TicksMap { - fn cleanup_acked(&mut self, acked_ticks: &AckedTicks) { - self.0 - .retain(|tick, _| acked_ticks.values().any(|acked_tick| acked_tick <= tick)); - } +/// Stores meta-information about connected clients. +#[derive(Default, Resource, Deref, DerefMut)] +pub(super) struct ClientsInfo(Vec); + +pub(super) struct ClientInfo { + id: ClientId, + ticks: EntityHashMap, + update_entities: HashMap)>, + next_update_index: u16, } -/// Last acknowledged server ticks for all clients. -/// -/// Used only on server. -#[derive(Default, Deref, Resource)] -pub struct AckedTicks(HashMap); +impl ClientInfo { + fn new(id: ClientId) -> Self { + Self { + id, + ticks: Default::default(), + update_entities: Default::default(), + next_update_index: Default::default(), + } + } -/// Contains the lowest replicon tick that should be acknowledged by clients. -/// -/// If a client has not acked this tick, then replication messages >= this tick -/// will be sent even if they do not contain data. + /// Remembers `entities` and `tick` of an update message and returns its index. + /// + /// Used later to acknowledge updated entities. 
+ #[must_use] + fn register_update(&mut self, tick: Tick, entities: Vec) -> u16 { + let update_index = self.next_update_index; + self.update_entities.insert(update_index, (tick, entities)); + + self.next_update_index = self.next_update_index.overflowing_add(1).0; + + update_index + } +} + +/// Contains the last tick on which the world was changed. /// -/// Used to synchronize server-sent events with clients. A client cannot consume -/// a server-sent event until it has acknowledged the tick where that event was -/// created. This means we need to replicate ticks after a server-sent event is -/// emitted to guarantee the client can eventually consume the event. -#[derive(Clone, Copy, Debug, Default, Deref, DerefMut, Resource)] -pub(super) struct MinRepliconTick(RepliconTick); +/// It should be included in update messages and server events instead of the current tick +/// to avoid needless waiting for the next init message to arrive. +#[derive(Clone, Copy, Debug, Default, Deref, Resource)] +pub struct LastChangeTick(RepliconTick); /** A resource that exists on the server for mapping server entities to @@ -519,7 +553,6 @@ fn confirm_bullet( mut commands: Commands, mut bullet_events: EventReader>, mut entity_map: ResMut, - tick: Res, ) { for FromClient { client_id, event } in bullet_events.read() { let server_entity = commands.spawn(Bullet).id(); // You can insert more components, they will be sent to the client's entity correctly. @@ -527,7 +560,6 @@ fn confirm_bullet( entity_map.insert( *client_id, ClientMapping { - tick: *tick, server_entity, client_entity: event.0, }, @@ -554,22 +586,11 @@ impl ClientEntityMap { pub fn insert(&mut self, client_id: ClientId, mapping: ClientMapping) { self.0.entry(client_id).or_default().push(mapping); } - - /// Removes acknowledged mappings. 
- fn cleanup_acked(&mut self, client_id: ClientId, acked_tick: RepliconTick) { - if let Some(mappings) = self.0.get_mut(&client_id) { - mappings.retain(|mapping| mapping.tick > acked_tick); - } - } } /// Stores the server entity corresponding to a client's pre-spawned entity. -/// -/// The `tick` is stored here so that this prediction data can be cleaned up once the tick -/// has been acked by the client. #[derive(Debug)] pub struct ClientMapping { - pub tick: RepliconTick, pub server_entity: Entity, pub client_entity: Entity, } diff --git a/src/server/despawn_tracker.rs b/src/server/despawn_tracker.rs index 80c9961a..5310a1f3 100644 --- a/src/server/despawn_tracker.rs +++ b/src/server/despawn_tracker.rs @@ -4,7 +4,7 @@ use bevy::{ }; use bevy_renet::renet::RenetServer; -use super::{AckedTicks, ServerSet, TicksMap}; +use super::ServerSet; use crate::replicon_core::replication_rules::Replication; /// Tracks entity despawns of entities with [`Replication`] component in [`DespawnTracker`] resource. @@ -16,7 +16,7 @@ impl Plugin for DespawnTrackerPlugin { fn build(&self, app: &mut App) { app.init_resource::().add_systems( PostUpdate, - (Self::cleanup_system, Self::detection_system) + Self::detection_system .before(ServerSet::Send) .run_if(resource_exists::()), ); @@ -24,23 +24,6 @@ impl Plugin for DespawnTrackerPlugin { } impl DespawnTrackerPlugin { - /// Cleanups all acknowledged despawns. - /// - /// Cleans all despawns if [`AckedTicks`] is empty. 
- fn cleanup_system( - change_tick: SystemChangeTick, - mut despawn_tracker: ResMut, - acked_ticks: Res, - ticks_map: Res, - ) { - despawn_tracker.retain(|(_, tick)| { - acked_ticks.values().any(|acked_tick| { - let system_tick = *ticks_map.get(acked_tick).unwrap_or(&Tick::new(0)); - tick.is_newer_than(system_tick, change_tick.this_run()) - }) - }); - } - fn detection_system( change_tick: SystemChangeTick, mut removed_replications: RemovedComponents, @@ -58,28 +41,16 @@ pub(crate) struct DespawnTracker(pub(super) Vec<(Entity, Tick)>); #[cfg(test)] mod tests { - use bevy_renet::renet::ClientId; - use super::*; - use crate::server::RepliconTick; #[test] fn detection() { let mut app = App::new(); app.add_plugins(DespawnTrackerPlugin) - .insert_resource(RenetServer::new(Default::default())) - .init_resource::() - .init_resource::(); + .insert_resource(RenetServer::new(Default::default())); app.update(); - // To avoid cleanup. - const DUMMY_CLIENT_ID: ClientId = ClientId::from_raw(0); - app.world - .resource_mut::() - .0 - .insert(DUMMY_CLIENT_ID, RepliconTick(0)); - let replicated_entity = app.world.spawn(Replication).id(); app.update(); diff --git a/src/server/message.rs b/src/server/message.rs deleted file mode 100644 index 3e793f9c..00000000 --- a/src/server/message.rs +++ /dev/null @@ -1,93 +0,0 @@ -pub(super) mod replication_buffer; - -use bevy::{ecs::component::Tick, prelude::*}; -use bevy_renet::renet::{Bytes, ClientId, RenetServer}; - -use crate::replicon_core::{replicon_tick::RepliconTick, REPLICATION_CHANNEL_ID}; -use replication_buffer::ReplicationBuffer; - -/// A reusable message with replicated data for a client. -/// -/// See also [Limits](../index.html#limits) -#[derive(Deref, DerefMut)] -pub(crate) struct ReplicationMessage { - /// ID of a client for which this message is written. - pub(super) client_id: ClientId, - - /// Last system tick acknowledged by the client. - /// - /// Used for changes preparation. 
- pub(super) system_tick: Tick, - - /// Send message even if it doesn't contain replication data. - /// - /// See also [`Self::send_to`] - send_empty: bool, - - /// Message data. - #[deref] - buffer: ReplicationBuffer, -} - -impl ReplicationMessage { - /// Creates a new message with assigned client ID. - /// - /// `replicon_tick` is the current tick that will be written into - /// the message to read by client on receive. - /// - /// `system_tick` is the last acknowledged system tick for this client. - /// Changes since this tick should be written into the message. - /// - /// If `send_empty` is set to `true`, then [`Self::send_to`] - /// will send the message even if it doesn't contain any data. - pub(super) fn new( - replicon_tick: RepliconTick, - client_id: ClientId, - system_tick: Tick, - send_empty: bool, - ) -> bincode::Result { - let mut buffer = ReplicationBuffer::default(); - buffer.write(&replicon_tick)?; - - Ok(Self { - client_id, - system_tick, - send_empty, - buffer, - }) - } - - /// Clears the message and assigns it to a different client ID. - /// - /// Keeps allocated capacity of the buffer. - pub(super) fn reset( - &mut self, - replicon_tick: RepliconTick, - client_id: ClientId, - system_tick: Tick, - send_empty: bool, - ) -> bincode::Result<()> { - self.client_id = client_id; - self.system_tick = system_tick; - self.send_empty = send_empty; - self.buffer.reset(); - self.buffer.write(&replicon_tick) - } - - /// Sends the message to the designated client. 
- pub(super) fn send(&mut self, server: &mut RenetServer) { - if !self.buffer.contains_data() && !self.send_empty { - trace!("no changes to send for client {}", self.client_id); - return; - } - - self.buffer.trim_empty_arrays(); - - trace!("sending replication message to client {}", self.client_id); - server.send_message( - self.client_id, - REPLICATION_CHANNEL_ID, - Bytes::copy_from_slice(self.buffer.as_slice()), - ); - } -} diff --git a/src/server/removal_tracker.rs b/src/server/removal_tracker.rs index 0e589fdd..2f363f18 100644 --- a/src/server/removal_tracker.rs +++ b/src/server/removal_tracker.rs @@ -5,7 +5,7 @@ use bevy::{ }; use bevy_renet::renet::RenetServer; -use super::{AckedTicks, ServerSet, TicksMap}; +use super::ServerSet; use crate::replicon_core::replication_rules::{Replication, ReplicationId, ReplicationRules}; /// Stores component removals in [`RemovalTracker`] component to make them persistent across ticks. @@ -19,7 +19,6 @@ impl Plugin for RemovalTrackerPlugin { PostUpdate, ( Self::insertion_system, - Self::cleanup_system, Self::detection_system.run_if(resource_exists::()), ) .before(ServerSet::Send) @@ -38,23 +37,6 @@ impl RemovalTrackerPlugin { } } - /// Cleanups all acknowledged despawns. 
- fn cleanup_system( - change_tick: SystemChangeTick, - acked_ticks: Res, - ticks_map: Res, - mut removal_trackers: Query<&mut RemovalTracker>, - ) { - for mut removal_tracker in &mut removal_trackers { - removal_tracker.retain(|_, tick| { - acked_ticks.values().any(|acked_tick| { - let system_tick = *ticks_map.get(acked_tick).unwrap_or(&Tick::new(0)); - tick.is_newer_than(system_tick, change_tick.this_run()) - }) - }); - } - } - fn detection_system( change_tick: SystemChangeTick, remove_events: &RemovedComponentEvents, @@ -82,31 +64,21 @@ pub(crate) struct RemovalTracker(pub(super) HashMap); #[cfg(test)] mod tests { - use bevy_renet::renet::ClientId; use serde::{Deserialize, Serialize}; use super::*; - use crate::{replicon_core::replication_rules::AppReplicationExt, server::RepliconTick}; + use crate::replicon_core::replication_rules::AppReplicationExt; #[test] fn detection() { let mut app = App::new(); app.add_plugins(RemovalTrackerPlugin) .insert_resource(RenetServer::new(Default::default())) - .init_resource::() - .init_resource::() .init_resource::() .replicate::(); app.update(); - // To avoid cleanup. - const DUMMY_CLIENT_ID: ClientId = ClientId::from_raw(0); - app.world - .resource_mut::() - .0 - .insert(DUMMY_CLIENT_ID, RepliconTick(0)); - let replicated_entity = app.world.spawn((DummyComponent, Replication)).id(); app.update(); diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs new file mode 100644 index 00000000..f5a78df0 --- /dev/null +++ b/src/server/replication_messages.rs @@ -0,0 +1,276 @@ +pub(super) mod replication_buffer; + +use std::mem; + +use bevy::{ecs::component::Tick, prelude::*}; +use bevy_renet::renet::{Bytes, ClientId, RenetServer}; + +use super::{ClientInfo, LastChangeTick}; +use crate::replicon_core::{replicon_tick::RepliconTick, ReplicationChannel}; +use replication_buffer::ReplicationBuffer; + +/// Accumulates replication messages and sends them to clients. 
+///
+/// Messages are serialized and deserialized manually because using an intermediate structure
+/// leads to allocations and according to our benchmarks it's much slower.
+///
+/// Reuses allocated memory from older messages.
+#[derive(Default)]
+pub(crate) struct ReplicationMessages {
+    clients_info: Vec,
+    data: Vec<(InitMessage, UpdateMessage)>,
+    clients_count: usize,
+}
+
+impl ReplicationMessages {
+    /// Initializes messages for each client.
+    ///
+    /// `replicon_tick` and `tick` are current ticks.
+    ///
+    /// Reuses already allocated messages.
+    /// Creates new messages if the number of clients is bigger than the number of allocated messages.
+    /// If there are more messages than the number of clients, then the extra messages remain untouched
+    /// and iteration methods will not include them.
+    pub(super) fn prepare(
+        &mut self,
+        clients_info: Vec,
+        replicon_tick: RepliconTick,
+        tick: Tick,
+    ) -> bincode::Result<()> {
+        self.clients_count = clients_info.len();
+
+        self.data.reserve(self.clients_count);
+
+        for index in 0..clients_info.len() {
+            if let Some((init_message, update_message)) = self.data.get_mut(index) {
+                init_message.reset(replicon_tick)?;
+                update_message.reset()?;
+            } else {
+                let init_message = InitMessage::new(replicon_tick)?;
+                let update_message = UpdateMessage::new(tick)?;
+                self.data.push((init_message, update_message));
+            }
+        }
+
+        self.clients_info = clients_info;
+
+        Ok(())
+    }
+
+    /// Returns iterator over messages for each client.
+    pub(super) fn iter_mut(&mut self) -> impl Iterator {
+        self.data.iter_mut().take(self.clients_count)
+    }
+
+    /// Same as [`Self::iter_mut`], but also iterates over clients info.
+ pub(super) fn iter_mut_with_info( + &mut self, + ) -> impl Iterator { + self.data + .iter_mut() + .take(self.clients_count) + .zip(&mut self.clients_info) + .map(|((init_message, update_message), client_info)| { + (init_message, update_message, client_info) + }) + } + + /// Sends all messages and returns updated last change tick with clients info that was consumed in [`Self::prepare`]. + pub(super) fn send( + &mut self, + server: &mut RenetServer, + mut last_change_tick: LastChangeTick, + replicon_tick: RepliconTick, + ) -> bincode::Result<(LastChangeTick, Vec)> { + if let Some((init_message, _)) = self.data.last() { + if init_message.arrays_with_data() > 1 { + last_change_tick.0 = replicon_tick; + } + } + + for ((init_message, update_message), client_info) in self + .data + .iter_mut() + .take(self.clients_count) + .zip(&mut self.clients_info) + { + init_message.send(server, client_info.id); + update_message.send(server, client_info)?; + } + + Ok((last_change_tick, mem::take(&mut self.clients_info))) + } +} + +/// A reusable message with replicated data. +/// +/// Contains tick and mappings, insertions, removals and despawns that +/// happened on this tick. +/// Sent over [`ReplicationChannel::Reliable`] channel. +/// +/// See also [Limits](../index.html#limits) +#[derive(Deref, DerefMut)] +pub(super) struct InitMessage { + /// Message data. + #[deref] + buffer: ReplicationBuffer, +} + +impl InitMessage { + /// Creates a new message for the specified tick. + /// + /// `tick` is the current tick that will be written into + /// the message to read by client on receive. + fn new(replicon_tick: RepliconTick) -> bincode::Result { + let mut buffer = ReplicationBuffer::default(); + buffer.write(&replicon_tick)?; + + Ok(Self { buffer }) + } + + /// Clears the message and assigns `tick` to it. + /// + /// Keeps allocated capacity of the buffer. 
+    fn reset(&mut self, replicon_tick: RepliconTick) -> bincode::Result<()> {
+        self.buffer.reset();
+        self.buffer.write(&replicon_tick)
+    }
+
+    /// Trims empty arrays from message and sends it to the specified client.
+    ///
+    /// Does nothing if there is no data to send.
+    fn send(&mut self, server: &mut RenetServer, client_id: ClientId) {
+        if self.buffer.arrays_with_data() == 0 {
+            trace!("no init data to send for client {client_id}");
+            return;
+        }
+
+        self.buffer.trim_empty_arrays();
+
+        trace!("sending init message to client {client_id}");
+        server.send_message(
+            client_id,
+            ReplicationChannel::Reliable,
+            Bytes::copy_from_slice(self.buffer.as_slice()),
+        );
+    }
+}
+
+/// A reusable message with component updates.
+///
+/// Contains tick and component updates since the last tick until this tick for each entity.
+/// Requires init message with the same tick to be applied to keep the world in valid state.
+/// The message will be manually split into packets up to max size that can be applied independently.
+/// Splits will happen per-entity to avoid weird behavior of partially changed entity.
+/// Sent over [`ReplicationChannel::Unreliable`] channel.
+///
+/// See also [Limits](../index.html#limits)
+#[derive(Deref, DerefMut)]
+pub(super) struct UpdateMessage {
+    /// Entities and their data sizes.
+    entities: Vec<(Entity, usize)>,
+
+    /// Tick until which updates are collected.
+    tick: Tick,
+
+    /// Part of the message that will be inserted into each split part.
+    header: Vec,
+
+    /// Message data.
+    #[deref]
+    buffer: ReplicationBuffer,
+}
+
+impl UpdateMessage {
+    /// Creates a new message for the specified tick.
+    ///
+    /// `tick` is the current Bevy tick.
+    fn new(tick: Tick) -> bincode::Result {
+        Ok(Self {
+            tick,
+            header: Default::default(),
+            entities: Default::default(),
+            buffer: Default::default(),
+        })
+    }
+
+    /// Clears the message.
+    ///
+    /// Keeps allocated capacity of the buffer.
+    fn reset(&mut self) -> bincode::Result<()> {
+        self.entities.clear();
+        self.header.clear();
+        self.buffer.reset();
+
+        Ok(())
+    }
+
+    /// Registers entity from buffer's entity data for possible splitting.
+    ///
+    /// Ignores the entity if data length is 0.
+    /// Should be called before [`ReplicationBuffer::end_entity_data`].
+    pub(super) fn register_entity(&mut self) {
+        if self.buffer.entity_data_len() != 0 {
+            let data_size = self.buffer.entity_data_pos() - self.buffer.as_slice().len();
+            self.entities.push((self.buffer.data_entity(), data_size));
+        }
+    }
+
+    /// Splits message according to `entities` and sends it to the specified client.
+    ///
+    /// Does nothing if there is no data to send.
+    fn send(
+        &mut self,
+        server: &mut RenetServer,
+        client_info: &mut ClientInfo,
+    ) -> bincode::Result<()> {
+        if self.buffer.arrays_with_data() == 0 {
+            trace!("no updates to send for client {}", client_info.id);
+            return Ok(());
+        }
+
+        trace!("sending update message(s) to client {}", client_info.id);
+        const MAX_PACKET_SIZE: usize = 1200; // https://github.com/lucaspoffo/renet/blob/acee8b470e34c70d35700d96c00fb233d9cf6919/renet/src/packet.rs#L7
+        let tick_pos = self.header.len(); // Remember position of the written tick to overwrite next changes later.
+ let mut slice = self.buffer.as_slice(); + let mut entities = Vec::new(); + let mut message_size = 0; + for &(entity, data_size) in &self.entities { + if message_size + data_size + self.header.len() > MAX_PACKET_SIZE { + let (message, remaining) = slice.split_at(message_size); + slice = remaining; + message_size = data_size; + + let update_index = client_info.register_update(self.tick, entities.clone()); + bincode::serialize_into(&mut self.header, &update_index)?; + + server.send_message( + client_info.id, + ReplicationChannel::Unreliable, + Bytes::from_iter(self.header.iter().copied().chain(message.iter().copied())), + ); + + self.header.truncate(tick_pos); + entities.clear(); + } else { + entities.push(entity); + message_size += data_size; + } + } + + if !slice.is_empty() { + let update_index = client_info.register_update(self.tick, entities.clone()); + bincode::serialize_into(&mut self.header, &update_index)?; + + server.send_message( + client_info.id, + ReplicationChannel::Unreliable, + Bytes::from_iter(self.header.iter().copied().chain(slice.iter().copied())), + ); + + self.header.truncate(tick_pos); + } + + Ok(()) + } +} diff --git a/src/server/message/replication_buffer.rs b/src/server/replication_messages/replication_buffer.rs similarity index 92% rename from src/server/message/replication_buffer.rs rename to src/server/replication_messages/replication_buffer.rs index bd792bdc..a28d4cf9 100644 --- a/src/server/message/replication_buffer.rs +++ b/src/server/replication_messages/replication_buffer.rs @@ -51,9 +51,28 @@ impl ReplicationBuffer { self.trailing_empty_arrays = 0; } - /// Returns `true` if the buffer contains at least one non-empty array. - pub(super) fn contains_data(&self) -> bool { - self.arrays_with_data != 0 + /// Returns the number of arrays excluding trailing empty arrays. + pub(super) fn arrays_with_data(&self) -> usize { + self.arrays_with_data + } + + /// Returns position from the last [`Self::start_entity_data`] call. 
+ pub(super) fn entity_data_pos(&self) -> usize { + self.entity_data_pos as usize + } + + /// Returns length of the current entity data. + /// + /// See also [`Self::start_entity_data`] and [`Self::end_entity_data`]. + pub(super) fn entity_data_len(&self) -> u8 { + self.entity_data_len + } + + /// Returns entity from last call of [`Self::start_entity_data`]. + /// + /// See also [`Self::start_entity_data`] and [`Self::end_entity_data`]. + pub(super) fn data_entity(&self) -> Entity { + self.data_entity } /// Returns the buffer as a byte array. diff --git a/tests/replication.rs b/tests/replication.rs index 038a00f8..659cbf1b 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -7,7 +7,7 @@ use bevy_renet::renet::{transport::NetcodeClientTransport, ClientId}; use serde::{Deserialize, Serialize}; #[test] -fn acked_ticks_cleanup() { +fn reset() { let mut server_app = App::new(); let mut client_app = App::new(); for app in [&mut server_app, &mut client_app] { @@ -19,16 +19,29 @@ fn acked_ticks_cleanup() { common::connect(&mut server_app, &mut client_app); - let mut client_transport = client_app.world.resource_mut::(); - client_transport.disconnect(); - let client_id = ClientId::from_raw(client_transport.client_id()); + client_app + .world + .resource_mut::() + .disconnect(); client_app.update(); server_app.update(); + + client_app.world.remove_resource::(); + server_app.world.remove_resource::(); + server_app.update(); + client_app.update(); + + assert_eq!(server_app.world.resource::().get(), 0); + assert!(server_app.world.resource::().is_empty()); + + assert_eq!(client_app.world.resource::().get(), 0); + assert!(client_app.world.resource::().is_empty()); - let acked_ticks = server_app.world.resource::(); - assert!(!acked_ticks.contains_key(&client_id)); + let server_entity_map = client_app.world.resource::(); + assert!(server_entity_map.to_client().is_empty()); + assert!(server_entity_map.to_server().is_empty()); } #[test] @@ -93,7 +106,6 @@ fn 
client_spawn_replication() { let client_entity = client_app.world.spawn_empty().id(); let server_entity = server_app.world.spawn((Replication, TableComponent)).id(); - let tick = *server_app.world.get_resource::().unwrap(); let client_transport = client_app.world.resource::(); let client_id = ClientId::from_raw(client_transport.client_id()); @@ -101,7 +113,6 @@ fn client_spawn_replication() { entity_map.insert( client_id, ClientMapping { - tick, server_entity, client_entity, }, @@ -338,7 +349,6 @@ fn diagnostics() { let client_entity = client_app.world.spawn_empty().id(); let server_entity = server_app.world.spawn((Replication, TableComponent)).id(); - let tick = *server_app.world.get_resource::().unwrap(); let client_transport = client_app.world.resource::(); let client_id = ClientId::from_raw(client_transport.client_id()); @@ -346,7 +356,6 @@ fn diagnostics() { entity_map.insert( client_id, ClientMapping { - tick, server_entity, client_entity, }, @@ -362,7 +371,7 @@ fn diagnostics() { assert_eq!(stats.components_changed, 1); assert_eq!(stats.mappings, 1); assert_eq!(stats.despawns, 1); - assert_eq!(stats.packets, 1); + assert_eq!(stats.init_messages, 1); assert_eq!(stats.bytes, 18); } From fb8ce9f529732eb7054eebe37cda0481f2c05912 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Tue, 5 Dec 2023 01:53:59 +0200 Subject: [PATCH 02/63] Fix tests --- src/network_event/server_event.rs | 2 +- src/replicon_core/replicon_tick.rs | 1 - src/server.rs | 11 ++++------- tests/server_event.rs | 26 ++++---------------------- 4 files changed, 9 insertions(+), 31 deletions(-) diff --git a/src/network_event/server_event.rs b/src/network_event/server_event.rs index 8bcfc2d2..3cdbfe37 100644 --- a/src/network_event/server_event.rs +++ b/src/network_event/server_event.rs @@ -189,7 +189,7 @@ impl ServerEventAppExt for App { local_resending_system::.run_if(has_authority()), ) .chain() - .before(ServerPlugin::replication_sending_system) + 
.after(ServerPlugin::replication_sending_system) .in_set(ServerSet::Send), reset_system::.run_if(resource_removed::()), ), diff --git a/src/replicon_core/replicon_tick.rs b/src/replicon_core/replicon_tick.rs index fdbe419a..9d1e4392 100644 --- a/src/replicon_core/replicon_tick.rs +++ b/src/replicon_core/replicon_tick.rs @@ -6,7 +6,6 @@ use serde::{Deserialize, Serialize}; /// A tick that increments each time we need the server to compute and send an update. /// /// Updated on clients every time they receive replication from the server. -/// Mapped to the Bevy's `Tick` in [`AckedTicks`](crate::server::AckedTicks). /// See also [`TickPolicy`](crate::server::TickPolicy). #[derive(Clone, Copy, Debug, Default, Deserialize, Eq, Hash, PartialEq, Resource, Serialize)] pub struct RepliconTick(pub(crate) u32); diff --git a/src/server.rs b/src/server.rs index 3e2ac2b6..c10657e9 100644 --- a/src/server.rs +++ b/src/server.rs @@ -56,12 +56,7 @@ impl Plugin for ServerPlugin { .init_resource::() .init_resource::() .configure_sets(PreUpdate, ServerSet::Receive.after(RenetReceive)) - .configure_sets( - PostUpdate, - ServerSet::Send - .before(RenetSend) - .run_if(resource_changed::()), - ) + .configure_sets(PostUpdate, ServerSet::Send.before(RenetSend)) .add_systems( PreUpdate, (Self::acks_receiving_system, Self::disconnect_cleanup_system) @@ -74,7 +69,8 @@ impl Plugin for ServerPlugin { Self::replication_sending_system .map(Result::unwrap) .in_set(ServerSet::Send) - .run_if(resource_exists::()), + .run_if(resource_exists::()) + .run_if(resource_changed::()), Self::reset_system.run_if(resource_removed::()), ), ); @@ -86,6 +82,7 @@ impl Plugin for ServerPlugin { PostUpdate, Self::increment_tick .before(Self::replication_sending_system) + .run_if(resource_exists::()) .run_if(on_timer(tick_time)), ); } diff --git a/tests/server_event.rs b/tests/server_event.rs index c4f40838..6d5b4a76 100644 --- a/tests/server_event.rs +++ b/tests/server_event.rs @@ -173,32 +173,14 @@ fn event_queue() 
{ .resource_mut::>() .insert(tick, DummyEvent(Entity::PLACEHOLDER)); - // Send another event to trigger world update. - server_app - .world - .resource_mut::>>() - .send(ToClients { - mode: SendMode::Broadcast, - event: DummyEvent(Entity::PLACEHOLDER), - }); - - server_app.update(); client_app.update(); - let mut dummy_events = client_app.world.resource_mut::>(); - assert_eq!( - dummy_events.drain().count(), - 1, - "should emit only single event for current tick" - ); + assert!(client_app.world.resource::>().is_empty()); + + client_app.insert_resource(tick); - server_app.update(); client_app.update(); let dummy_events = client_app.world.resource::>(); - assert_eq!( - dummy_events.len(), - 1, - "should emit another event received earlier" - ); + assert_eq!(dummy_events.len(), 1); } From 56fb26c35ea70a6568fe40873583a9aea80428d5 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Tue, 5 Dec 2023 02:27:33 +0200 Subject: [PATCH 03/63] Update changelog --- CHANGELOG.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 622b4fea..64f6f681 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,13 +13,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed -- Send all component mappings, inserts, removals and despawns over reliable channel in form of deltas and component updates over unreliable channel packed by packet size. This will significantly reduce packet loss. -- Packets in client diagnostics was replaced with init and update messages (it was actually number of messages before). -- `ClientMapping` no longer contains `tick` field. +- Send all component mappings, inserts, removals and despawns over reliable channel in form of deltas and component updates over unreliable channel packed by packet size. This significantly reduces the possibility of packet loss. - Replace `REPLICATION_CHANNEL_ID` with `ReplicationChannel` enum. 
The previous constant corresponded to the unreliable channel. +- Server events use tick with the last change instead of waiting for replication message without changes. +- `TickPolicy::EveryFrame` and `TickPolicy::MaxTickRate` now increment tick only if `RenetServer` exists. +- `ServerSet::Send` now always runs. Replication sending system still runs on `RepliconTick` change. +- `ClientMapping` no longer contains `tick` field. - Use `EntityHashMap` instead of `HashMap` with entities as keys. - Use `Cursor<&[u8]>` instead of `Cursor`. - Replace `LastRepliconTick` with `RepliconTick` on client. +- Replace Packets in client diagnostics with init and update messages (it was never actually a number of packets). - Fix missing reset of `RepliconTick` on server disconnect. - Rename `replicate_into_scene` into `replicate_into` and move it to `scene` module. From 77e8e18cccd9166918e825972fac9a138b9c23a8 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Tue, 5 Dec 2023 03:21:33 +0200 Subject: [PATCH 04/63] Remove trackers No longer needed, but it will require Bevy 0.12.1 --- Cargo.toml | 2 +- src/server.rs | 113 ++++++++++++++++++---------------- src/server/despawn_tracker.rs | 66 -------------------- src/server/removal_tracker.rs | 101 ------------------------------ 4 files changed, 60 insertions(+), 222 deletions(-) delete mode 100644 src/server/despawn_tracker.rs delete mode 100644 src/server/removal_tracker.rs diff --git a/Cargo.toml b/Cargo.toml index 89a405cf..d7f75a92 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ include = ["/benches", "/src", "/tests", "/LICENSE*"] [dependencies] bevy_renet = "0.0.10" -bevy = { version = "0.12", default-features = false, features = ["bevy_scene"] } +bevy = { version = "0.12.1", default-features = false, features = ["bevy_scene"] } bincode = "1.3" serde = "1.0" varint-rs = "2.2" diff --git a/src/server.rs b/src/server.rs index c10657e9..7694b1c9 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1,5 +1,3 @@ 
-pub(super) mod despawn_tracker; -pub(super) mod removal_tracker; pub(super) mod replication_messages; use std::{mem, time::Duration}; @@ -8,6 +6,7 @@ use bevy::{ ecs::{ archetype::ArchetypeId, component::{ComponentTicks, StorageType, Tick}, + removal_detection::RemovedComponentEvents, system::SystemChangeTick, }, prelude::*, @@ -22,12 +21,10 @@ use bevy_renet::{ }; use crate::replicon_core::{ - replication_rules::{ReplicationId, ReplicationInfo, ReplicationRules}, + replication_rules::{Replication, ReplicationId, ReplicationInfo, ReplicationRules}, replicon_tick::RepliconTick, ReplicationChannel, }; -use despawn_tracker::{DespawnTracker, DespawnTrackerPlugin}; -use removal_tracker::{RemovalTracker, RemovalTrackerPlugin}; use replication_messages::ReplicationMessages; pub const SERVER_ID: ClientId = ClientId::from_raw(0); @@ -46,34 +43,29 @@ impl Default for ServerPlugin { impl Plugin for ServerPlugin { fn build(&self, app: &mut App) { - app.add_plugins(( - RenetServerPlugin, - NetcodeServerPlugin, - RemovalTrackerPlugin, - DespawnTrackerPlugin, - )) - .init_resource::() - .init_resource::() - .init_resource::() - .configure_sets(PreUpdate, ServerSet::Receive.after(RenetReceive)) - .configure_sets(PostUpdate, ServerSet::Send.before(RenetSend)) - .add_systems( - PreUpdate, - (Self::acks_receiving_system, Self::disconnect_cleanup_system) - .in_set(ServerSet::Receive) - .run_if(resource_exists::()), - ) - .add_systems( - PostUpdate, - ( - Self::replication_sending_system - .map(Result::unwrap) - .in_set(ServerSet::Send) - .run_if(resource_exists::()) - .run_if(resource_changed::()), - Self::reset_system.run_if(resource_removed::()), - ), - ); + app.add_plugins((RenetServerPlugin, NetcodeServerPlugin)) + .init_resource::() + .init_resource::() + .init_resource::() + .configure_sets(PreUpdate, ServerSet::Receive.after(RenetReceive)) + .configure_sets(PostUpdate, ServerSet::Send.before(RenetSend)) + .add_systems( + PreUpdate, + (Self::acks_receiving_system, 
Self::disconnect_cleanup_system) + .in_set(ServerSet::Receive) + .run_if(resource_exists::()), + ) + .add_systems( + PostUpdate, + ( + Self::replication_sending_system + .map(Result::unwrap) + .in_set(ServerSet::Send) + .run_if(resource_exists::()) + .run_if(resource_changed::()), + Self::reset_system.run_if(resource_removed::()), + ), + ); match self.tick_policy { TickPolicy::MaxTickRate(max_tick_rate) => { @@ -174,15 +166,15 @@ impl ServerPlugin { pub(super) fn replication_sending_system( mut messages: Local, change_tick: SystemChangeTick, + remove_events: &RemovedComponentEvents, mut set: ParamSet<( &World, ResMut, ResMut, ResMut, - ResMut, ResMut, - Query<(Entity, &mut RemovalTracker)>, )>, + mut removed_replication: RemovedComponents, replication_rules: Res, replicon_tick: Res, ) -> bincode::Result<()> { @@ -191,16 +183,21 @@ impl ServerPlugin { collect_mappings(&mut messages, &mut set.p2())?; collect_changes(&mut messages, set.p0(), &change_tick, &replication_rules)?; - collect_removals(&mut messages, &mut set.p6(), &change_tick)?; - collect_despawns(&mut messages, &mut set.p4(), &change_tick)?; - - let last_change_tick = *set.p5(); + collect_removals( + &mut messages, + remove_events, + &change_tick, + &replication_rules, + )?; + collect_despawns(&mut messages, &mut removed_replication)?; + + let last_change_tick = *set.p4(); let (last_change_tick, clients_info) = messages.send(&mut set.p3(), last_change_tick, *replicon_tick)?; // Return borrowed data back. **set.p1() = clients_info; - *set.p5() = last_change_tick; + *set.p4() = last_change_tick; Ok(()) } @@ -382,21 +379,32 @@ fn collect_component_change( /// Collects component removals from this tick into init messages. 
fn collect_removals( messages: &mut ReplicationMessages, - removal_trackers: &mut Query<(Entity, &mut RemovalTracker)>, + remove_events: &RemovedComponentEvents, change_tick: &SystemChangeTick, + replication_rules: &ReplicationRules, ) -> bincode::Result<()> { for (message, _) in messages.iter_mut() { message.start_array(); } - for (entity, mut removal_tracker) in removal_trackers { + let mut removals: EntityHashMap<_, Vec<_>> = Default::default(); + for (&component_id, &replication_id) in replication_rules.get_ids() { + for entity in remove_events + .get(component_id) + .into_iter() + .flat_map(|removed| removed.iter_current_update_events().cloned()) + .map(Into::into) + { + removals.entry(entity).or_default().push(replication_id); + } + } + + for (entity, components) in removals { for (message, _, client_info) in messages.iter_mut_with_info() { message.start_entity_data(entity); - for (replication_id, tick) in removal_tracker.drain() { - if tick.is_newer_than(change_tick.last_run(), change_tick.this_run()) { - client_info.ticks.insert(entity, change_tick.this_run()); - message.write_replication_id(replication_id)?; - } + for &replication_id in &components { + client_info.ticks.insert(entity, change_tick.this_run()); + message.write_replication_id(replication_id)?; } message.end_entity_data()?; } @@ -412,19 +420,16 @@ fn collect_removals( /// Collect entity despawns from this tick into init messages. fn collect_despawns( messages: &mut ReplicationMessages, - despawn_tracker: &mut DespawnTracker, - change_tick: &SystemChangeTick, + removed_replication: &mut RemovedComponents, ) -> bincode::Result<()> { for (message, _) in messages.iter_mut() { message.start_array(); } - for (entity, tick) in despawn_tracker.drain(..) 
{ + for entity in removed_replication.read() { for (message, _, client_info) in messages.iter_mut_with_info() { - if tick.is_newer_than(change_tick.last_run(), change_tick.this_run()) { - client_info.ticks.remove(&entity); - message.write_entity(entity)?; - } + client_info.ticks.remove(&entity); + message.write_entity(entity)?; } } diff --git a/src/server/despawn_tracker.rs b/src/server/despawn_tracker.rs deleted file mode 100644 index 5310a1f3..00000000 --- a/src/server/despawn_tracker.rs +++ /dev/null @@ -1,66 +0,0 @@ -use bevy::{ - ecs::{component::Tick, system::SystemChangeTick}, - prelude::*, -}; -use bevy_renet::renet::RenetServer; - -use super::ServerSet; -use crate::replicon_core::replication_rules::Replication; - -/// Tracks entity despawns of entities with [`Replication`] component in [`DespawnTracker`] resource. -/// -/// Used only on server. Despawns will be cleaned after all clients acknowledge them. -pub(super) struct DespawnTrackerPlugin; - -impl Plugin for DespawnTrackerPlugin { - fn build(&self, app: &mut App) { - app.init_resource::().add_systems( - PostUpdate, - Self::detection_system - .before(ServerSet::Send) - .run_if(resource_exists::()), - ); - } -} - -impl DespawnTrackerPlugin { - fn detection_system( - change_tick: SystemChangeTick, - mut removed_replications: RemovedComponents, - mut despawn_tracker: ResMut, - ) { - for entity in removed_replications.read() { - despawn_tracker.push((entity, change_tick.this_run())); - } - } -} - -/// Entities and ticks when they were despawned. 
-#[derive(Default, Resource, Deref, DerefMut)] -pub(crate) struct DespawnTracker(pub(super) Vec<(Entity, Tick)>); - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn detection() { - let mut app = App::new(); - app.add_plugins(DespawnTrackerPlugin) - .insert_resource(RenetServer::new(Default::default())); - - app.update(); - - let replicated_entity = app.world.spawn(Replication).id(); - - app.update(); - - app.world.entity_mut(replicated_entity).despawn(); - - app.update(); - - let despawn_tracker = app.world.resource::(); - assert_eq!(despawn_tracker.len(), 1); - assert_eq!(despawn_tracker.first().unwrap().0, replicated_entity); - } -} diff --git a/src/server/removal_tracker.rs b/src/server/removal_tracker.rs deleted file mode 100644 index 2f363f18..00000000 --- a/src/server/removal_tracker.rs +++ /dev/null @@ -1,101 +0,0 @@ -use bevy::{ - ecs::{component::Tick, removal_detection::RemovedComponentEvents, system::SystemChangeTick}, - prelude::*, - utils::HashMap, -}; -use bevy_renet::renet::RenetServer; - -use super::ServerSet; -use crate::replicon_core::replication_rules::{Replication, ReplicationId, ReplicationRules}; - -/// Stores component removals in [`RemovalTracker`] component to make them persistent across ticks. -/// -/// Used only on server and tracks only entities with [`Replication`] component. 
-pub(super) struct RemovalTrackerPlugin; - -impl Plugin for RemovalTrackerPlugin { - fn build(&self, app: &mut App) { - app.add_systems( - PostUpdate, - ( - Self::insertion_system, - Self::detection_system.run_if(resource_exists::()), - ) - .before(ServerSet::Send) - .run_if(resource_exists::()), - ); - } -} - -impl RemovalTrackerPlugin { - fn insertion_system( - mut commands: Commands, - new_replicated_entities: Query, Without)>, - ) { - for entity in &new_replicated_entities { - commands.entity(entity).insert(RemovalTracker::default()); - } - } - - fn detection_system( - change_tick: SystemChangeTick, - remove_events: &RemovedComponentEvents, - replication_rules: Res, - mut removal_trackers: Query<&mut RemovalTracker>, - ) { - for (&component_id, &replication_id) in replication_rules.get_ids() { - for entity in remove_events - .get(component_id) - .map(|removed| removed.iter_current_update_events().cloned()) - .into_iter() - .flatten() - .map(Into::into) - { - if let Ok(mut removal_tracker) = removal_trackers.get_mut(entity) { - removal_tracker.insert(replication_id, change_tick.this_run()); - } - } - } - } -} - -#[derive(Component, Default, Deref, DerefMut)] -pub(crate) struct RemovalTracker(pub(super) HashMap); - -#[cfg(test)] -mod tests { - use serde::{Deserialize, Serialize}; - - use super::*; - use crate::replicon_core::replication_rules::AppReplicationExt; - - #[test] - fn detection() { - let mut app = App::new(); - app.add_plugins(RemovalTrackerPlugin) - .insert_resource(RenetServer::new(Default::default())) - .init_resource::() - .replicate::(); - - app.update(); - - let replicated_entity = app.world.spawn((DummyComponent, Replication)).id(); - - app.update(); - - app.world - .entity_mut(replicated_entity) - .remove::(); - - app.update(); - - let component_id = app.world.init_component::(); - let replcation_rules = app.world.resource::(); - let (replication_id, _) = replcation_rules.get(component_id).unwrap(); - let removal_tracker = 
app.world.get::(replicated_entity).unwrap(); - assert!(removal_tracker.contains_key(&replication_id)); - } - - #[derive(Serialize, Deserialize, Component)] - struct DummyComponent; -} From 388a5427edb29c589b724294720b595342d100e2 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Tue, 5 Dec 2023 10:30:33 +0200 Subject: [PATCH 05/63] Apply formatting --- Cargo.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index d7f75a92..79cf473d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,9 @@ include = ["/benches", "/src", "/tests", "/LICENSE*"] [dependencies] bevy_renet = "0.0.10" -bevy = { version = "0.12.1", default-features = false, features = ["bevy_scene"] } +bevy = { version = "0.12.1", default-features = false, features = [ + "bevy_scene", +] } bincode = "1.3" serde = "1.0" varint-rs = "2.2" From c204fb0cfa596ea15c4aa3fd70e29fd5396e2ae0 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Tue, 5 Dec 2023 18:51:52 +0200 Subject: [PATCH 06/63] Add test and fix discovered issues Splitting logic is wrong, still needs to be fixed. --- src/client.rs | 4 +- src/server.rs | 3 + src/server/replication_messages.rs | 36 ++++++------ .../replication_buffer.rs | 8 +-- tests/replication.rs | 56 ++++++++++++++++--- 5 files changed, 74 insertions(+), 33 deletions(-) diff --git a/src/client.rs b/src/client.rs index 71a5de5c..2f6b18bb 100644 --- a/src/client.rs +++ b/src/client.rs @@ -246,7 +246,7 @@ fn apply_update_message( } let (tick, update_index) = bincode::deserialize_from(&mut cursor)?; - if tick < replicon_tick { + if tick > replicon_tick { buffered_updates.push(BufferedUpdate { tick, position: cursor.position(), @@ -325,7 +325,7 @@ fn apply_components( let Some(tick) = entity_ticks.0.get_mut(&entity.id()) else { continue; // Update arrived arrive after a despawn from init message. }; - if *tick >= replicon_tick { + if *tick > replicon_tick { continue; // Update for this entity is outdated. 
} *tick = replicon_tick; diff --git a/src/server.rs b/src/server.rs index 7694b1c9..de7b3050 100644 --- a/src/server.rs +++ b/src/server.rs @@ -387,6 +387,9 @@ fn collect_removals( message.start_array(); } + // PERF: Unfortunately, removed components are grouped by type, not by entity. + // This is why we need an intermediate container. But in practice users rarely + // remove a lot of components in the same tick, so it's probably fine. let mut removals: EntityHashMap<_, Vec<_>> = Default::default(); for (&component_id, &replication_id) in replication_rules.get_ids() { for entity in remove_events diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index f5a78df0..a952b24c 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -83,7 +83,7 @@ impl ReplicationMessages { replicon_tick: RepliconTick, ) -> bincode::Result<(LastChangeTick, Vec)> { if let Some((init_message, _)) = self.data.last() { - if init_message.arrays_with_data() > 1 { + if init_message.arrays_with_data() != 0 { last_change_tick.0 = replicon_tick; } } @@ -95,7 +95,7 @@ impl ReplicationMessages { .zip(&mut self.clients_info) { init_message.send(server, client_info.id); - update_message.send(server, client_info)?; + update_message.send(server, client_info, last_change_tick)?; } Ok((last_change_tick, mem::take(&mut self.clients_info))) @@ -173,9 +173,6 @@ pub(super) struct UpdateMessage { /// Tick until which updates are collected. tick: Tick, - /// Part of the message that will be inserted into each splitted part. - header: Vec, - /// Message data. #[deref] buffer: ReplicationBuffer, @@ -188,7 +185,6 @@ impl UpdateMessage { fn new(tick: Tick) -> bincode::Result { Ok(Self { tick, - header: Default::default(), entities: Default::default(), buffer: Default::default(), }) @@ -199,7 +195,6 @@ impl UpdateMessage { /// Keeps allocated capacity of the buffer. 
fn reset(&mut self) -> bincode::Result<()> { self.entities.clear(); - self.header.clear(); self.buffer.reset(); Ok(()) @@ -211,7 +206,7 @@ impl UpdateMessage { /// Should be called before [`ReplicationBuffer::end_entity_data`]. pub(super) fn register_entity(&mut self) { if self.buffer.entity_data_len() != 0 { - let data_size = self.buffer.entity_data_pos() - self.buffer.as_slice().len(); + let data_size = self.buffer.as_slice().len() - self.buffer.entity_data_pos() as usize; self.entities.push((self.buffer.data_entity(), data_size)); } } @@ -223,6 +218,7 @@ impl UpdateMessage { &mut self, server: &mut RenetServer, client_info: &mut ClientInfo, + last_change_tick: LastChangeTick, ) -> bincode::Result<()> { if self.buffer.arrays_with_data() == 0 { trace!("no updates to send for client {}", client_info.id); @@ -230,45 +226,47 @@ impl UpdateMessage { } trace!("sending update message(s) to client {}", client_info.id); - const MAX_PACKET_SIZE: usize = 1200; // https://github.com/lucaspoffo/renet/blob/acee8b470e34c70d35700d96c00fb233d9cf6919/renet/src/packet.rs#L7 - let tick_pos = self.header.len(); // Remember position of the written tick to ovewrite next changes later. 
+ const TICK_SIZE: usize = mem::size_of::(); + let mut header = [0; TICK_SIZE + mem::size_of::()]; + bincode::serialize_into(&mut header[..], &*last_change_tick)?; + let mut slice = self.buffer.as_slice(); let mut entities = Vec::new(); let mut message_size = 0; for &(entity, data_size) in &self.entities { - if message_size + data_size + self.header.len() > MAX_PACKET_SIZE { + const MAX_PACKET_SIZE: usize = 1200; // https://github.com/lucaspoffo/renet/blob/acee8b470e34c70d35700d96c00fb233d9cf6919/renet/src/packet.rs#L7 + if message_size + data_size + header.len() > MAX_PACKET_SIZE { let (message, remaining) = slice.split_at(message_size); slice = remaining; message_size = data_size; let update_index = client_info.register_update(self.tick, entities.clone()); - bincode::serialize_into(&mut self.header, &update_index)?; + bincode::serialize_into(&mut header[TICK_SIZE..], &update_index)?; server.send_message( client_info.id, ReplicationChannel::Unreliable, - Bytes::from_iter(self.header.iter().copied().chain(message.iter().copied())), + Bytes::from_iter(header.into_iter().chain(message.iter().copied())), ); - self.header.truncate(tick_pos); entities.clear(); } else { entities.push(entity); + println!("{message_size} increase by {data_size}"); message_size += data_size; } } if !slice.is_empty() { - let update_index = client_info.register_update(self.tick, entities.clone()); - bincode::serialize_into(&mut self.header, &update_index)?; + println!("sending more data"); + let update_index = client_info.register_update(self.tick, entities); + bincode::serialize_into(&mut header[TICK_SIZE..], &update_index)?; server.send_message( client_info.id, ReplicationChannel::Unreliable, - Bytes::from_iter(self.header.iter().copied().chain(slice.iter().copied())), + Bytes::from_iter(header.into_iter().chain(slice.iter().copied())), ); - - self.header.truncate(tick_pos); } Ok(()) diff --git a/src/server/replication_messages/replication_buffer.rs 
b/src/server/replication_messages/replication_buffer.rs index a28d4cf9..660dbf4d 100644 --- a/src/server/replication_messages/replication_buffer.rs +++ b/src/server/replication_messages/replication_buffer.rs @@ -21,7 +21,7 @@ pub(crate) struct ReplicationBuffer { /// Length of the array that updated automatically after writing data. array_len: u16, - /// The number of arrays excluding trailing empty arrays. + /// The number of arrays excluding empty arrays. arrays_with_data: usize, /// The number of empty arrays at the end. Can be removed using [`Self::trim_empty_arrays`] @@ -51,14 +51,14 @@ impl ReplicationBuffer { self.trailing_empty_arrays = 0; } - /// Returns the number of arrays excluding trailing empty arrays. + /// Returns the number of arrays excluding empty arrays. pub(super) fn arrays_with_data(&self) -> usize { self.arrays_with_data } /// Returns position from the last [`Self::start_entity_data`] call. - pub(super) fn entity_data_pos(&self) -> usize { - self.entity_data_pos as usize + pub(super) fn entity_data_pos(&self) -> u64 { + self.entity_data_pos } /// Returns length of the current entity data. 
diff --git a/tests/replication.rs b/tests/replication.rs index 659cbf1b..66c25e50 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -34,14 +34,7 @@ fn reset() { client_app.update(); assert_eq!(server_app.world.resource::().get(), 0); - assert!(server_app.world.resource::().is_empty()); - assert_eq!(client_app.world.resource::().get(), 0); - assert!(client_app.world.resource::().is_empty()); - - let server_entity_map = client_app.world.resource::(); - assert!(server_entity_map.to_client().is_empty()); - assert!(server_entity_map.to_server().is_empty()); } #[test] @@ -154,7 +147,6 @@ fn client_spawn_replication() { fn insert_replication() { let mut server_app = App::new(); let mut client_app = App::new(); - for app in [&mut server_app, &mut client_app] { app.add_plugins(( MinimalPlugins, @@ -206,6 +198,51 @@ fn insert_replication() { ); } +#[test] +fn update_replication() { + let mut server_app = App::new(); + let mut client_app = App::new(); + for app in [&mut server_app, &mut client_app] { + app.add_plugins(( + MinimalPlugins, + ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), + )) + .replicate::(); + } + + common::connect(&mut server_app, &mut client_app); + + // TODO: Spawn many entities to cover message splitting. Splitting logic needs to be fixed. 
+ const ENTITIES_COUNT: u32 = 70; + server_app + .world + .spawn_batch([(Replication, BoolComponent(false)); ENTITIES_COUNT as usize]); + + server_app.update(); + client_app.update(); + + assert_eq!(client_app.world.entities().len(), ENTITIES_COUNT); + + for mut component in server_app + .world + .query::<&mut BoolComponent>() + .iter_mut(&mut server_app.world) + { + component.0 = true; + } + + server_app.update(); + client_app.update(); + + for component in client_app + .world + .query::<&BoolComponent>() + .iter(&client_app.world) + { + assert_eq!(component.0, true); + } +} + #[test] fn removal_replication() { let mut server_app = App::new(); @@ -397,6 +434,9 @@ struct NonReplicatingComponent; #[derive(Component, Deserialize, Serialize)] struct IgnoredComponent; +#[derive(Component, Clone, Copy, Serialize, Deserialize)] +struct BoolComponent(bool); + #[derive(Component, Default, Deserialize, Reflect, Serialize)] #[reflect(Component)] struct ReflectedComponent; From b1d4265647f9aa4f35ac460d67bedba50a533907 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Tue, 5 Dec 2023 22:22:09 +0200 Subject: [PATCH 07/63] Refactor working with ticks --- src/server.rs | 16 ++++++++----- src/server/replication_messages.rs | 37 ++++++++---------------------- 2 files changed, 19 insertions(+), 34 deletions(-) diff --git a/src/server.rs b/src/server.rs index de7b3050..71f584fd 100644 --- a/src/server.rs +++ b/src/server.rs @@ -179,21 +179,25 @@ impl ServerPlugin { replicon_tick: Res, ) -> bincode::Result<()> { let clients_info = mem::take(&mut set.p1().0); // Take ownership to avoid borrowing issues. 
- messages.prepare(clients_info, *replicon_tick, change_tick.this_run())?; + messages.prepare(clients_info, *replicon_tick)?; collect_mappings(&mut messages, &mut set.p2())?; collect_changes(&mut messages, set.p0(), &change_tick, &replication_rules)?; collect_removals( &mut messages, remove_events, - &change_tick, + change_tick.this_run(), &replication_rules, )?; collect_despawns(&mut messages, &mut removed_replication)?; let last_change_tick = *set.p4(); - let (last_change_tick, clients_info) = - messages.send(&mut set.p3(), last_change_tick, *replicon_tick)?; + let (last_change_tick, clients_info) = messages.send( + &mut set.p3(), + last_change_tick, + *replicon_tick, + change_tick.this_run(), + )?; // Return borrowed data back. **set.p1() = clients_info; @@ -380,7 +384,7 @@ fn collect_component_change( fn collect_removals( messages: &mut ReplicationMessages, remove_events: &RemovedComponentEvents, - change_tick: &SystemChangeTick, + tick: Tick, replication_rules: &ReplicationRules, ) -> bincode::Result<()> { for (message, _) in messages.iter_mut() { @@ -406,7 +410,7 @@ fn collect_removals( for (message, _, client_info) in messages.iter_mut_with_info() { message.start_entity_data(entity); for &replication_id in &components { - client_info.ticks.insert(entity, change_tick.this_run()); + client_info.ticks.insert(entity, tick); message.write_replication_id(replication_id)?; } message.end_entity_data()?; diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index a952b24c..520ac93f 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -25,8 +25,6 @@ pub(crate) struct ReplicationMessages { impl ReplicationMessages { /// Initializes messages for each client. /// - /// `replicon_tick` and `tick` are current ticks. - /// /// Reuses already allocated messages. /// Creates new messages if number of clients is bigger then the number of allocated messages. 
/// If there are more messages than the number of clients, then the extra messages remain untouched @@ -35,7 +33,6 @@ impl ReplicationMessages { &mut self, clients_info: Vec, replicon_tick: RepliconTick, - tick: Tick, ) -> bincode::Result<()> { self.clients_count = clients_info.len(); @@ -46,9 +43,8 @@ impl ReplicationMessages { init_message.reset(replicon_tick)?; update_message.reset()?; } else { - let init_message = InitMessage::new(replicon_tick)?; - let update_message = UpdateMessage::new(tick)?; - self.data.push((init_message, update_message)); + self.data + .push((InitMessage::new(replicon_tick)?, UpdateMessage::default())); } } @@ -81,6 +77,7 @@ impl ReplicationMessages { server: &mut RenetServer, mut last_change_tick: LastChangeTick, replicon_tick: RepliconTick, + tick: Tick, ) -> bincode::Result<(LastChangeTick, Vec)> { if let Some((init_message, _)) = self.data.last() { if init_message.arrays_with_data() != 0 { @@ -95,7 +92,7 @@ impl ReplicationMessages { .zip(&mut self.clients_info) { init_message.send(server, client_info.id); - update_message.send(server, client_info, last_change_tick)?; + update_message.send(server, client_info, last_change_tick, tick)?; } Ok((last_change_tick, mem::take(&mut self.clients_info))) @@ -118,9 +115,6 @@ pub(super) struct InitMessage { impl InitMessage { /// Creates a new message for the specified tick. - /// - /// `tick` is the current tick that will be written into - /// the message to read by client on receive. fn new(replicon_tick: RepliconTick) -> bincode::Result { let mut buffer = ReplicationBuffer::default(); buffer.write(&replicon_tick)?; @@ -128,7 +122,7 @@ impl InitMessage { Ok(Self { buffer }) } - /// Clears the message and assigns `tick` to it. + /// Clears the message and assigns tick to it. /// /// Keeps allocated capacity of the buffer. fn reset(&mut self, replicon_tick: RepliconTick) -> bincode::Result<()> { @@ -165,31 +159,17 @@ impl InitMessage { /// Sent over [`ReplicationChannel::Unreliable`] channel. 
/// /// See also [Limits](../index.html#limits) -#[derive(Deref, DerefMut)] +#[derive(Deref, DerefMut, Default)] pub(super) struct UpdateMessage { /// Entities and their data sizes. entities: Vec<(Entity, usize)>, - /// Tick until which updates are collected. - tick: Tick, - /// Message data. #[deref] buffer: ReplicationBuffer, } impl UpdateMessage { - /// Creates a new message for the specified tick. - /// - /// `system_tick` is the current Bevy's tick. - fn new(tick: Tick) -> bincode::Result { - Ok(Self { - tick, - entities: Default::default(), - buffer: Default::default(), - }) - } - /// Clears the message. /// /// Keeps allocated capacity of the buffer. @@ -219,6 +199,7 @@ impl UpdateMessage { server: &mut RenetServer, client_info: &mut ClientInfo, last_change_tick: LastChangeTick, + tick: Tick, ) -> bincode::Result<()> { if self.buffer.arrays_with_data() == 0 { trace!("no updates to send for client {}", client_info.id); @@ -240,7 +221,7 @@ impl UpdateMessage { slice = remaining; message_size = data_size; - let update_index = client_info.register_update(self.tick, entities.clone()); + let update_index = client_info.register_update(tick, entities.clone()); bincode::serialize_into(&mut header[TICK_SIZE..], &update_index)?; server.send_message( @@ -259,7 +240,7 @@ impl UpdateMessage { if !slice.is_empty() { println!("sending more data"); - let update_index = client_info.register_update(self.tick, entities); + let update_index = client_info.register_update(tick, entities); bincode::serialize_into(&mut header[TICK_SIZE..], &update_index)?; server.send_message( From 1a39f0fac6b41ce3c35644fce631eb4b6807d63b Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Tue, 5 Dec 2023 22:31:21 +0200 Subject: [PATCH 08/63] Fix clippy --- tests/replication.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/replication.rs b/tests/replication.rs index 66c25e50..da87e08a 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -239,7 +239,7 @@ fn 
update_replication() { .query::<&BoolComponent>() .iter(&client_app.world) { - assert_eq!(component.0, true); + assert!(component.0); } } From 86bd68908f1704d1279999a55fb2b4a716b70b2e Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Tue, 5 Dec 2023 23:15:09 +0200 Subject: [PATCH 09/63] Fix splitting logic --- src/client.rs | 71 +++++++++++++------ src/server.rs | 6 +- src/server/replication_messages.rs | 4 +- .../replication_buffer.rs | 29 ++++++-- tests/replication.rs | 4 +- 5 files changed, 77 insertions(+), 37 deletions(-) diff --git a/src/client.rs b/src/client.rs index 2f6b18bb..38e95a40 100644 --- a/src/client.rs +++ b/src/client.rs @@ -114,13 +114,12 @@ impl ClientPlugin { let mut cursor = Cursor::new(&*update.message); cursor.set_position(update.position); - apply_components( + apply_update_components( &mut cursor, world, &mut entity_map, &mut entity_ticks, stats.as_mut(), - ComponentsKind::Update, &replication_rules, replicon_tick, )?; @@ -180,7 +179,7 @@ fn apply_init_message( return Ok(()); } - apply_components( + apply_init_components( &mut cursor, world, entity_map, @@ -194,7 +193,7 @@ fn apply_init_message( return Ok(()); } - apply_components( + apply_init_components( &mut cursor, world, entity_map, @@ -255,13 +254,12 @@ fn apply_update_message( return Ok(update_index); } - apply_components( + apply_update_components( &mut cursor, world, entity_map, entity_ticks, stats, - ComponentsKind::Update, replication_rules, tick, )?; @@ -306,7 +304,7 @@ fn apply_entity_mappings( /// Deserializes replicated components of `components_kind` and applies them to the `world`. 
#[allow(clippy::too_many_arguments)] -fn apply_components( +fn apply_init_components( cursor: &mut Cursor<&[u8]>, world: &mut World, entity_map: &mut ServerEntityMap, @@ -320,20 +318,7 @@ fn apply_components( for _ in 0..entities_count { let entity = deserialize_entity(cursor)?; let mut entity = entity_map.get_by_server_or_spawn(world, entity); - match components_kind { - ComponentsKind::Update => { - let Some(tick) = entity_ticks.0.get_mut(&entity.id()) else { - continue; // Update arrived arrive after a despawn from init message. - }; - if *tick > replicon_tick { - continue; // Update for this entity is outdated. - } - *tick = replicon_tick; - } - ComponentsKind::Insert | ComponentsKind::Removal => { - entity_ticks.0.insert(entity.id(), replicon_tick); - } - } + entity_ticks.0.insert(entity.id(), replicon_tick); let components_count: u8 = bincode::deserialize_from(&mut *cursor)?; if let Some(stats) = &mut stats { @@ -345,7 +330,7 @@ fn apply_components( // SAFETY: server and client have identical `ReplicationRules` and server always sends valid IDs. let replication_info = unsafe { replication_rules.get_info_unchecked(replication_id) }; match components_kind { - ComponentsKind::Insert | ComponentsKind::Update => { + ComponentsKind::Insert => { (replication_info.deserialize)(&mut entity, entity_map, cursor, replicon_tick)? } ComponentsKind::Removal => (replication_info.remove)(&mut entity, replicon_tick), @@ -387,6 +372,47 @@ fn apply_despawns( Ok(()) } +/// Deserializes replicated components of `components_kind` and applies them to the `world`. 
+fn apply_update_components( + cursor: &mut Cursor<&[u8]>, + world: &mut World, + entity_map: &mut ServerEntityMap, + entity_ticks: &mut ServerEntityTicks, + mut stats: Option<&mut ClientStats>, + replication_rules: &ReplicationRules, + replicon_tick: RepliconTick, +) -> bincode::Result<()> { + loop { + let entity = deserialize_entity(cursor)?; + let mut entity = entity_map.get_by_server_or_spawn(world, entity); + let Some(tick) = entity_ticks.0.get_mut(&entity.id()) else { + continue; // Update arrived arrive after a despawn from init message. + }; + if *tick > replicon_tick { + continue; // Update for this entity is outdated. + } + *tick = replicon_tick; + + let components_count: u8 = bincode::deserialize_from(&mut *cursor)?; + if let Some(stats) = &mut stats { + stats.entities_changed += 1; + stats.components_changed += components_count as u32; + } + for _ in 0..components_count { + let replication_id = DefaultOptions::new().deserialize_from(&mut *cursor)?; + // SAFETY: server and client have identical `ReplicationRules` and server always sends valid IDs. + let replication_info = unsafe { replication_rules.get_info_unchecked(replication_id) }; + (replication_info.deserialize)(&mut entity, entity_map, cursor, replicon_tick)? + } + + if cursor.position() as usize == cursor.get_ref().len() { + break; + } + } + + Ok(()) +} + /// Deserializes `entity` from compressed index and generation. /// /// For details see [`ReplicationBuffer::write_entity`](crate::server::replication_message::replication_buffer::write_entity). @@ -409,7 +435,6 @@ fn deserialize_entity(cursor: &mut Cursor<&[u8]>) -> bincode::Result { /// Parameter for [`apply_components`]. 
enum ComponentsKind { Insert, - Update, Removal, } diff --git a/src/server.rs b/src/server.rs index 71f584fd..544cecb2 100644 --- a/src/server.rs +++ b/src/server.rs @@ -245,9 +245,8 @@ fn collect_changes( change_tick: &SystemChangeTick, replication_rules: &ReplicationRules, ) -> bincode::Result<()> { - for (init_message, update_message) in messages.iter_mut() { + for (init_message, _) in messages.iter_mut() { init_message.start_array(); - update_message.start_array(); } for archetype in world @@ -341,9 +340,8 @@ fn collect_changes( } } - for (init_message, update_message) in messages.iter_mut() { + for (init_message, _) in messages.iter_mut() { init_message.end_array()?; - update_message.end_array()?; } Ok(()) diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index 520ac93f..2f5c1972 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -201,7 +201,7 @@ impl UpdateMessage { last_change_tick: LastChangeTick, tick: Tick, ) -> bincode::Result<()> { - if self.buffer.arrays_with_data() == 0 { + if self.buffer.as_slice().is_empty() { trace!("no updates to send for client {}", client_info.id); return Ok(()); } @@ -233,13 +233,11 @@ impl UpdateMessage { entities.clear(); } else { entities.push(entity); - println!("{message_size} increase by {data_size}"); message_size += data_size; } } if !slice.is_empty() { - println!("sending more data"); let update_index = client_info.register_update(tick, entities); bincode::serialize_into(&mut header[TICK_SIZE..], &update_index)?; diff --git a/src/server/replication_messages/replication_buffer.rs b/src/server/replication_messages/replication_buffer.rs index 660dbf4d..cdb031e3 100644 --- a/src/server/replication_messages/replication_buffer.rs +++ b/src/server/replication_messages/replication_buffer.rs @@ -15,6 +15,9 @@ pub(crate) struct ReplicationBuffer { /// Serialized data. cursor: Cursor>, + /// An indicator of whether the array is currently writing. 
+ inside_array: bool, + /// Position of the array from last call of [`Self::start_array`]. array_pos: u64, @@ -85,6 +88,7 @@ impl ReplicationBuffer { /// Should happen outside of array or entity data and the buffer shouldn't contain trailing empty arrays. /// See also [`Self::start_array`] and [`Self::start_entity_data`]. pub(super) fn write(&mut self, value: &impl Serialize) -> bincode::Result<()> { + debug_assert!(!self.inside_array); debug_assert_eq!(self.array_len, 0); debug_assert_eq!(self.entity_data_len, 0); debug_assert_eq!(self.trailing_empty_arrays, 0); @@ -99,8 +103,10 @@ impl ReplicationBuffer { /// See also [`Self::end_array`], [`Self::write_client_mapping`], [`Self::write_entity`] and [`Self::start_entity_data`]. pub(crate) fn start_array(&mut self) { debug_assert_eq!(self.array_len, 0); + debug_assert!(!self.inside_array); self.array_pos = self.cursor.position(); + self.inside_array = true; self.cursor .set_position(self.array_pos + mem::size_of_val(&self.array_len) as u64); } @@ -109,6 +115,8 @@ impl ReplicationBuffer { /// /// See also [`Self::start_array`]. pub(crate) fn end_array(&mut self) -> bincode::Result<()> { + debug_assert!(self.inside_array); + if self.array_len != 0 { let previous_pos = self.cursor.position(); self.cursor.set_position(self.array_pos); @@ -124,6 +132,7 @@ impl ReplicationBuffer { self.cursor.set_position(self.array_pos); bincode::serialize_into(&mut self.cursor, &self.array_len)?; } + self.inside_array = false; Ok(()) } @@ -134,6 +143,8 @@ impl ReplicationBuffer { /// Increases array length by 1. /// See also [`Self::start_array`]. pub(crate) fn write_client_mapping(&mut self, mapping: &ClientMapping) -> bincode::Result<()> { + debug_assert!(self.inside_array); + serialize_entity(&mut self.cursor, mapping.server_entity)?; serialize_entity(&mut self.cursor, mapping.client_entity)?; self.array_len = self @@ -150,6 +161,8 @@ impl ReplicationBuffer { /// Increases array length by 1. /// See also [`Self::start_array`]. 
pub(crate) fn write_entity(&mut self, entity: Entity) -> bincode::Result<()> { + debug_assert!(self.inside_array); + serialize_entity(&mut self.cursor, entity)?; self.array_len = self .array_len @@ -159,11 +172,12 @@ impl ReplicationBuffer { Ok(()) } - /// Starts writing entity and its data as an array element. + /// Starts writing entity and its data. /// /// Data can contain components with their IDs or IDs only. /// Length will be increased automatically after writing data. /// Entity will be written lazily after first data write. + /// Can be called inside and outside of the array. /// See also [`Self::end_entity_data`], [`Self::write_component`] /// and [`Self::write_component_id`]. pub(crate) fn start_entity_data(&mut self, entity: Entity) { @@ -189,6 +203,7 @@ impl ReplicationBuffer { /// Ends writing entity data by writing its length into the last remembered position. /// /// If the entity data is empty, nothing will be written. + /// Increases array length if writing is done inside the array. /// See also [`Self::start_array`], [`Self::write_component`] and /// [`Self::write_component_id`]. pub(crate) fn end_entity_data(&mut self) -> bincode::Result<()> { @@ -200,10 +215,12 @@ impl ReplicationBuffer { self.cursor.set_position(previous_pos); self.entity_data_len = 0; - self.array_len = self - .array_len - .checked_add(1) - .ok_or(bincode::ErrorKind::SizeLimit)?; + if self.inside_array { + self.array_len = self + .array_len + .checked_add(1) + .ok_or(bincode::ErrorKind::SizeLimit)?; + } } else { self.cursor.set_position(self.entity_data_pos); } @@ -257,6 +274,7 @@ impl ReplicationBuffer { /// Should only be called after all arrays have been written, because /// removed array somewhere the middle cannot be detected during deserialization. 
pub(super) fn trim_empty_arrays(&mut self) { + debug_assert!(!self.inside_array); debug_assert_eq!(self.array_len, 0); debug_assert_eq!(self.entity_data_len, 0); @@ -272,6 +290,7 @@ impl Default for ReplicationBuffer { cursor: Default::default(), array_pos: Default::default(), array_len: Default::default(), + inside_array: Default::default(), arrays_with_data: Default::default(), trailing_empty_arrays: Default::default(), entity_data_pos: Default::default(), diff --git a/tests/replication.rs b/tests/replication.rs index da87e08a..df29d6c5 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -212,8 +212,8 @@ fn update_replication() { common::connect(&mut server_app, &mut client_app); - // TODO: Spawn many entities to cover message splitting. Splitting logic needs to be fixed. - const ENTITIES_COUNT: u32 = 70; + // Spawn many entities to cover message splitting. + const ENTITIES_COUNT: u32 = 300; server_app .world .spawn_batch([(Replication, BoolComponent(false)); ENTITIES_COUNT as usize]); From 7a9959520e61cfb49454902f82e24567ab79feba Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Tue, 5 Dec 2023 23:18:02 +0200 Subject: [PATCH 10/63] Undo diagnostics changes --- CHANGELOG.md | 1 - src/client.rs | 4 ++-- src/client/diagnostics.rs | 39 ++++++++++++++++----------------------- tests/replication.rs | 2 +- 4 files changed, 19 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64f6f681..7815f8db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,7 +22,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Use `EntityHashMap` instead of `HashMap` with entities as keys. - Use `Cursor<&[u8]>` instead of `Cursor`. - Replace `LastRepliconTick` with `RepliconTick` on client. -- Replace Packets in client diagnostics with init and update messages (it was never actually a number of packets). - Fix missing reset of `RepliconTick` on server disconnect. 
- Rename `replicate_into_scene` into `replicate_into` and move it to `scene` module. diff --git a/src/client.rs b/src/client.rs index 38e95a40..d3652006 100644 --- a/src/client.rs +++ b/src/client.rs @@ -163,7 +163,7 @@ fn apply_init_message( let end_pos: u64 = message.len().try_into().unwrap(); let mut cursor = Cursor::new(message); if let Some(stats) = &mut stats { - stats.init_messages += 1; + stats.packets += 1; stats.bytes += end_pos; } @@ -240,7 +240,7 @@ fn apply_update_message( let end_pos: u64 = message.len().try_into().unwrap(); let mut cursor = Cursor::new(&*message); if let Some(stats) = &mut stats { - stats.update_messages += 1; + stats.packets += 1; stats.bytes += end_pos; } diff --git a/src/client/diagnostics.rs b/src/client/diagnostics.rs index 0c506c02..025d9ef0 100644 --- a/src/client/diagnostics.rs +++ b/src/client/diagnostics.rs @@ -19,10 +19,8 @@ pub struct ClientStats { pub mappings: u32, /// Incremented per entity despawn. pub despawns: u32, - /// Init messages received. - pub init_messages: u32, - /// Update messages received. - pub update_messages: u32, + /// Replication packets received. + pub packets: u32, /// Replication bytes received in packet payloads (without internal Renet data). pub bytes: u64, } @@ -60,7 +58,7 @@ impl Plugin for ClientDiagnosticsPlugin { Self::DIAGNOSTIC_HISTORY_LEN, )) .register_diagnostic(Diagnostic::new( - Self::INIT_MESSAGES, + Self::PACKETS, "packets per second", Self::DIAGNOSTIC_HISTORY_LEN, )) @@ -83,12 +81,8 @@ impl ClientDiagnosticsPlugin { pub const MAPPINGS: DiagnosticId = DiagnosticId::from_u128(61564098061172206743744706749187); /// How many despawns per second from replication. pub const DESPAWNS: DiagnosticId = DiagnosticId::from_u128(11043323327351349675112378115868); - /// How many init messages processed per second. - pub const INIT_MESSAGES: DiagnosticId = - DiagnosticId::from_u128(40094818756895929689855772983865); - /// How many update messages processed per second. 
- pub const UPDATE_MESSAGES: DiagnosticId = - DiagnosticId::from_u128(40094818756895929689855772983865); + /// How many replication packets processed per second. + pub const PACKETS: DiagnosticId = DiagnosticId::from_u128(40094818756895929689855772983865); /// How many bytes of replication packets payloads per second. pub const BYTES: DiagnosticId = DiagnosticId::from_u128(87998088176776397493423835383418); @@ -97,42 +91,41 @@ impl ClientDiagnosticsPlugin { fn diagnostic_system(mut stats: ResMut, mut diagnostics: Diagnostics) { diagnostics.add_measurement(Self::ENTITY_CHANGES, || { - if stats.init_messages == 0 { + if stats.packets == 0 { 0_f64 } else { - stats.entities_changed as f64 / stats.init_messages as f64 + stats.entities_changed as f64 / stats.packets as f64 } }); diagnostics.add_measurement(Self::COMPONENT_CHANGES, || { - if stats.init_messages == 0 { + if stats.packets == 0 { 0_f64 } else { - stats.components_changed as f64 / stats.init_messages as f64 + stats.components_changed as f64 / stats.packets as f64 } }); diagnostics.add_measurement(Self::MAPPINGS, || { - if stats.init_messages == 0 { + if stats.packets == 0 { 0_f64 } else { - stats.mappings as f64 / stats.init_messages as f64 + stats.mappings as f64 / stats.packets as f64 } }); diagnostics.add_measurement(Self::DESPAWNS, || { - if stats.init_messages == 0 { + if stats.packets == 0 { 0_f64 } else { - stats.despawns as f64 / stats.init_messages as f64 + stats.despawns as f64 / stats.packets as f64 } }); diagnostics.add_measurement(Self::BYTES, || { - if stats.init_messages == 0 { + if stats.packets == 0 { 0_f64 } else { - stats.bytes as f64 / stats.init_messages as f64 + stats.bytes as f64 / stats.packets as f64 } }); - diagnostics.add_measurement(Self::INIT_MESSAGES, || stats.init_messages as f64); - diagnostics.add_measurement(Self::UPDATE_MESSAGES, || stats.init_messages as f64); + diagnostics.add_measurement(Self::PACKETS, || stats.packets as f64); *stats = ClientStats::default(); } } diff 
--git a/tests/replication.rs b/tests/replication.rs index df29d6c5..b37675dc 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -408,7 +408,7 @@ fn diagnostics() { assert_eq!(stats.components_changed, 1); assert_eq!(stats.mappings, 1); assert_eq!(stats.despawns, 1); - assert_eq!(stats.init_messages, 1); + assert_eq!(stats.packets, 1); assert_eq!(stats.bytes, 18); } From 689cacd743f640fc68119d3c24ad4a057a8318af Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Wed, 6 Dec 2023 00:49:36 +0200 Subject: [PATCH 11/63] Update deny.toml --- deny.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/deny.toml b/deny.toml index 0b9aae52..1abcc01f 100644 --- a/deny.toml +++ b/deny.toml @@ -11,10 +11,14 @@ allow-osi-fsf-free = "either" multiple-versions = "deny" wildcards = "allow" skip = [ + { name = "async-channel", version = "1.9" }, + { name = "async-lock", version = "2.8" }, { name = "bitflags", version = "2.0" }, + { name = "event-listener", version = "2.5" }, { name = "fastrand", version = "1.9" }, { name = "foreign-types", version = "0.3" }, { name = "foreign-types-shared", version = "0.1" }, + { name = "futures-lite", version = "1.13" }, { name = "hashbrown", version = "0.12" }, { name = "indexmap", version = "1.0" }, { name = "libloading", version = "0.7" }, @@ -26,6 +30,7 @@ skip = [ { name = "regex-syntax", version = "0.7" }, { name = "syn", version = "1.0" }, { name = "toml_edit", version = "0.19" }, + { name = "tracing-log", version = "0.1" }, { name = "windows", version = "0.44" }, { name = "windows-sys", version = "0.45" }, { name = "windows-targets", version = "0.42" }, From 9ec9ef259f031e7a26ad65d53ed47c878249082f Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Wed, 6 Dec 2023 01:08:53 +0200 Subject: [PATCH 12/63] Update docs --- src/lib.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/lib.rs b/src/lib.rs index 66e19302..b1fdac0c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -375,6 +375,15 @@ They rarely 
used for gameplay systems (since you write the same logic for multiplayer and single-player!), but could be used for server creation / connection systems and corresponding UI. +## Guarantees + +Replicon maintains valid state of the world even with packet loss. All events, inserts, +removals and despawns will be applied in the same order as on the server. + +Only component value changes per entity could arrive in any order and partially. But they will +be applied when the tick on which they changed also arrives. So if your component references another entity, +it will be applied only when this entity being spawned. + ## Limits To reduce packet size there are the following limits per replication update: From 9bf14bc34ddb24749d1dff479c6b08af0501bc9f Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Wed, 6 Dec 2023 19:44:45 +0200 Subject: [PATCH 13/63] Trigger change detection to fully cover diagnostics --- tests/replication.rs | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/tests/replication.rs b/tests/replication.rs index b37675dc..6d959d19 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -1,5 +1,7 @@ mod common; +use std::ops::DerefMut; + use bevy::prelude::*; use bevy_replicon::{prelude::*, scene}; @@ -388,7 +390,6 @@ fn diagnostics() { let client_transport = client_app.world.resource::(); let client_id = ClientId::from_raw(client_transport.client_id()); - let mut entity_map = server_app.world.resource_mut::(); entity_map.insert( client_id, @@ -403,13 +404,23 @@ fn diagnostics() { server_app.update(); client_app.update(); + // Trigger change detection. 
+ server_app + .world + .get_mut::(server_entity) + .unwrap() + .deref_mut(); + + server_app.update(); + client_app.update(); + let stats = client_app.world.resource::(); - assert_eq!(stats.entities_changed, 1); - assert_eq!(stats.components_changed, 1); + assert_eq!(stats.entities_changed, 2); + assert_eq!(stats.components_changed, 2); assert_eq!(stats.mappings, 1); assert_eq!(stats.despawns, 1); - assert_eq!(stats.packets, 1); - assert_eq!(stats.bytes, 18); + assert_eq!(stats.packets, 2); + assert_eq!(stats.bytes, 27); } #[derive(Component, Deserialize, Serialize)] From 34cff3f96ccf287cebf3cd206210e3f70c616e3f Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Wed, 6 Dec 2023 22:59:23 +0200 Subject: [PATCH 14/63] Add test for update replication buffering and fix the discovered bug --- src/client.rs | 2 +- tests/replication.rs | 55 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index d3652006..8ca61c39 100644 --- a/src/client.rs +++ b/src/client.rs @@ -109,7 +109,7 @@ impl ClientPlugin { retain_buffer.clear(); retain_buffer.reserve(old_buffers); for update in buffered_updates.iter().take(old_buffers) { - let retain = update.tick < replicon_tick; + let retain = update.tick <= replicon_tick; if retain { let mut cursor = Cursor::new(&*update.message); cursor.set_position(update.position); diff --git a/tests/replication.rs b/tests/replication.rs index 6d959d19..433f9886 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -245,6 +245,61 @@ fn update_replication() { } } +#[test] +fn update_replication_buffering() { + let mut server_app = App::new(); + let mut client_app = App::new(); + for app in [&mut server_app, &mut client_app] { + app.add_plugins(( + MinimalPlugins, + ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), + )) + .replicate::(); + } + + common::connect(&mut server_app, &mut client_app); + + // Spawn many entities to cover message 
splitting. + let server_entity = server_app + .world + .spawn((Replication, BoolComponent(false))) + .id(); + + let old_tick = *server_app.world.resource::(); + + server_app.update(); + client_app.update(); + + // Artificially rollback the client by 1 tick to force next received update to be buffered. + *client_app.world.resource_mut::() = old_tick; + let mut component = server_app + .world + .get_mut::(server_entity) + .unwrap(); + component.0 = true; + + server_app.update(); + client_app.update(); + + let (client_entity, component) = client_app + .world + .query::<(Entity, &BoolComponent)>() + .single(&client_app.world); + assert!(!component.0, "client should buffer the update"); + + // Move tick forward to let the buffered update apply. + client_app.world.resource_mut::().increment(); + + server_app.update(); + client_app.update(); + + let component = client_app + .world + .get::(client_entity) + .unwrap(); + assert!(component.0, "buffered update should be applied"); +} + #[test] fn removal_replication() { let mut server_app = App::new(); From cbf011681e557d797529a39ffc62fcad3c3edb8f Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Wed, 6 Dec 2023 23:33:42 +0200 Subject: [PATCH 15/63] Refactor event queue test in similar way to test more --- tests/server_event.rs | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/tests/server_event.rs b/tests/server_event.rs index 6d5b4a76..de25b2a8 100644 --- a/tests/server_event.rs +++ b/tests/server_event.rs @@ -5,6 +5,7 @@ use bevy_renet::renet::{transport::NetcodeClientTransport, ClientId}; use bevy_replicon::prelude::*; use common::DummyEvent; +use serde::{Deserialize, Serialize}; #[test] fn without_server_plugin() { @@ -160,27 +161,38 @@ fn event_queue() { MinimalPlugins, ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), )) + .replicate::() .add_server_event::(EventType::Ordered); } common::connect(&mut server_app, &mut client_app); - // Simulate 
event that received two ticks earlier. - let mut tick = *server_app.world.resource::(); - tick.increment_by(2); - client_app - .world - .resource_mut::>() - .insert(tick, DummyEvent(Entity::PLACEHOLDER)); + // Spawn entity to trigger world change. + server_app.world.spawn((Replication, DummyComponent)); + let old_tick = *server_app.world.resource::(); + + server_app.update(); + client_app.update(); + + // Artificially rollback the client by 1 tick to force next received event to be queued. + *client_app.world.resource_mut::() = old_tick; + server_app.world.send_event(ToClients { + mode: SendMode::Broadcast, + event: DummyEvent(Entity::PLACEHOLDER), + }); + server_app.update(); client_app.update(); assert!(client_app.world.resource::>().is_empty()); - client_app.insert_resource(tick); + client_app.world.resource_mut::().increment(); client_app.update(); let dummy_events = client_app.world.resource::>(); assert_eq!(dummy_events.len(), 1); } + +#[derive(Component, Serialize, Deserialize)] +struct DummyComponent; From 7408bf9a53c42fcf9c6917e40fc35ae0c3694638 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Thu, 7 Dec 2023 01:42:02 +0200 Subject: [PATCH 16/63] Use tick from the message when apply an update from buffer Also use more correct and clear naming to avoid this confusion in the future. 
--- src/client.rs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/client.rs b/src/client.rs index 8ca61c39..8a2d21a9 100644 --- a/src/client.rs +++ b/src/client.rs @@ -109,7 +109,7 @@ impl ClientPlugin { retain_buffer.clear(); retain_buffer.reserve(old_buffers); for update in buffered_updates.iter().take(old_buffers) { - let retain = update.tick <= replicon_tick; + let retain = update.message_tick <= replicon_tick; if retain { let mut cursor = Cursor::new(&*update.message); cursor.set_position(update.position); @@ -121,7 +121,7 @@ impl ClientPlugin { &mut entity_ticks, stats.as_mut(), &replication_rules, - replicon_tick, + update.message_tick, )?; } retain_buffer.push(retain); @@ -244,10 +244,10 @@ fn apply_update_message( stats.bytes += end_pos; } - let (tick, update_index) = bincode::deserialize_from(&mut cursor)?; - if tick > replicon_tick { + let (message_tick, update_index) = bincode::deserialize_from(&mut cursor)?; + if message_tick > replicon_tick { buffered_updates.push(BufferedUpdate { - tick, + message_tick, position: cursor.position(), message, }); @@ -261,7 +261,7 @@ fn apply_update_message( entity_ticks, stats, replication_rules, - tick, + message_tick, )?; Ok(update_index) @@ -380,18 +380,18 @@ fn apply_update_components( entity_ticks: &mut ServerEntityTicks, mut stats: Option<&mut ClientStats>, replication_rules: &ReplicationRules, - replicon_tick: RepliconTick, + message_tick: RepliconTick, ) -> bincode::Result<()> { loop { let entity = deserialize_entity(cursor)?; let mut entity = entity_map.get_by_server_or_spawn(world, entity); - let Some(tick) = entity_ticks.0.get_mut(&entity.id()) else { + let Some(entity_tick) = entity_ticks.0.get_mut(&entity.id()) else { continue; // Update arrived arrive after a despawn from init message. }; - if *tick > replicon_tick { + if *entity_tick > message_tick { continue; // Update for this entity is outdated. 
} - *tick = replicon_tick; + *entity_tick = message_tick; let components_count: u8 = bincode::deserialize_from(&mut *cursor)?; if let Some(stats) = &mut stats { @@ -402,7 +402,7 @@ fn apply_update_components( let replication_id = DefaultOptions::new().deserialize_from(&mut *cursor)?; // SAFETY: server and client have identical `ReplicationRules` and server always sends valid IDs. let replication_info = unsafe { replication_rules.get_info_unchecked(replication_id) }; - (replication_info.deserialize)(&mut entity, entity_map, cursor, replicon_tick)? + (replication_info.deserialize)(&mut entity, entity_map, cursor, message_tick)? } if cursor.position() as usize == cursor.get_ref().len() { @@ -546,7 +546,7 @@ pub struct ServerEntityTicks(EntityHashMap); /// then the required init message with this tick. pub(super) struct BufferedUpdate { /// Required tick to wait for. - tick: RepliconTick, + message_tick: RepliconTick, /// Position of the message data. position: u64, /// Update data. From 58b659237d2dab6c78d8e42f1c5c4df4c6b44964 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Thu, 7 Dec 2023 21:06:47 +0200 Subject: [PATCH 17/63] Store replicon tick inside update message too Required to apply updates correctly if the world didn't change. While we can detect if a message is older by the ack, users won't be able to get information about how old the current message is. 
--- src/client.rs | 22 ++++++++++++++++------ src/server/replication_messages.rs | 17 +++++++++-------- tests/replication.rs | 2 +- 3 files changed, 26 insertions(+), 15 deletions(-) diff --git a/src/client.rs b/src/client.rs index 8a2d21a9..3a713b61 100644 --- a/src/client.rs +++ b/src/client.rs @@ -109,7 +109,7 @@ impl ClientPlugin { retain_buffer.clear(); retain_buffer.reserve(old_buffers); for update in buffered_updates.iter().take(old_buffers) { - let retain = update.message_tick <= replicon_tick; + let retain = update.last_change_tick <= replicon_tick; if retain { let mut cursor = Cursor::new(&*update.message); cursor.set_position(update.position); @@ -244,9 +244,10 @@ fn apply_update_message( stats.bytes += end_pos; } - let (message_tick, update_index) = bincode::deserialize_from(&mut cursor)?; - if message_tick > replicon_tick { + let (last_change_tick, message_tick, update_index) = bincode::deserialize_from(&mut cursor)?; + if last_change_tick > replicon_tick { buffered_updates.push(BufferedUpdate { + last_change_tick, message_tick, position: cursor.position(), message, @@ -388,7 +389,7 @@ fn apply_update_components( let Some(entity_tick) = entity_ticks.0.get_mut(&entity.id()) else { continue; // Update arrived arrive after a despawn from init message. }; - if *entity_tick > message_tick { + if *entity_tick >= message_tick { continue; // Update for this entity is outdated. } *entity_tick = message_tick; @@ -542,13 +543,22 @@ impl Mapper for ClientMapper<'_> { #[derive(Default, Deref, Resource)] pub struct ServerEntityTicks(EntityHashMap); -/// Caches buffer with deserialized tick that we received earlier +/// Caches buffer with deserialized ticks that was received earlier /// then the required init message with this tick. +/// +/// See also [`crate::server::replication_messages::UpdateMessage`]. pub(super) struct BufferedUpdate { /// Required tick to wait for. + /// + /// See also [`crate::server::LastChangeTick`]. 
+ last_change_tick: RepliconTick, + + /// The tick this update corresponds to. message_tick: RepliconTick, - /// Position of the message data. + + /// The offset from which the data begins. position: u64, + /// Update data. message: Bytes, } diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index 2f5c1972..88bd1fb7 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -92,7 +92,7 @@ impl ReplicationMessages { .zip(&mut self.clients_info) { init_message.send(server, client_info.id); - update_message.send(server, client_info, last_change_tick, tick)?; + update_message.send(server, client_info, last_change_tick, replicon_tick, tick)?; } Ok((last_change_tick, mem::take(&mut self.clients_info))) @@ -152,8 +152,8 @@ impl InitMessage { /// A reusable message with component updates. /// -/// Contains tick and component updates since the last tick until this tick for each entity. -/// Requires init message with the same tick to be applied to keep the world in valid state. +/// Contains last change tick, current tick and component updates since the last acknowledged tick for each entity. +/// Requires init message with the same tick as last change tick to be applied to keep the world in valid state. /// The message will be manually split into packets up to max size that can be applied independently. /// Splits will happen per-entity to avoid weird behavior of partially changed entity. /// Sent over [`ReplicationChannel::Unreliable`] channel. 
@@ -199,6 +199,7 @@ impl UpdateMessage { server: &mut RenetServer, client_info: &mut ClientInfo, last_change_tick: LastChangeTick, + replicon_tick: RepliconTick, tick: Tick, ) -> bincode::Result<()> { if self.buffer.as_slice().is_empty() { @@ -207,9 +208,9 @@ impl UpdateMessage { } trace!("sending update message(s) to client {}", client_info.id); - const TICK_SIZE: usize = mem::size_of::(); - let mut header = [0; TICK_SIZE + mem::size_of::()]; - bincode::serialize_into(&mut header[..], &*last_change_tick)?; + const TICKS_SIZE: usize = 2 * mem::size_of::(); + let mut header = [0; TICKS_SIZE + mem::size_of::()]; + bincode::serialize_into(&mut header[..], &(*last_change_tick, replicon_tick))?; let mut slice = self.buffer.as_slice(); let mut entities = Vec::new(); @@ -222,7 +223,7 @@ impl UpdateMessage { message_size = data_size; let update_index = client_info.register_update(tick, entities.clone()); - bincode::serialize_into(&mut header[TICK_SIZE..], &update_index)?; + bincode::serialize_into(&mut header[TICKS_SIZE..], &update_index)?; server.send_message( client_info.id, @@ -239,7 +240,7 @@ impl UpdateMessage { if !slice.is_empty() { let update_index = client_info.register_update(tick, entities); - bincode::serialize_into(&mut header[TICK_SIZE..], &update_index)?; + bincode::serialize_into(&mut header[TICKS_SIZE..], &update_index)?; server.send_message( client_info.id, diff --git a/tests/replication.rs b/tests/replication.rs index 433f9886..b7b25c9f 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -475,7 +475,7 @@ fn diagnostics() { assert_eq!(stats.mappings, 1); assert_eq!(stats.despawns, 1); assert_eq!(stats.packets, 2); - assert_eq!(stats.bytes, 27); + assert_eq!(stats.bytes, 31); } #[derive(Component, Deserialize, Serialize)] From bbf5db4ac2b6415728e5c56cc54b49f81e6bd006 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Thu, 7 Dec 2023 21:25:27 +0200 Subject: [PATCH 18/63] Do not store position in buffered update --- src/client.rs | 11 
++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/src/client.rs b/src/client.rs index 3a713b61..ed9ca03a 100644 --- a/src/client.rs +++ b/src/client.rs @@ -111,11 +111,8 @@ impl ClientPlugin { for update in buffered_updates.iter().take(old_buffers) { let retain = update.last_change_tick <= replicon_tick; if retain { - let mut cursor = Cursor::new(&*update.message); - cursor.set_position(update.position); - apply_update_components( - &mut cursor, + &mut Cursor::new(&*update.message), world, &mut entity_map, &mut entity_ticks, @@ -249,8 +246,7 @@ fn apply_update_message( buffered_updates.push(BufferedUpdate { last_change_tick, message_tick, - position: cursor.position(), - message, + message: message.slice(cursor.position() as usize..), }); return Ok(update_index); } @@ -556,9 +552,6 @@ pub(super) struct BufferedUpdate { /// The tick this update corresponds to. message_tick: RepliconTick, - /// The offset from which the data begins. - position: u64, - /// Update data. message: Bytes, } From 8014108318da457606f94fc42a23858b14108399 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 01:20:20 +0200 Subject: [PATCH 19/63] Drop extra retain buffer It was kinda silly. 
--- src/client.rs | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/src/client.rs b/src/client.rs index ed9ca03a..05b07686 100644 --- a/src/client.rs +++ b/src/client.rs @@ -64,7 +64,6 @@ impl ClientPlugin { pub(super) fn replication_receiving_system( world: &mut World, mut buffered_updates: Local>, - mut retain_buffer: Local>, ) -> bincode::Result<()> { world.resource_scope(|world, mut client: Mut| { world.resource_scope(|world, mut entity_map: Mut| { @@ -84,7 +83,6 @@ impl ClientPlugin { )?; } - let old_buffers = buffered_updates.len(); let replicon_tick = *world.resource::(); while let Some(message) = client.receive_message(ReplicationChannel::Unreliable) @@ -106,25 +104,26 @@ impl ClientPlugin { ) } - retain_buffer.clear(); - retain_buffer.reserve(old_buffers); - for update in buffered_updates.iter().take(old_buffers) { - let retain = update.last_change_tick <= replicon_tick; - if retain { - apply_update_components( - &mut Cursor::new(&*update.message), - world, - &mut entity_map, - &mut entity_ticks, - stats.as_mut(), - &replication_rules, - update.message_tick, - )?; + let mut result = Ok(()); + buffered_updates.retain(|update| { + if update.last_change_tick > replicon_tick { + return true; } - retain_buffer.push(retain); - } - let mut iter = retain_buffer.iter(); - buffered_updates.retain(|_| *iter.next().unwrap_or(&true)); + if let Err(e) = apply_update_components( + &mut Cursor::new(&*update.message), + world, + &mut entity_map, + &mut entity_ticks, + stats.as_mut(), + &replication_rules, + update.message_tick, + ) { + result = Err(e); + } + + false + }); + result?; if let Some(stats) = stats { world.insert_resource(stats); From ce9e7e42ab6edbb37794d0feb1d6334f74a26c00 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 01:21:25 +0200 Subject: [PATCH 20/63] Remove wrong comment --- tests/replication.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/replication.rs 
b/tests/replication.rs index b7b25c9f..8666e14d 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -259,7 +259,6 @@ fn update_replication_buffering() { common::connect(&mut server_app, &mut client_app); - // Spawn many entities to cover message splitting. let server_entity = server_app .world .spawn((Replication, BoolComponent(false))) From 33669b3c8f56669851650e26d776ff73f5d0b228 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 13:03:19 +0200 Subject: [PATCH 21/63] Hide `ServerEntityTicks` from public API Not really useful for users, they have access to the tick on deserialziation. If someone will need it - I will be happy to reconsider. --- CHANGELOG.md | 4 ---- src/client.rs | 12 ++++++------ src/lib.rs | 2 +- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7815f8db..3d96aeb6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,10 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] -### Added - -- `ServerEntityTicks` with ticks for each entity on client. - ### Changed - Send all component mappings, inserts, removals and despawns over reliable channel in form of deltas and component updates over unreliable channel packed by packet size. This significantly reduces the possibility of packet loss. 
diff --git a/src/client.rs b/src/client.rs index 05b07686..dde29946 100644 --- a/src/client.rs +++ b/src/client.rs @@ -143,7 +143,7 @@ impl ClientPlugin { ) { *replicon_tick = Default::default(); entity_map.clear(); - entity_ticks.0.clear(); + entity_ticks.clear(); } } @@ -314,7 +314,7 @@ fn apply_init_components( for _ in 0..entities_count { let entity = deserialize_entity(cursor)?; let mut entity = entity_map.get_by_server_or_spawn(world, entity); - entity_ticks.0.insert(entity.id(), replicon_tick); + entity_ticks.insert(entity.id(), replicon_tick); let components_count: u8 = bincode::deserialize_from(&mut *cursor)?; if let Some(stats) = &mut stats { @@ -360,7 +360,7 @@ fn apply_despawns( .remove_by_server(server_entity) .and_then(|entity| world.get_entity_mut(entity)) { - entity_ticks.0.remove(&client_entity.id()); + entity_ticks.remove(&client_entity.id()); (replication_rules.despawn_fn)(client_entity, replicon_tick); } } @@ -381,7 +381,7 @@ fn apply_update_components( loop { let entity = deserialize_entity(cursor)?; let mut entity = entity_map.get_by_server_or_spawn(world, entity); - let Some(entity_tick) = entity_ticks.0.get_mut(&entity.id()) else { + let Some(entity_tick) = entity_ticks.get_mut(&entity.id()) else { continue; // Update arrived arrive after a despawn from init message. }; if *entity_tick >= message_tick { @@ -535,8 +535,8 @@ impl Mapper for ClientMapper<'_> { /// Last received tick for each entity. /// /// Used to avoid applying old updates. -#[derive(Default, Deref, Resource)] -pub struct ServerEntityTicks(EntityHashMap); +#[derive(Default, Deref, DerefMut, Resource)] +pub(super) struct ServerEntityTicks(EntityHashMap); /// Caches buffer with deserialized ticks that was received earlier /// then the required init message with this tick. 
diff --git a/src/lib.rs b/src/lib.rs index b1fdac0c..e5506463 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -405,7 +405,7 @@ pub mod prelude { pub use super::{ client::{ diagnostics::{ClientDiagnosticsPlugin, ClientStats}, - ClientMapper, ClientPlugin, ClientSet, ServerEntityMap, ServerEntityTicks, + ClientMapper, ClientPlugin, ClientSet, ServerEntityMap, }, network_event::{ client_event::{ClientEventAppExt, FromClient}, From 6a0d92ba3ec4fdd78d89eea3c4a7a288ce86d832 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 15:53:30 +0200 Subject: [PATCH 22/63] If there is any insertion, include all updates into init message To keep entity updates atomic. --- src/server.rs | 17 +++++++++--- src/server/replication_messages.rs | 11 +++----- .../replication_buffer.rs | 26 +++++++++++++++++-- 3 files changed, 40 insertions(+), 14 deletions(-) diff --git a/src/server.rs b/src/server.rs index 544cecb2..4f1179d1 100644 --- a/src/server.rs +++ b/src/server.rs @@ -332,10 +332,20 @@ fn collect_changes( } } - for (init_message, update_message) in messages.iter_mut() { + for (init_message, update_message, client_info) in messages.iter_mut_with_info() { + if init_message.entity_data_len() != 0 { + // If there is any insertion, include all updates into init message + // and bump the last acknowledged tick to keep entity updates atomic. 
+ init_message.take_entity_data(update_message); + client_info + .ticks + .insert(archetype_entity.entity(), change_tick.this_run()); + } else { + update_message.register_entity(); + update_message.end_entity_data()?; + } + init_message.end_entity_data()?; - update_message.register_entity(); - update_message.end_entity_data()?; } } } @@ -362,7 +372,6 @@ fn collect_component_change( ) -> bincode::Result<()> { for (init_message, update_message, client_info) in messages.iter_mut_with_info() { if ticks.is_added(change_tick.last_run(), change_tick.this_run()) { - client_info.ticks.insert(entity, change_tick.this_run()); init_message.write_component(replication_info, replication_id, component)?; } else { let tick = *client_info diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index 88bd1fb7..92ec2946 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -180,15 +180,10 @@ impl UpdateMessage { Ok(()) } - /// Registers entity from buffer's entity data for possible splitting. - /// - /// Ignores the entity if data length is 0. - /// Should be called before [`ReplicationBuffer::end_entity_data`]. + /// Registers entity from buffer's entity data and its size for possible splitting. pub(super) fn register_entity(&mut self) { - if self.buffer.entity_data_len() != 0 { - let data_size = self.buffer.as_slice().len() - self.buffer.entity_data_pos() as usize; - self.entities.push((self.buffer.data_entity(), data_size)); - } + let data_size = self.buffer.as_slice().len() - self.buffer.entity_data_pos() as usize; + self.entities.push((self.buffer.data_entity(), data_size)); } /// Splits message according to `entities` and sends it to the specified client. 
diff --git a/src/server/replication_messages/replication_buffer.rs b/src/server/replication_messages/replication_buffer.rs index cdb031e3..40306a28 100644 --- a/src/server/replication_messages/replication_buffer.rs +++ b/src/server/replication_messages/replication_buffer.rs @@ -1,4 +1,7 @@ -use std::{io::Cursor, mem}; +use std::{ + io::{Cursor, Write}, + mem, +}; use bevy::{prelude::*, ptr::Ptr}; use bincode::{DefaultOptions, Options}; @@ -67,7 +70,7 @@ impl ReplicationBuffer { /// Returns length of the current entity data. /// /// See also [`Self::start_entity_data`] and [`Self::end_entity_data`]. - pub(super) fn entity_data_len(&self) -> u8 { + pub(crate) fn entity_data_len(&self) -> u8 { self.entity_data_len } @@ -269,6 +272,25 @@ impl ReplicationBuffer { Ok(()) } + /// Removes entity data elements from `other` copies it. + /// + /// Ends entity data for `other`. + /// See also [`Self::start_entity_data`] and [`Self::end_entity_data`]. + pub(crate) fn take_entity_data(&mut self, other: &mut Self) { + if other.entity_data_len != 0 { + let slice = other.cursor.get_ref(); + let offset = + other.entity_data_len_pos as usize + mem::size_of_val(&other.entity_data_len); + let pos = other.cursor.position() as usize; + self.cursor.write_all(&slice[offset..pos]).unwrap(); + self.entity_data_len += other.entity_data_len; + + other.entity_data_len = 0; + } + + other.cursor.set_position(other.entity_data_pos); + } + /// Crops empty arrays at the end. 
/// /// Should only be called after all arrays have been written, because From 61d8d46f6e84c45c91f2c435ed7ccba65b8faf49 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 16:04:47 +0200 Subject: [PATCH 23/63] Properly disconnect the client to cover this case --- tests/replication.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/replication.rs b/tests/replication.rs index 8666e14d..c7f2cd37 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -21,10 +21,10 @@ fn reset() { common::connect(&mut server_app, &mut client_app); - client_app - .world - .resource_mut::() - .disconnect(); + client_app.world.resource_mut::().disconnect(); + + client_app.update(); + server_app.update(); client_app.update(); server_app.update(); From 9af3e8c6f5a1c9a31c832965bc90b4c68eaa1dcd Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 16:06:43 +0200 Subject: [PATCH 24/63] Remove extra check We no longer send empty messages. 
--- src/client.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/client.rs b/src/client.rs index dde29946..67e5e3d9 100644 --- a/src/client.rs +++ b/src/client.rs @@ -166,9 +166,6 @@ fn apply_init_message( let replicon_tick = bincode::deserialize_from(&mut cursor)?; trace!("applying {replicon_tick:?}"); *world.resource_mut::() = replicon_tick; - if cursor.position() == end_pos { - return Ok(()); - } apply_entity_mappings(&mut cursor, world, entity_map, stats.as_deref_mut())?; if cursor.position() == end_pos { From 1566c3a25cd979334cf1717da4d2e6f96dc79fe1 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 16:09:26 +0200 Subject: [PATCH 25/63] Update tracing messages --- src/client.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 67e5e3d9..10ce169a 100644 --- a/src/client.rs +++ b/src/client.rs @@ -109,6 +109,8 @@ impl ClientPlugin { if update.last_change_tick > replicon_tick { return true; } + + trace!("applying buffered update message for {replicon_tick:?}"); if let Err(e) = apply_update_components( &mut Cursor::new(&*update.message), world, @@ -164,7 +166,7 @@ fn apply_init_message( } let replicon_tick = bincode::deserialize_from(&mut cursor)?; - trace!("applying {replicon_tick:?}"); + trace!("applying init message for {replicon_tick:?}"); *world.resource_mut::() = replicon_tick; apply_entity_mappings(&mut cursor, world, entity_map, stats.as_deref_mut())?; @@ -239,6 +241,7 @@ fn apply_update_message( let (last_change_tick, message_tick, update_index) = bincode::deserialize_from(&mut cursor)?; if last_change_tick > replicon_tick { + trace!("buffering update message for {replicon_tick:?}"); buffered_updates.push(BufferedUpdate { last_change_tick, message_tick, @@ -247,6 +250,7 @@ fn apply_update_message( return Ok(update_index); } + trace!("applying update message for {replicon_tick:?}"); apply_update_components( &mut cursor, world, From 
6939114b72a305e71d2a01e729004a704d91a598 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 16:15:06 +0200 Subject: [PATCH 26/63] Panic if update references a non-spawned entity --- src/client.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 10ce169a..36a7140e 100644 --- a/src/client.rs +++ b/src/client.rs @@ -381,7 +381,9 @@ fn apply_update_components( ) -> bincode::Result<()> { loop { let entity = deserialize_entity(cursor)?; - let mut entity = entity_map.get_by_server_or_spawn(world, entity); + let mut entity = entity_map + .get_by_server(world, entity) + .expect("updates should be applied only on spawned entities"); let Some(entity_tick) = entity_ticks.get_mut(&entity.id()) else { continue; // Update arrived arrive after a despawn from init message. }; @@ -479,6 +481,16 @@ impl ServerEntityMap { } } + pub(super) fn get_by_server<'a>( + &mut self, + world: &'a mut World, + server_entity: Entity, + ) -> Option> { + self.server_to_client + .get(&server_entity) + .map(|&entity| world.entity_mut(entity)) + } + pub(super) fn remove_by_server(&mut self, server_entity: Entity) -> Option { let client_entity = self.server_to_client.remove(&server_entity); if let Some(client_entity) = client_entity { From d2bed64997109b1b12eab5ef8f554b6b52003fd6 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 16:16:45 +0200 Subject: [PATCH 27/63] Fix comment --- src/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 36a7140e..5c3876d3 100644 --- a/src/client.rs +++ b/src/client.rs @@ -385,7 +385,7 @@ fn apply_update_components( .get_by_server(world, entity) .expect("updates should be applied only on spawned entities"); let Some(entity_tick) = entity_ticks.get_mut(&entity.id()) else { - continue; // Update arrived arrive after a despawn from init message. 
+ continue; // Update arrived after a despawn from init message. }; if *entity_tick >= message_tick { continue; // Update for this entity is outdated. From b711ab60222ca5639e0dd520ff2e2736b96823b2 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 16:18:16 +0200 Subject: [PATCH 28/63] Reverse condition Just for clarity. --- src/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 5c3876d3..03bedfc2 100644 --- a/src/client.rs +++ b/src/client.rs @@ -387,7 +387,7 @@ fn apply_update_components( let Some(entity_tick) = entity_ticks.get_mut(&entity.id()) else { continue; // Update arrived after a despawn from init message. }; - if *entity_tick >= message_tick { + if message_tick < *entity_tick { continue; // Update for this entity is outdated. } *entity_tick = message_tick; From af8ec14ea3fc0495329db2b7e67540958931f89e Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 17:20:38 +0200 Subject: [PATCH 29/63] Fix replication buffer slicing Now slice contains only valid data. We also no longer need to clear it. 
--- src/server/replication_messages.rs | 4 +- .../replication_buffer.rs | 19 +++++----- tests/replication.rs | 37 +++++++++++++++++++ 3 files changed, 48 insertions(+), 12 deletions(-) diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index 92ec2946..c1eaff56 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -197,7 +197,8 @@ impl UpdateMessage { replicon_tick: RepliconTick, tick: Tick, ) -> bincode::Result<()> { - if self.buffer.as_slice().is_empty() { + let mut slice = self.buffer.as_slice(); + if slice.is_empty() { trace!("no updates to send for client {}", client_info.id); return Ok(()); } @@ -207,7 +208,6 @@ impl UpdateMessage { let mut header = [0; TICKS_SIZE + mem::size_of::()]; bincode::serialize_into(&mut header[..], &(*last_change_tick, replicon_tick))?; - let mut slice = self.buffer.as_slice(); let mut entities = Vec::new(); let mut message_size = 0; for &(entity, data_size) in &self.entities { diff --git a/src/server/replication_messages/replication_buffer.rs b/src/server/replication_messages/replication_buffer.rs index 40306a28..100adeee 100644 --- a/src/server/replication_messages/replication_buffer.rs +++ b/src/server/replication_messages/replication_buffer.rs @@ -52,7 +52,6 @@ impl ReplicationBuffer { /// Keeps allocated capacity. pub(super) fn reset(&mut self) { self.cursor.set_position(0); - self.cursor.get_mut().clear(); self.arrays_with_data = 0; self.trailing_empty_arrays = 0; } @@ -83,7 +82,9 @@ impl ReplicationBuffer { /// Returns the buffer as a byte array. pub(super) fn as_slice(&self) -> &[u8] { - self.cursor.get_ref() + let slice = self.cursor.get_ref(); + let position = self.cursor.position() as usize; + &slice[..position] } /// Writes the `value` into the buffer. @@ -278,11 +279,10 @@ impl ReplicationBuffer { /// See also [`Self::start_entity_data`] and [`Self::end_entity_data`]. 
pub(crate) fn take_entity_data(&mut self, other: &mut Self) { if other.entity_data_len != 0 { - let slice = other.cursor.get_ref(); + let slice = other.as_slice(); let offset = other.entity_data_len_pos as usize + mem::size_of_val(&other.entity_data_len); - let pos = other.cursor.position() as usize; - self.cursor.write_all(&slice[offset..pos]).unwrap(); + self.cursor.write_all(&slice[offset..]).unwrap(); self.entity_data_len += other.entity_data_len; other.entity_data_len = 0; @@ -300,9 +300,9 @@ impl ReplicationBuffer { debug_assert_eq!(self.array_len, 0); debug_assert_eq!(self.entity_data_len, 0); - let used_len = self.cursor.get_ref().len() - - self.trailing_empty_arrays * mem::size_of_val(&self.array_len); - self.cursor.get_mut().truncate(used_len); + let extra_len = self.trailing_empty_arrays * mem::size_of_val(&self.array_len); + self.cursor + .set_position(self.cursor.position() - extra_len as u64); } } @@ -348,7 +348,6 @@ mod tests { fn trimming_arrays() -> bincode::Result<()> { let mut buffer = ReplicationBuffer::default(); - let begin_len = buffer.cursor.get_ref().len(); for _ in 0..3 { buffer.start_array(); buffer.end_array()?; @@ -356,7 +355,7 @@ mod tests { buffer.trim_empty_arrays(); - assert_eq!(buffer.cursor.get_ref().len(), begin_len); + assert!(buffer.as_slice().is_empty()); Ok(()) } diff --git a/tests/replication.rs b/tests/replication.rs index c7f2cd37..887ac437 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -245,6 +245,43 @@ fn update_replication() { } } +#[test] +fn insert_update_replication() { + let mut server_app = App::new(); + let mut client_app = App::new(); + for app in [&mut server_app, &mut client_app] { + app.add_plugins(( + MinimalPlugins, + ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), + )) + .replicate::() + .replicate::(); + } + + common::connect(&mut server_app, &mut client_app); + + let server_entity = server_app + .world + .spawn((Replication, BoolComponent(false))) + .id(); + + 
server_app.update(); + client_app.update(); + + let mut server_entity = server_app.world.entity_mut(server_entity); + server_entity.get_mut::().unwrap().0 = true; + server_entity.insert(TableComponent); + + server_app.update(); + client_app.update(); + + let component = client_app + .world + .query_filtered::<&BoolComponent, With>() + .single(&client_app.world); + assert!(component.0, "init messages should contain the update"); +} + #[test] fn update_replication_buffering() { let mut server_app = App::new(); From 6aeed9f02f523d5f633dfbaa941a2a034ac4a891 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 9 Dec 2023 18:03:45 +0200 Subject: [PATCH 30/63] Update docks --- src/lib.rs | 15 +++++++++------ src/server.rs | 2 +- src/server/replication_messages.rs | 4 ++-- .../replication_messages/replication_buffer.rs | 4 ++-- 4 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index e5506463..99b1dd0c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -375,14 +375,17 @@ They rarely used for gameplay systems (since you write the same logic for multiplayer and single-player!), but could be used for server creation / connection systems and corresponding UI. -## Guarantees +## Eventual consistency -Replicon maintains valid state of the world even with packet loss. All events, inserts, -removals and despawns will be applied in the same order as on the server. +All events, inserts, removals and despawns never replicated partially and will be applied in the same order as on the server. -Only component value changes per entity could arrive in any order and partially. But they will -be applied when the tick on which they changed also arrives. So if your component references another entity, -it will be applied only when this entity being spawned. +Component value changes could be applies partially. So clients could receive changes for some entities and miss for other. 
+But changes are grouped by entity, so client will never receive entity update partially. +Also updates will be applied only when the tick with other data on which they changed also arrives. +So if your component references another entity, it will be applied only when this entity being spawned. + +In other words clients should never assume that it's world state is the same as the server's on any given tick value-wise. +World state on the client is only "eventually consistent" with the server's. ## Limits diff --git a/src/server.rs b/src/server.rs index 4f1179d1..b7600670 100644 --- a/src/server.rs +++ b/src/server.rs @@ -359,7 +359,7 @@ fn collect_changes( /// Collects the component if it has been changed. /// -/// If the component has been changed in this tick, it will be collected into init buffer and last entity tick will be bumped. +/// If the component has been changed in this tick, it will be collected into init buffer. /// Otherwise if the component has been changed since the last entity tick for a client - it will be collected into update message. fn collect_component_change( messages: &mut ReplicationMessages, diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index c1eaff56..67327f35 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -130,7 +130,7 @@ impl InitMessage { self.buffer.write(&replicon_tick) } - /// Trims empty arrays from message and sends it to the specified client. + /// Trims empty arrays from the message and sends it to the specified client. /// /// Does nothing if there is no data to send. fn send(&mut self, server: &mut RenetServer, client_id: ClientId) { @@ -186,7 +186,7 @@ impl UpdateMessage { self.entities.push((self.buffer.data_entity(), data_size)); } - /// Splits message according to `entities` and sends it to the specified client. + /// Splits message according to entities inside it and sends it to the specified client. 
/// /// Does nothing if there is no data to send. fn send( diff --git a/src/server/replication_messages/replication_buffer.rs b/src/server/replication_messages/replication_buffer.rs index 100adeee..09f4db70 100644 --- a/src/server/replication_messages/replication_buffer.rs +++ b/src/server/replication_messages/replication_buffer.rs @@ -181,7 +181,7 @@ impl ReplicationBuffer { /// Data can contain components with their IDs or IDs only. /// Length will be increased automatically after writing data. /// Entity will be written lazily after first data write. - /// Can be called inside and outside of the array. + /// Can be called inside and outside of an array. /// See also [`Self::end_entity_data`], [`Self::write_component`] /// and [`Self::write_component_id`]. pub(crate) fn start_entity_data(&mut self, entity: Entity) { @@ -273,7 +273,7 @@ impl ReplicationBuffer { Ok(()) } - /// Removes entity data elements from `other` copies it. + /// Removes entity data elements from `other` and copies it. /// /// Ends entity data for `other`. /// See also [`Self::start_entity_data`] and [`Self::end_entity_data`]. 
From ccf9c9c3ea5dd8225659044dc05d275468589263 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Mon, 11 Dec 2023 22:52:13 +0200 Subject: [PATCH 31/63] Compare ticks before acknowledgment --- src/server.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/server.rs b/src/server.rs index b7600670..6991b69b 100644 --- a/src/server.rs +++ b/src/server.rs @@ -12,7 +12,7 @@ use bevy::{ prelude::*, ptr::Ptr, time::common_conditions::on_timer, - utils::{EntityHashMap, HashMap}, + utils::{hashbrown::hash_map::Entry, EntityHashMap, HashMap}, }; use bevy_renet::{ renet::{ClientId, RenetClient, RenetServer, ServerEvent}, @@ -103,6 +103,7 @@ impl ServerPlugin { } fn acks_receiving_system( + change_tick: SystemChangeTick, mut server: ResMut, mut clients_info: ResMut, ) { @@ -123,7 +124,18 @@ impl ServerPlugin { }; for entity in entities { - client_info.ticks.insert(entity, tick); + match client_info.ticks.entry(entity) { + Entry::Occupied(mut entry) => { + // Received tick could be outdated because we bump it + // if we detect any insertion on the entity in `collect_changes`. 
+ if !entry.get().is_newer_than(tick, change_tick.this_run()) { + *entry.get_mut() = tick; + } + } + Entry::Vacant(entry) => { + entry.insert(tick); + } + } } trace!( "client {} acknowledged an update with {tick:?}", From 0c5c3e7c73d4166fcab510527aedc08d03c50d2e Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Mon, 11 Dec 2023 23:16:22 +0200 Subject: [PATCH 32/63] Simplify logic --- src/server.rs | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/src/server.rs b/src/server.rs index 6991b69b..908320df 100644 --- a/src/server.rs +++ b/src/server.rs @@ -12,7 +12,7 @@ use bevy::{ prelude::*, ptr::Ptr, time::common_conditions::on_timer, - utils::{hashbrown::hash_map::Entry, EntityHashMap, HashMap}, + utils::{EntityHashMap, HashMap}, }; use bevy_renet::{ renet::{ClientId, RenetClient, RenetServer, ServerEvent}, @@ -124,17 +124,15 @@ impl ServerPlugin { }; for entity in entities { - match client_info.ticks.entry(entity) { - Entry::Occupied(mut entry) => { - // Received tick could be outdated because we bump it - // if we detect any insertion on the entity in `collect_changes`. - if !entry.get().is_newer_than(tick, change_tick.this_run()) { - *entry.get_mut() = tick; - } - } - Entry::Vacant(entry) => { - entry.insert(tick); - } + let last_tick = client_info + .ticks + .get_mut(&entity) + .expect("ticks should be added on insertion"); + + // Received tick could be outdated because we bump it + // if we detect any insertion on the entity in `collect_changes`. 
+ if !last_tick.is_newer_than(tick, change_tick.this_run()) { + *last_tick = tick; } } trace!( From 6e440e24390718a90fe9103fce241dce368e767c Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Mon, 11 Dec 2023 23:18:17 +0200 Subject: [PATCH 33/63] Rephrase panic message --- src/server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.rs b/src/server.rs index 908320df..04342b0e 100644 --- a/src/server.rs +++ b/src/server.rs @@ -127,7 +127,7 @@ impl ServerPlugin { let last_tick = client_info .ticks .get_mut(&entity) - .expect("ticks should be added on insertion"); + .expect("tick should be inserted on any component insertion"); // Received tick could be outdated because we bump it // if we detect any insertion on the entity in `collect_changes`. From 169296dfb8f0f1b4788a0572c996af599988722b Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Wed, 13 Dec 2023 22:23:48 +0200 Subject: [PATCH 34/63] Apply suggested docs --- src/client.rs | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/src/client.rs b/src/client.rs index 03bedfc2..98003b54 100644 --- a/src/client.rs +++ b/src/client.rs @@ -50,15 +50,18 @@ impl Plugin for ClientPlugin { impl ClientPlugin { /// Receives and applies replication messages from the server. /// - /// Init messages applied first to ensure the valid state. + /// Tick init messages are sent over the [`ReplicationChannel::Reliable`] and are applied first to ensure valid state + /// for entity updates. /// - /// Then update messages will be applied only if an init message with their tick has arrived. - /// If an update message received before the required init message, then they will be buffered until then. - /// Since they could arrive in any order, entities will be updated only if the received update requires a more recent tick. 
+ /// Entity update messages are sent over [`ReplicationChannel::Unreliable`], which means they may appear + /// ahead-of or behind init messages from the same server tick. An update will only be applied if its + /// change tick has already appeared in an init message, otherwise it will be buffered while waiting. + /// Since entity updates can arrive in any order, updates will only be applied if they correspond to a more + /// recent server tick than the last acked server tick for each entity. /// - /// And then the buffered messages from the last run are processed. + /// Buffered entity update messages are processed last. /// - /// Sends acknowledgments back for update messages. + /// Acknowledgments for received entity update messages are sent back to the server. /// /// See also [`ReplicationMessages`](crate::server::replication_messages::ReplicationMessages). pub(super) fn replication_receiving_system( @@ -369,7 +372,9 @@ fn apply_despawns( Ok(()) } -/// Deserializes replicated components of `components_kind` and applies them to the `world`. +/// Deserializes replicated component updates and applies them to the `world`. +/// +/// Consumes all remaining bytes in the cursor. fn apply_update_components( cursor: &mut Cursor<&[u8]>, world: &mut World, @@ -551,8 +556,7 @@ impl Mapper for ClientMapper<'_> { #[derive(Default, Deref, DerefMut, Resource)] pub(super) struct ServerEntityTicks(EntityHashMap); -/// Caches buffer with deserialized ticks that was received earlier -/// then the required init message with this tick. +/// Caches a partially-deserialized entity update message that is waiting for its tick to appear in an init message. /// /// See also [`crate::server::replication_messages::UpdateMessage`]. 
pub(super) struct BufferedUpdate { From ab3387b8aef7af067d088c1611cafeca9bdec42e Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Wed, 13 Dec 2023 22:23:56 +0200 Subject: [PATCH 35/63] Use while instead of loop --- src/client.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/client.rs b/src/client.rs index 98003b54..d7aaf1ef 100644 --- a/src/client.rs +++ b/src/client.rs @@ -384,7 +384,8 @@ fn apply_update_components( replication_rules: &ReplicationRules, message_tick: RepliconTick, ) -> bincode::Result<()> { - loop { + let len = cursor.get_ref().len() as u64; + while cursor.position() < len { let entity = deserialize_entity(cursor)?; let mut entity = entity_map .get_by_server(world, entity) @@ -408,10 +409,6 @@ fn apply_update_components( let replication_info = unsafe { replication_rules.get_info_unchecked(replication_id) }; (replication_info.deserialize)(&mut entity, entity_map, cursor, message_tick)? } - - if cursor.position() as usize == cursor.get_ref().len() { - break; - } } Ok(()) From 1b2c7c483f505ee5a97830f046a2115977a7c978 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Wed, 13 Dec 2023 22:47:54 +0200 Subject: [PATCH 36/63] Move buffered updates into resource to be able to reset them But since it makes indentation level of `replication_receiving_system` even more insane, so I moved it into a separate function. 
--- src/client.rs | 141 +++++++++++++++++++++++++++++--------------------- 1 file changed, 82 insertions(+), 59 deletions(-) diff --git a/src/client.rs b/src/client.rs index d7aaf1ef..b427b7ec 100644 --- a/src/client.rs +++ b/src/client.rs @@ -25,6 +25,7 @@ impl Plugin for ClientPlugin { app.add_plugins((RenetClientPlugin, NetcodeClientPlugin)) .init_resource::() .init_resource::() + .init_resource::() .configure_sets( PreUpdate, ClientSet::Receive.after(NetcodeClientPlugin::update_system), @@ -64,77 +65,29 @@ impl ClientPlugin { /// Acknowledgments for received entity update messages are sent back to the server. /// /// See also [`ReplicationMessages`](crate::server::replication_messages::ReplicationMessages). - pub(super) fn replication_receiving_system( - world: &mut World, - mut buffered_updates: Local>, - ) -> bincode::Result<()> { + pub(super) fn replication_receiving_system(world: &mut World) -> bincode::Result<()> { world.resource_scope(|world, mut client: Mut| { world.resource_scope(|world, mut entity_map: Mut| { world.resource_scope(|world, mut entity_ticks: Mut| { - world.resource_scope(|world, replication_rules: Mut| { - let mut stats = world.remove_resource::(); - while let Some(message) = - client.receive_message(ReplicationChannel::Reliable) - { - apply_init_message( - &message, - world, - &mut entity_map, - &mut entity_ticks, - stats.as_mut(), - &replication_rules, - )?; - } - - let replicon_tick = *world.resource::(); - while let Some(message) = - client.receive_message(ReplicationChannel::Unreliable) - { - let index = apply_update_message( - message, + world.resource_scope(|world, mut buffered_updates: Mut| { + world.resource_scope(|world, replication_rules: Mut| { + let mut stats = world.remove_resource::(); + apply_replication( world, + &mut client, &mut entity_map, &mut entity_ticks, &mut buffered_updates, stats.as_mut(), &replication_rules, - replicon_tick, )?; - client.send_message( - ReplicationChannel::Reliable, - 
bincode::serialize(&index)?, - ) - } - - let mut result = Ok(()); - buffered_updates.retain(|update| { - if update.last_change_tick > replicon_tick { - return true; - } - - trace!("applying buffered update message for {replicon_tick:?}"); - if let Err(e) = apply_update_components( - &mut Cursor::new(&*update.message), - world, - &mut entity_map, - &mut entity_ticks, - stats.as_mut(), - &replication_rules, - update.message_tick, - ) { - result = Err(e); + if let Some(stats) = stats { + world.insert_resource(stats); } - false - }); - result?; - - if let Some(stats) = stats { - world.insert_resource(stats); - } - - Ok(()) + Ok(()) + }) }) }) }) @@ -145,13 +98,80 @@ impl ClientPlugin { mut replicon_tick: ResMut, mut entity_map: ResMut, mut entity_ticks: ResMut, + mut buffered_updates: ResMut, ) { *replicon_tick = Default::default(); entity_map.clear(); entity_ticks.clear(); + buffered_updates.clear(); } } +/// Reads all received messages and applies them. +/// +/// Sends acknowledgments for update messages back. +fn apply_replication( + world: &mut World, + client: &mut RenetClient, + entity_map: &mut ServerEntityMap, + entity_ticks: &mut ServerEntityTicks, + buffered_updates: &mut BufferedUpdates, + mut stats: Option<&mut ClientStats>, + replication_rules: &ReplicationRules, +) -> Result<(), Box> { + while let Some(message) = client.receive_message(ReplicationChannel::Reliable) { + apply_init_message( + &message, + world, + entity_map, + entity_ticks, + stats.as_deref_mut(), + replication_rules, + )?; + } + + let replicon_tick = *world.resource::(); + while let Some(message) = client.receive_message(ReplicationChannel::Unreliable) { + let index = apply_update_message( + message, + world, + entity_map, + entity_ticks, + buffered_updates, + stats.as_deref_mut(), + replication_rules, + replicon_tick, + )?; + + client.send_message(ReplicationChannel::Reliable, bincode::serialize(&index)?) 
+ } + + let mut result = Ok(()); + buffered_updates.retain(|update| { + if update.last_change_tick > replicon_tick { + return true; + } + + trace!("applying buffered update message for {replicon_tick:?}"); + if let Err(e) = apply_update_components( + &mut Cursor::new(&*update.message), + world, + entity_map, + entity_ticks, + stats.as_deref_mut(), + replication_rules, + update.message_tick, + ) { + result = Err(e); + } + + false + }); + result?; + + Ok(()) +} + /// Applies [`InitMessage`](crate::server::replication_messages::InitMessage). fn apply_init_message( message: &[u8], @@ -230,7 +250,7 @@ fn apply_update_message( world: &mut World, entity_map: &mut ServerEntityMap, entity_ticks: &mut ServerEntityTicks, - buffered_updates: &mut Vec, + buffered_updates: &mut BufferedUpdates, mut stats: Option<&mut ClientStats>, replication_rules: &ReplicationRules, replicon_tick: RepliconTick, @@ -553,6 +573,9 @@ impl Mapper for ClientMapper<'_> { #[derive(Default, Deref, DerefMut, Resource)] pub(super) struct ServerEntityTicks(EntityHashMap); +#[derive(Default, Deref, DerefMut, Resource)] +pub(super) struct BufferedUpdates(Vec); + /// Caches a partially-deserialized entity update message that is waiting for its tick to appear in an init message. /// /// See also [`crate::server::replication_messages::UpdateMessage`]. From 7a5510f700b924b07e53fdbb327b038f9256fb0f Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Wed, 13 Dec 2023 23:22:55 +0200 Subject: [PATCH 37/63] Remove extra map lookup Can't happen with reliable channel. 
--- src/client.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/client.rs b/src/client.rs index b427b7ec..3ade874b 100644 --- a/src/client.rs +++ b/src/client.rs @@ -302,14 +302,6 @@ fn apply_entity_mappings( let server_entity = deserialize_entity(cursor)?; let client_entity = deserialize_entity(cursor)?; - if let Some(entry) = entity_map.to_client().get(&server_entity) { - // It's possible to receive the same mappings in multiple packets if the server has not - // yet received an ack from the client for the tick when the mapping was created. - if *entry != client_entity { - panic!("received mapping from {server_entity:?} to {client_entity:?}, but already mapped to {entry:?}"); - } - } - if let Some(mut entity) = world.get_entity_mut(client_entity) { debug!("received mapping from {server_entity:?} to {client_entity:?}"); entity.insert(Replication); @@ -480,9 +472,16 @@ pub struct ServerEntityMap { } impl ServerEntityMap { + /// Inserts a server-client pair into the map. + /// + /// # Panics + /// + /// Panics if this mapping is already present. 
#[inline] pub fn insert(&mut self, server_entity: Entity, client_entity: Entity) { - self.server_to_client.insert(server_entity, client_entity); + if let Some(existing_entity) = self.server_to_client.insert(server_entity, client_entity) { + panic!("mapping {server_entity:?} to {client_entity:?}, but it's already mapped to {existing_entity:?}"); + } self.client_to_server.insert(client_entity, server_entity); } From c21f4549fb7af0ee78a91ccb38465abba75b95eb Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Wed, 13 Dec 2023 23:31:47 +0200 Subject: [PATCH 38/63] Use more correct check --- src/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 3ade874b..8bed924d 100644 --- a/src/client.rs +++ b/src/client.rs @@ -405,7 +405,7 @@ fn apply_update_components( let Some(entity_tick) = entity_ticks.get_mut(&entity.id()) else { continue; // Update arrived after a despawn from init message. }; - if message_tick < *entity_tick { + if message_tick <= *entity_tick { continue; // Update for this entity is outdated. } *entity_tick = message_tick; From e5f428c35c4eff0501c40a92306e70fe0e1079c4 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Wed, 13 Dec 2023 23:49:51 +0200 Subject: [PATCH 39/63] Add debug assertion I used shorter form because we already printed the tick in `trace!`. 
--- src/client.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client.rs b/src/client.rs index 8bed924d..1da912d9 100644 --- a/src/client.rs +++ b/src/client.rs @@ -191,6 +191,7 @@ fn apply_init_message( let replicon_tick = bincode::deserialize_from(&mut cursor)?; trace!("applying init message for {replicon_tick:?}"); *world.resource_mut::() = replicon_tick; + debug_assert_eq!(cursor.position(), end_pos, "init message can't be empty"); apply_entity_mappings(&mut cursor, world, entity_map, stats.as_deref_mut())?; if cursor.position() == end_pos { From d961f366bd93f99f8a9a92cc75334d32233ca1bd Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Thu, 14 Dec 2023 00:40:53 +0200 Subject: [PATCH 40/63] Fix wrong condition --- src/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 1da912d9..94631988 100644 --- a/src/client.rs +++ b/src/client.rs @@ -191,7 +191,7 @@ fn apply_init_message( let replicon_tick = bincode::deserialize_from(&mut cursor)?; trace!("applying init message for {replicon_tick:?}"); *world.resource_mut::() = replicon_tick; - debug_assert_eq!(cursor.position(), end_pos, "init message can't be empty"); + debug_assert!(cursor.position() < end_pos, "init message can't be empty"); apply_entity_mappings(&mut cursor, world, entity_map, stats.as_deref_mut())?; if cursor.position() == end_pos { From 9e24aba3e1d76f304aeb2e041367c1c783acb1ce Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Fri, 15 Dec 2023 20:43:54 +0200 Subject: [PATCH 41/63] Apply docs suggestion --- src/lib.rs | 14 +++++++----- src/server.rs | 4 ++-- src/server/replication_messages.rs | 22 ++++++++++++------- .../replication_buffer.rs | 4 ++-- 4 files changed, 26 insertions(+), 18 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 99b1dd0c..f59b7efb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -377,14 +377,16 @@ creation / connection systems and corresponding UI. 
## Eventual consistency -All events, inserts, removals and despawns never replicated partially and will be applied in the same order as on the server. +All events, inserts, removals and despawns will be applied to clients in the same order as on the server. -Component value changes could be applies partially. So clients could receive changes for some entities and miss for other. -But changes are grouped by entity, so client will never receive entity update partially. -Also updates will be applied only when the tick with other data on which they changed also arrives. -So if your component references another entity, it will be applied only when this entity being spawned. +Entity component updates are grouped by entity, and component groupings may be applied to clients in a different order than on the server. +For example, if two entities are spawned in tick 1 on the server and their components are updated in tick 2, +then the client is guaranteed to see the spawns at the same time, but the component updates may appear in different client ticks. -In other words clients should never assume that it's world state is the same as the server's on any given tick value-wise. +If a component is dependent on other data, updates to the component will only be applied to the client when that data has arrived. +So if your component references another entity, updates to that component will only be applied when the referenced entity has been spawned on the client. + +Clients should never assume their world state is the same as the server's on any given tick value-wise. World state on the client is only "eventually consistent" with the server's. 
## Limits diff --git a/src/server.rs b/src/server.rs index 04342b0e..2eb8de5f 100644 --- a/src/server.rs +++ b/src/server.rs @@ -387,7 +387,7 @@ fn collect_component_change( let tick = *client_info .ticks .get(&entity) - .expect("entity should present after adding component"); + .expect("entity should be present after adding component"); if ticks.is_changed(tick, change_tick.this_run()) { update_message.write_component(replication_info, replication_id, component)?; } @@ -528,7 +528,7 @@ impl ClientInfo { } } -/// Contains the last tick on which the world was changed. +/// Contains the last tick in which a replicated entity was spawned, despawned, or gained/lost a component. /// /// It should be included in update messages and server events instead of the current tick /// to avoid needless waiting for the next init message to arrive. diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index 67327f35..69ecd1d8 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -11,7 +11,7 @@ use replication_buffer::ReplicationBuffer; /// Accumulates replication messages and sends them to clients. /// -/// Messages serialized and deserialized manually because using an intermediate structure +/// Messages are serialized and deserialized manually because using an intermediate structure /// leads to allocations and according to our benchmarks it's much slower. /// /// Reuses allocated memory from older messages. @@ -26,7 +26,7 @@ impl ReplicationMessages { /// Initializes messages for each client. /// /// Reuses already allocated messages. - /// Creates new messages if number of clients is bigger then the number of allocated messages. + /// Creates new messages if the number of clients is bigger then the number of allocated messages. /// If there are more messages than the number of clients, then the extra messages remain untouched /// and iteration methods will not include them. 
pub(super) fn prepare( @@ -71,7 +71,11 @@ impl ReplicationMessages { }) } - /// Sends all messages and returns updated last change tick with clients info that was consumed in [`Self::prepare`]. + /// Sends cached messages to clients specified in the last [`Self::prepare`] call. + /// + /// Returns the server's last change tick, which will equal the latest replicon tick if any init + /// messages were sent to clients. If only update messages were sent (or no messages at all) then + /// it will equal the input `last_change_tick`. pub(super) fn send( &mut self, server: &mut RenetServer, @@ -150,13 +154,15 @@ impl InitMessage { } } -/// A reusable message with component updates. +/// A reusable message with replicated component updates. /// /// Contains last change tick, current tick and component updates since the last acknowledged tick for each entity. -/// Requires init message with the same tick as last change tick to be applied to keep the world in valid state. -/// The message will be manually split into packets up to max size that can be applied independently. -/// Splits will happen per-entity to avoid weird behavior of partially changed entity. -/// Sent over [`ReplicationChannel::Unreliable`] channel. +/// Cannot be applied on the client until the init message matching this update message's last change tick +/// has been applied to the client world. +/// The message will be manually split into packets up to max size, and each packet will be applied +/// independently on the client. +/// Message splits only happen per-entity to avoid weird behavior from partial entity updates. +/// Sent over the [`ReplicationChannel::Unreliable`] channel. 
/// /// See also [Limits](../index.html#limits) #[derive(Deref, DerefMut, Default)] diff --git a/src/server/replication_messages/replication_buffer.rs b/src/server/replication_messages/replication_buffer.rs index 09f4db70..5c7063c8 100644 --- a/src/server/replication_messages/replication_buffer.rs +++ b/src/server/replication_messages/replication_buffer.rs @@ -207,7 +207,7 @@ impl ReplicationBuffer { /// Ends writing entity data by writing its length into the last remembered position. /// /// If the entity data is empty, nothing will be written. - /// Increases array length if writing is done inside the array. + /// Increases array length if writing is done inside an array. /// See also [`Self::start_array`], [`Self::write_component`] and /// [`Self::write_component_id`]. pub(crate) fn end_entity_data(&mut self) -> bincode::Result<()> { @@ -294,7 +294,7 @@ impl ReplicationBuffer { /// Crops empty arrays at the end. /// /// Should only be called after all arrays have been written, because - /// removed array somewhere the middle cannot be detected during deserialization. + /// arrays removed somewhere the middle cannot be detected during deserialization. 
pub(super) fn trim_empty_arrays(&mut self) { debug_assert!(!self.inside_array); debug_assert_eq!(self.array_len, 0); From b26297717e6065053fccc149ceba61c248176c62 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Fri, 15 Dec 2023 20:44:03 +0200 Subject: [PATCH 42/63] Get messages for fist client instead of last --- src/server/replication_messages.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index 69ecd1d8..2a817298 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -83,7 +83,7 @@ impl ReplicationMessages { replicon_tick: RepliconTick, tick: Tick, ) -> bincode::Result<(LastChangeTick, Vec)> { - if let Some((init_message, _)) = self.data.last() { + if let Some((init_message, _)) = self.data.first() { if init_message.arrays_with_data() != 0 { last_change_tick.0 = replicon_tick; } From a706c4bac5ce8fa883d67138609f6c88ef6cadb7 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Fri, 15 Dec 2023 20:46:39 +0200 Subject: [PATCH 43/63] Move replication buffer into a server submodule --- src/server.rs | 1 + .../replication_buffer.rs | 22 +++++++++---------- src/server/replication_messages.rs | 5 +---- 3 files changed, 13 insertions(+), 15 deletions(-) rename src/server/{replication_messages => }/replication_buffer.rs (95%) diff --git a/src/server.rs b/src/server.rs index 2eb8de5f..4d3457e5 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1,3 +1,4 @@ +pub(super) mod replication_buffer; pub(super) mod replication_messages; use std::{mem, time::Duration}; diff --git a/src/server/replication_messages/replication_buffer.rs b/src/server/replication_buffer.rs similarity index 95% rename from src/server/replication_messages/replication_buffer.rs rename to src/server/replication_buffer.rs index 5c7063c8..930aaf06 100644 --- a/src/server/replication_messages/replication_buffer.rs +++ b/src/server/replication_buffer.rs @@ -14,7 +14,7 
@@ use crate::{ }; /// A reusable buffer with replicated data. -pub(crate) struct ReplicationBuffer { +pub(super) struct ReplicationBuffer { /// Serialized data. cursor: Cursor>, @@ -69,7 +69,7 @@ impl ReplicationBuffer { /// Returns length of the current entity data. /// /// See also [`Self::start_entity_data`] and [`Self::end_entity_data`]. - pub(crate) fn entity_data_len(&self) -> u8 { + pub(super) fn entity_data_len(&self) -> u8 { self.entity_data_len } @@ -105,7 +105,7 @@ impl ReplicationBuffer { /// Arrays can contain entity data or despawns inside. /// Length will be increased automatically after writing data. /// See also [`Self::end_array`], [`Self::write_client_mapping`], [`Self::write_entity`] and [`Self::start_entity_data`]. - pub(crate) fn start_array(&mut self) { + pub(super) fn start_array(&mut self) { debug_assert_eq!(self.array_len, 0); debug_assert!(!self.inside_array); @@ -118,7 +118,7 @@ impl ReplicationBuffer { /// Ends writing array by writing its length into the last remembered position. /// /// See also [`Self::start_array`]. - pub(crate) fn end_array(&mut self) -> bincode::Result<()> { + pub(super) fn end_array(&mut self) -> bincode::Result<()> { debug_assert!(self.inside_array); if self.array_len != 0 { @@ -146,7 +146,7 @@ impl ReplicationBuffer { /// Should be called only inside array. /// Increases array length by 1. /// See also [`Self::start_array`]. - pub(crate) fn write_client_mapping(&mut self, mapping: &ClientMapping) -> bincode::Result<()> { + pub(super) fn write_client_mapping(&mut self, mapping: &ClientMapping) -> bincode::Result<()> { debug_assert!(self.inside_array); serialize_entity(&mut self.cursor, mapping.server_entity)?; @@ -164,7 +164,7 @@ impl ReplicationBuffer { /// Should be called only inside array. /// Increases array length by 1. /// See also [`Self::start_array`]. 
- pub(crate) fn write_entity(&mut self, entity: Entity) -> bincode::Result<()> { + pub(super) fn write_entity(&mut self, entity: Entity) -> bincode::Result<()> { debug_assert!(self.inside_array); serialize_entity(&mut self.cursor, entity)?; @@ -184,7 +184,7 @@ impl ReplicationBuffer { /// Can be called inside and outside of an array. /// See also [`Self::end_entity_data`], [`Self::write_component`] /// and [`Self::write_component_id`]. - pub(crate) fn start_entity_data(&mut self, entity: Entity) { + pub(super) fn start_entity_data(&mut self, entity: Entity) { debug_assert_eq!(self.entity_data_len, 0); self.data_entity = entity; @@ -210,7 +210,7 @@ impl ReplicationBuffer { /// Increases array length if writing is done inside an array. /// See also [`Self::start_array`], [`Self::write_component`] and /// [`Self::write_component_id`]. - pub(crate) fn end_entity_data(&mut self) -> bincode::Result<()> { + pub(super) fn end_entity_data(&mut self) -> bincode::Result<()> { if self.entity_data_len != 0 { let previous_pos = self.cursor.position(); self.cursor.set_position(self.entity_data_len_pos); @@ -237,7 +237,7 @@ impl ReplicationBuffer { /// Should be called only inside entity data. /// Increases entity data length by 1. /// See also [`Self::start_entity_data`]. - pub(crate) fn write_component( + pub(super) fn write_component( &mut self, replication_info: &ReplicationInfo, replication_id: ReplicationId, @@ -259,7 +259,7 @@ impl ReplicationBuffer { /// Should be called only inside entity data. /// Increases entity data length by 1. /// See also [`Self::start_entity_data`]. - pub(crate) fn write_replication_id( + pub(super) fn write_replication_id( &mut self, replication_id: ReplicationId, ) -> bincode::Result<()> { @@ -277,7 +277,7 @@ impl ReplicationBuffer { /// /// Ends entity data for `other`. /// See also [`Self::start_entity_data`] and [`Self::end_entity_data`]. 
- pub(crate) fn take_entity_data(&mut self, other: &mut Self) { + pub(super) fn take_entity_data(&mut self, other: &mut Self) { if other.entity_data_len != 0 { let slice = other.as_slice(); let offset = diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index 2a817298..0a13c143 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -1,13 +1,10 @@ -pub(super) mod replication_buffer; - use std::mem; use bevy::{ecs::component::Tick, prelude::*}; use bevy_renet::renet::{Bytes, ClientId, RenetServer}; -use super::{ClientInfo, LastChangeTick}; +use super::{replication_buffer::ReplicationBuffer, ClientInfo, LastChangeTick}; use crate::replicon_core::{replicon_tick::RepliconTick, ReplicationChannel}; -use replication_buffer::ReplicationBuffer; /// Accumulates replication messages and sends them to clients. /// From 65240e35a1655af2ab3cc4245ee9725a21beba22 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Fri, 15 Dec 2023 21:22:48 +0200 Subject: [PATCH 44/63] Improve group strategy and test it properly --- src/server/replication_messages.rs | 12 +++-- tests/replication.rs | 83 +++++++++++++++++++++++++++++- 2 files changed, 90 insertions(+), 5 deletions(-) diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index 0a13c143..9997cfa7 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -215,7 +215,14 @@ impl UpdateMessage { let mut message_size = 0; for &(entity, data_size) in &self.entities { const MAX_PACKET_SIZE: usize = 1200; // https://github.com/lucaspoffo/renet/blob/acee8b470e34c70d35700d96c00fb233d9cf6919/renet/src/packet.rs#L7 - if message_size + data_size + header.len() > MAX_PACKET_SIZE { + + // Pack small messages ordered after large messages into the large message's packet ('skip over' strategy). 
+ if message_size == 0 + || ((message_size + header.len()) % MAX_PACKET_SIZE) + 1 <= data_size + { + entities.push(entity); + message_size += data_size; + } else { let (message, remaining) = slice.split_at(message_size); slice = remaining; message_size = data_size; @@ -230,9 +237,6 @@ impl UpdateMessage { ); entities.clear(); - } else { - entities.push(entity); - message_size += data_size; } } diff --git a/tests/replication.rs b/tests/replication.rs index 887ac437..c0cbe847 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -214,6 +214,84 @@ fn update_replication() { common::connect(&mut server_app, &mut client_app); + let server_entity = server_app + .world + .spawn((Replication, BoolComponent(false))) + .id(); + + server_app.update(); + client_app.update(); + + let mut component = server_app + .world + .get_mut::(server_entity) + .unwrap(); + component.0 = true; + + server_app.update(); + client_app.update(); + + let component = client_app + .world + .query::<&BoolComponent>() + .single(&client_app.world); + assert!(component.0); +} + +#[test] +fn big_entity_update_replication() { + let mut server_app = App::new(); + let mut client_app = App::new(); + for app in [&mut server_app, &mut client_app] { + app.add_plugins(( + MinimalPlugins, + ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), + )) + .replicate::(); + } + + common::connect(&mut server_app, &mut client_app); + + let server_entity = server_app + .world + .spawn((Replication, VecComponent::default())) + .id(); + + server_app.update(); + client_app.update(); + + // To exceed packed size. 
+ const BIG_DATA: &[u8] = &[0; 1200]; + let mut component = server_app + .world + .get_mut::(server_entity) + .unwrap(); + component.0 = BIG_DATA.to_vec(); + + server_app.update(); + client_app.update(); + + let component = client_app + .world + .query::<&VecComponent>() + .single(&client_app.world); + assert_eq!(component.0, BIG_DATA); +} + +#[test] +fn many_entities_update_replication() { + let mut server_app = App::new(); + let mut client_app = App::new(); + for app in [&mut server_app, &mut client_app] { + app.add_plugins(( + MinimalPlugins, + ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), + )) + .replicate::(); + } + + common::connect(&mut server_app, &mut client_app); + // Spawn many entities to cover message splitting. const ENTITIES_COUNT: u32 = 300; server_app @@ -536,9 +614,12 @@ struct NonReplicatingComponent; #[derive(Component, Deserialize, Serialize)] struct IgnoredComponent; -#[derive(Component, Clone, Copy, Serialize, Deserialize)] +#[derive(Clone, Component, Copy, Deserialize, Serialize)] struct BoolComponent(bool); +#[derive(Component, Default, Deserialize, Serialize)] +struct VecComponent(Vec); + #[derive(Component, Default, Deserialize, Reflect, Serialize)] #[reflect(Component)] struct ReflectedComponent; From 16e3c18a6639a51f6b06cfcff6441099bc479b68 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Fri, 15 Dec 2023 21:31:58 +0200 Subject: [PATCH 45/63] Add `is_sendable` helpers --- src/server/replication_messages.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index 9997cfa7..9bf63267 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -81,7 +81,7 @@ impl ReplicationMessages { tick: Tick, ) -> bincode::Result<(LastChangeTick, Vec)> { if let Some((init_message, _)) = self.data.first() { - if init_message.arrays_with_data() != 0 { + if init_message.is_sendable() { 
last_change_tick.0 = replicon_tick; } } @@ -131,11 +131,16 @@ impl InitMessage { self.buffer.write(&replicon_tick) } + /// Returns `true` is message contains any non-empty arrays. + fn is_sendable(&self) -> bool { + self.buffer.arrays_with_data() != 0 + } + /// Trims empty arrays from the message and sends it to the specified client. /// /// Does nothing if there is no data to send. fn send(&mut self, server: &mut RenetServer, client_id: ClientId) { - if self.buffer.arrays_with_data() == 0 { + if !self.is_sendable() { trace!("no init data to send for client {client_id}"); return; } @@ -189,6 +194,11 @@ impl UpdateMessage { self.entities.push((self.buffer.data_entity(), data_size)); } + /// Returns `true` is message contains any written data. + fn is_sendable(&self) -> bool { + !self.buffer.as_slice().is_empty() + } + /// Splits message according to entities inside it and sends it to the specified client. /// /// Does nothing if there is no data to send. @@ -200,8 +210,7 @@ impl UpdateMessage { replicon_tick: RepliconTick, tick: Tick, ) -> bincode::Result<()> { - let mut slice = self.buffer.as_slice(); - if slice.is_empty() { + if !self.is_sendable() { trace!("no updates to send for client {}", client_info.id); return Ok(()); } @@ -211,6 +220,7 @@ impl UpdateMessage { let mut header = [0; TICKS_SIZE + mem::size_of::()]; bincode::serialize_into(&mut header[..], &(*last_change_tick, replicon_tick))?; + let mut slice = self.buffer.as_slice(); let mut entities = Vec::new(); let mut message_size = 0; for &(entity, data_size) in &self.entities { From ded251ec38eea3f66426ca56bbd2a25d77166c4f Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Fri, 15 Dec 2023 21:54:49 +0200 Subject: [PATCH 46/63] Apply clippy suggestion and update description --- src/server/replication_messages.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index 9bf63267..fcb68bf5 100644 --- 
a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -226,10 +226,8 @@ impl UpdateMessage { for &(entity, data_size) in &self.entities { const MAX_PACKET_SIZE: usize = 1200; // https://github.com/lucaspoffo/renet/blob/acee8b470e34c70d35700d96c00fb233d9cf6919/renet/src/packet.rs#L7 - // Pack small messages ordered after large messages into the large message's packet ('skip over' strategy). - if message_size == 0 - || ((message_size + header.len()) % MAX_PACKET_SIZE) + 1 <= data_size - { + // Pack small messages ordered after large messages into the large message's packet ('pack back' strategy). + if message_size == 0 || ((message_size + header.len()) % MAX_PACKET_SIZE) < data_size { entities.push(entity); message_size += data_size; } else { From d0a24598f25066d823dd85aea4ebe3aaa00a3f1e Mon Sep 17 00:00:00 2001 From: koe Date: Fri, 15 Dec 2023 23:38:02 -0600 Subject: [PATCH 47/63] bug fix: client should skip component updates that aren't needed --- src/client.rs | 50 ++++++++++++++++++++------------ src/lib.rs | 6 ++-- src/server/replication_buffer.rs | 14 +++++---- tests/replication.rs | 2 +- 4 files changed, 44 insertions(+), 28 deletions(-) diff --git a/src/client.rs b/src/client.rs index 94631988..5388c088 100644 --- a/src/client.rs +++ b/src/client.rs @@ -330,15 +330,13 @@ fn apply_init_components( let entities_count: u16 = bincode::deserialize_from(&mut *cursor)?; for _ in 0..entities_count { let entity = deserialize_entity(cursor)?; + let update_bytes: u16 = bincode::deserialize_from(&mut *cursor)?; let mut entity = entity_map.get_by_server_or_spawn(world, entity); entity_ticks.insert(entity.id(), replicon_tick); - let components_count: u8 = bincode::deserialize_from(&mut *cursor)?; - if let Some(stats) = &mut stats { - stats.entities_changed += 1; - stats.components_changed += components_count as u32; - } - for _ in 0..components_count { + let end_pos = cursor.position() + update_bytes as u64; + let mut components_count = 0u32; + 
while cursor.position() < end_pos { let replication_id = DefaultOptions::new().deserialize_from(&mut *cursor)?; // SAFETY: server and client have identical `ReplicationRules` and server always sends valid IDs. let replication_info = unsafe { replication_rules.get_info_unchecked(replication_id) }; @@ -348,6 +346,11 @@ fn apply_init_components( } ComponentsKind::Removal => (replication_info.remove)(&mut entity, replicon_tick), } + components_count += 1; + } + if let Some(stats) = &mut stats { + stats.entities_changed += 1; + stats.components_changed += components_count; } } @@ -399,28 +402,37 @@ fn apply_update_components( ) -> bincode::Result<()> { let len = cursor.get_ref().len() as u64; while cursor.position() < len { - let entity = deserialize_entity(cursor)?; - let mut entity = entity_map - .get_by_server(world, entity) - .expect("updates should be applied only on spawned entities"); + let server_entity = deserialize_entity(cursor)?; + let update_bytes: u16 = bincode::deserialize_from(&mut *cursor)?; + let Some(mut entity) = entity_map.get_by_server(world, server_entity) else { + debug!("ignoring update received for unknown server entity {server_entity:?}"); + cursor.set_position(cursor.position() + update_bytes as u64); + continue; + }; let Some(entity_tick) = entity_ticks.get_mut(&entity.id()) else { - continue; // Update arrived after a despawn from init message. + error!("replicon tick missing for client entity {:?} mapped to server entity {server_entity:?}", entity.id()); + cursor.set_position(cursor.position() + update_bytes as u64); + continue; }; if message_tick <= *entity_tick { - continue; // Update for this entity is outdated. 
+ trace!("ignoring outdated update for client entity {:?} mapped to server entity {server_entity:?}", entity.id()); + cursor.set_position(cursor.position() + update_bytes as u64); + continue; } *entity_tick = message_tick; - let components_count: u8 = bincode::deserialize_from(&mut *cursor)?; - if let Some(stats) = &mut stats { - stats.entities_changed += 1; - stats.components_changed += components_count as u32; - } - for _ in 0..components_count { + let end_pos = cursor.position() + update_bytes as u64; + let mut components_count = 0u32; + while cursor.position() < end_pos { let replication_id = DefaultOptions::new().deserialize_from(&mut *cursor)?; // SAFETY: server and client have identical `ReplicationRules` and server always sends valid IDs. let replication_info = unsafe { replication_rules.get_info_unchecked(replication_id) }; - (replication_info.deserialize)(&mut entity, entity_map, cursor, message_tick)? + (replication_info.deserialize)(&mut entity, entity_map, cursor, message_tick)?; + components_count += 1; + } + if let Some(stats) = &mut stats { + stats.entities_changed += 1; + stats.components_changed += components_count; } } diff --git a/src/lib.rs b/src/lib.rs index f59b7efb..4ce92507 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -393,9 +393,9 @@ World state on the client is only "eventually consistent" with the server's. To reduce packet size there are the following limits per replication update: -- Up to [`u16::MAX`] entities that have added components with up to [`u8::MAX`] such components. -- Up to [`u16::MAX`] entities that have changed components with up to [`u8::MAX`] such components. -- Up to [`u16::MAX`] entities that have removed components with up to [`u8::MAX`] such components. +- Up to [`u16::MAX`] entities that have added components with up to [`u16::MAX`] bytes of component data. +- Up to [`u16::MAX`] entities that have changed components with up to [`u16::MAX`] bytes of component data. 
+- Up to [`u16::MAX`] entities that have removed components with up to [`u16::MAX`] bytes of component data. - Up to [`u16::MAX`] entities that were despawned. */ diff --git a/src/server/replication_buffer.rs b/src/server/replication_buffer.rs index 930aaf06..26114c62 100644 --- a/src/server/replication_buffer.rs +++ b/src/server/replication_buffer.rs @@ -39,8 +39,8 @@ pub(super) struct ReplicationBuffer { /// Position of entity data length from last call of [`Self::write_data_entity`]. entity_data_len_pos: u64, - /// Length of the data for entity that updated automatically after writing data. - entity_data_len: u8, + /// Length in bytes of the component data stored for the currently-being-written entity. + entity_data_len: u16, /// Entity from last call of [`Self::start_entity_data`]. data_entity: Entity, @@ -66,10 +66,10 @@ impl ReplicationBuffer { self.entity_data_pos } - /// Returns length of the current entity data. + /// Returns length in bytes of the current entity data. /// /// See also [`Self::start_entity_data`] and [`Self::end_entity_data`]. 
- pub(super) fn entity_data_len(&self) -> u8 { + pub(super) fn entity_data_len(&self) -> u16 { self.entity_data_len } @@ -247,9 +247,13 @@ impl ReplicationBuffer { self.write_data_entity()?; } + let old_pos = self.cursor.position(); DefaultOptions::new().serialize_into(&mut self.cursor, &replication_id)?; (replication_info.serialize)(ptr, &mut self.cursor)?; - self.entity_data_len += 1; + let component_length = self.cursor.position() - old_pos; + debug_assert!(component_length > 0); + debug_assert!(component_length <= u16::MAX as u64); + self.entity_data_len += component_length as u16; Ok(()) } diff --git a/tests/replication.rs b/tests/replication.rs index c0cbe847..cf082686 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -589,7 +589,7 @@ fn diagnostics() { assert_eq!(stats.mappings, 1); assert_eq!(stats.despawns, 1); assert_eq!(stats.packets, 2); - assert_eq!(stats.bytes, 31); + assert_eq!(stats.bytes, 33); } #[derive(Component, Deserialize, Serialize)] From 9268d999b583f18db49d84a49c4fa89d8d6954c6 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 16 Dec 2023 12:15:58 +0200 Subject: [PATCH 48/63] Refactor component writing - Use checked conversion as in other places. - Use naming as in other places (I don't have a preference, I just prefer consistency). 
--- src/server/replication_buffer.rs | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/src/server/replication_buffer.rs b/src/server/replication_buffer.rs
index 26114c62..2352a412 100644
--- a/src/server/replication_buffer.rs
+++ b/src/server/replication_buffer.rs
@@ -247,13 +247,17 @@ impl ReplicationBuffer {
 self.write_data_entity()?;
 }
 
- let old_pos = self.cursor.position();
+ let previous_pos = self.cursor.position();
 DefaultOptions::new().serialize_into(&mut self.cursor, &replication_id)?;
 (replication_info.serialize)(ptr, &mut self.cursor)?;
- let component_length = self.cursor.position() - old_pos;
- debug_assert!(component_length > 0);
- debug_assert!(component_length <= u16::MAX as u64);
- self.entity_data_len += component_length as u16;
+
+ let component_len = (self.cursor.position() - previous_pos)
+ .try_into()
+ .map_err(|_| bincode::ErrorKind::SizeLimit)?;
+ self.entity_data_len = self
+ .entity_data_len
+ .checked_add(component_len)
+ .ok_or(bincode::ErrorKind::SizeLimit)?;
 
 Ok(())
 }

From e3966933289018f901a545a32e3eed7090d89955 Mon Sep 17 00:00:00 2001
From: Hennadii Chernyshchyk
Date: Sat, 16 Dec 2023 12:18:09 +0200
Subject: [PATCH 49/63] Refactor applying on client

- Use shorter trace messages.
- Panic if tick is not found, it can't happen.
--- src/client.rs | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/src/client.rs b/src/client.rs index 5388c088..86be8cb6 100644 --- a/src/client.rs +++ b/src/client.rs @@ -330,11 +330,11 @@ fn apply_init_components( let entities_count: u16 = bincode::deserialize_from(&mut *cursor)?; for _ in 0..entities_count { let entity = deserialize_entity(cursor)?; - let update_bytes: u16 = bincode::deserialize_from(&mut *cursor)?; + let data_len: u16 = bincode::deserialize_from(&mut *cursor)?; let mut entity = entity_map.get_by_server_or_spawn(world, entity); entity_ticks.insert(entity.id(), replicon_tick); - let end_pos = cursor.position() + update_bytes as u64; + let end_pos = cursor.position() + data_len as u64; let mut components_count = 0u32; while cursor.position() < end_pos { let replication_id = DefaultOptions::new().deserialize_from(&mut *cursor)?; @@ -402,26 +402,25 @@ fn apply_update_components( ) -> bincode::Result<()> { let len = cursor.get_ref().len() as u64; while cursor.position() < len { - let server_entity = deserialize_entity(cursor)?; - let update_bytes: u16 = bincode::deserialize_from(&mut *cursor)?; - let Some(mut entity) = entity_map.get_by_server(world, server_entity) else { - debug!("ignoring update received for unknown server entity {server_entity:?}"); - cursor.set_position(cursor.position() + update_bytes as u64); - continue; - }; - let Some(entity_tick) = entity_ticks.get_mut(&entity.id()) else { - error!("replicon tick missing for client entity {:?} mapped to server entity {server_entity:?}", entity.id()); - cursor.set_position(cursor.position() + update_bytes as u64); + let entity = deserialize_entity(cursor)?; + let data_len: u16 = bincode::deserialize_from(&mut *cursor)?; + let Some(mut entity) = entity_map.get_by_server(world, entity) else { + // Update could arrive after a despawn from init message. 
+ debug!("ignoring update received for unknown server's {entity:?}"); + cursor.set_position(cursor.position() + data_len as u64); continue; }; + let entity_tick = entity_ticks + .get_mut(&entity.id()) + .expect("all entities from update should have assigned ticks"); if message_tick <= *entity_tick { - trace!("ignoring outdated update for client entity {:?} mapped to server entity {server_entity:?}", entity.id()); - cursor.set_position(cursor.position() + update_bytes as u64); + trace!("ignoring outdated update for client's {:?}", entity.id()); + cursor.set_position(cursor.position() + data_len as u64); continue; } *entity_tick = message_tick; - let end_pos = cursor.position() + update_bytes as u64; + let end_pos = cursor.position() + data_len as u64; let mut components_count = 0u32; while cursor.position() < end_pos { let replication_id = DefaultOptions::new().deserialize_from(&mut *cursor)?; From e215ba239f8fe5b28186d1b16afec51d0fb52173 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 16 Dec 2023 12:33:25 +0200 Subject: [PATCH 50/63] Add test --- tests/replication.rs | 42 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/tests/replication.rs b/tests/replication.rs index cf082686..b9606db1 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -357,7 +357,47 @@ fn insert_update_replication() { .world .query_filtered::<&BoolComponent, With>() .single(&client_app.world); - assert!(component.0, "init messages should contain the update"); + assert!(component.0); +} + +#[test] +fn despawn_update_replication() { + let mut server_app = App::new(); + let mut client_app = App::new(); + for app in [&mut server_app, &mut client_app] { + app.add_plugins(( + MinimalPlugins, + ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), + )) + .replicate::() + .replicate::(); + } + + common::connect(&mut server_app, &mut client_app); + + let server_entity = server_app + .world + 
.spawn((Replication, BoolComponent(false))) + .id(); + + server_app.update(); + client_app.update(); + + let mut component = server_app + .world + .get_mut::(server_entity) + .unwrap(); + component.0 = true; + + // Update without client to send update message. + server_app.update(); + + server_app.world.despawn(server_entity); + + server_app.update(); + client_app.update(); + + assert!(client_app.world.entities().is_empty()); } #[test] From eba336624149baead93fd5b28db824b27d911877 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 16 Dec 2023 13:14:16 +0200 Subject: [PATCH 51/63] Fix message packing --- src/server/replication_messages.rs | 34 ++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index fcb68bf5..3a9a680b 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -224,10 +224,11 @@ impl UpdateMessage { let mut entities = Vec::new(); let mut message_size = 0; for &(entity, data_size) in &self.entities { - const MAX_PACKET_SIZE: usize = 1200; // https://github.com/lucaspoffo/renet/blob/acee8b470e34c70d35700d96c00fb233d9cf6919/renet/src/packet.rs#L7 - - // Pack small messages ordered after large messages into the large message's packet ('pack back' strategy). - if message_size == 0 || ((message_size + header.len()) % MAX_PACKET_SIZE) < data_size { + // Try to pack back first, then try to pack forward. 
+ if message_size == 0 + || can_pack(header.len(), message_size, data_size) + || can_pack(header.len(), data_size, message_size) + { entities.push(entity); message_size += data_size; } else { @@ -262,3 +263,28 @@ impl UpdateMessage { Ok(()) } } + +fn can_pack(header_len: usize, base: usize, add: usize) -> bool { + const MAX_PACKET_SIZE: usize = 1200; // https://github.com/lucaspoffo/renet/blob/acee8b470e34c70d35700d96c00fb233d9cf6919/renet/src/packet.rs#L7 + + let dangling = (base + header_len) % MAX_PACKET_SIZE; + (dangling > 0) && ((dangling + add) <= MAX_PACKET_SIZE) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn packing() { + assert!(can_pack(10, 0, 5)); + assert!(can_pack(10, 0, 1190)); + assert!(!can_pack(10, 0, 1191)); + assert!(!can_pack(10, 0, 3000)); + + assert!(can_pack(10, 1189, 1)); + assert!(!can_pack(10, 1190, 0)); + assert!(!can_pack(10, 1190, 1)); + assert!(!can_pack(10, 1190, 3000)); + } +} From bef9b1a5df471435d88691bcce59938f987764cf Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 16 Dec 2023 13:28:17 +0200 Subject: [PATCH 52/63] Refactor connection handling --- src/server.rs | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/src/server.rs b/src/server.rs index 4d3457e5..29141975 100644 --- a/src/server.rs +++ b/src/server.rs @@ -52,7 +52,7 @@ impl Plugin for ServerPlugin { .configure_sets(PostUpdate, ServerSet::Send.before(RenetSend)) .add_systems( PreUpdate, - (Self::acks_receiving_system, Self::disconnect_cleanup_system) + (Self::acks_receiving_system, Self::handle_connections_system) .in_set(ServerSet::Receive) .run_if(resource_exists::()), ) @@ -103,6 +103,28 @@ impl ServerPlugin { trace!("incremented {replicon_tick:?}"); } + fn handle_connections_system( + mut server_events: EventReader, + mut entity_map: ResMut, + mut clients_info: ResMut, + ) { + for event in server_events.read() { + match *event { + ServerEvent::ClientDisconnected { 
client_id, .. } => { + entity_map.0.remove(&client_id); + let index = clients_info + .iter() + .position(|info| info.id == client_id) + .expect("clients info should contain all connected clients"); + clients_info.remove(index); + } + ServerEvent::ClientConnected { client_id } => { + clients_info.push(ClientInfo::new(client_id)); + } + } + } + } + fn acks_receiving_system( change_tick: SystemChangeTick, mut server: ResMut, @@ -150,28 +172,6 @@ impl ServerPlugin { } } - fn disconnect_cleanup_system( - mut server_events: EventReader, - mut entity_map: ResMut, - mut clients_info: ResMut, - ) { - for event in server_events.read() { - match *event { - ServerEvent::ClientDisconnected { client_id, .. } => { - entity_map.0.remove(&client_id); - let index = clients_info - .iter() - .position(|info| info.id == client_id) - .expect("clients info should contain all connected clients"); - clients_info.remove(index); - } - ServerEvent::ClientConnected { client_id } => { - clients_info.push(ClientInfo::new(client_id)); - } - } - } - } - /// Collects [`ReplicationMessages`] and sends them. #[allow(clippy::type_complexity)] pub(super) fn replication_sending_system( From 9a5a34915712a0bc31b11e880901ec6f13961703 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 16 Dec 2023 13:46:34 +0200 Subject: [PATCH 53/63] Apply docs suggestions --- src/server.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/server.rs b/src/server.rs index 29141975..719fa7dd 100644 --- a/src/server.rs +++ b/src/server.rs @@ -249,7 +249,8 @@ fn collect_mappings( Ok(()) } -/// Collects component insertions from this tick into init messages and changes into update messages since the last entity tick. +/// Collects component insertions from this tick into init messages, and changes into update messages +/// since the last entity tick. 
fn collect_changes( messages: &mut ReplicationMessages, world: &World, @@ -370,8 +371,8 @@ fn collect_changes( /// Collects the component if it has been changed. /// -/// If the component has been changed in this tick, it will be collected into init buffer. -/// Otherwise if the component has been changed since the last entity tick for a client - it will be collected into update message. +/// If the component was added since the client's last init message, it will be collected into +/// init buffer. fn collect_component_change( messages: &mut ReplicationMessages, entity: Entity, From 0fde0752fad58535604e49c2887198313ffb94e4 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 16 Dec 2023 16:24:33 +0200 Subject: [PATCH 54/63] Reuse memory for entities --- src/server.rs | 79 ++++++++++++++++++++++++------ src/server/replication_messages.rs | 33 ++++++++----- 2 files changed, 84 insertions(+), 28 deletions(-) diff --git a/src/server.rs b/src/server.rs index 719fa7dd..b667462e 100644 --- a/src/server.rs +++ b/src/server.rs @@ -112,14 +112,10 @@ impl ServerPlugin { match *event { ServerEvent::ClientDisconnected { client_id, .. 
} => { entity_map.0.remove(&client_id); - let index = clients_info - .iter() - .position(|info| info.id == client_id) - .expect("clients info should contain all connected clients"); - clients_info.remove(index); + clients_info.remove(client_id); } ServerEvent::ClientConnected { client_id } => { - clients_info.push(ClientInfo::new(client_id)); + clients_info.info.push(ClientInfo::new(client_id)); } } } @@ -130,13 +126,18 @@ impl ServerPlugin { mut server: ResMut, mut clients_info: ResMut, ) { - for client_info in clients_info.iter_mut() { + let ClientsInfo { + info, + entity_buffer, + } = &mut *clients_info; + + for client_info in info.iter_mut() { while let Some(message) = server.receive_message(client_info.id, ReplicationChannel::Reliable) { match bincode::deserialize::(&message) { Ok(update_index) => { - let Some((tick, entities)) = + let Some((tick, mut entities)) = client_info.update_entities.remove(&update_index) else { error!( @@ -146,10 +147,10 @@ impl ServerPlugin { continue; }; - for entity in entities { + for entity in &entities { let last_tick = client_info .ticks - .get_mut(&entity) + .get_mut(entity) .expect("tick should be inserted on any component insertion"); // Received tick could be outdated because we bump it @@ -158,6 +159,9 @@ impl ServerPlugin { *last_tick = tick; } } + entities.clear(); + entity_buffer.push(entities); + trace!( "client {} acknowledged an update with {tick:?}", client_info.id @@ -189,8 +193,8 @@ impl ServerPlugin { replication_rules: Res, replicon_tick: Res, ) -> bincode::Result<()> { - let clients_info = mem::take(&mut set.p1().0); // Take ownership to avoid borrowing issues. - messages.prepare(clients_info, *replicon_tick)?; + let info = mem::take(&mut set.p1().info); // Take ownership to avoid borrowing issues. 
+ messages.prepare(info, *replicon_tick)?; collect_mappings(&mut messages, &mut set.p2())?; collect_changes(&mut messages, set.p0(), &change_tick, &replication_rules)?; @@ -203,15 +207,20 @@ impl ServerPlugin { collect_despawns(&mut messages, &mut removed_replication)?; let last_change_tick = *set.p4(); - let (last_change_tick, clients_info) = messages.send( + let mut entity_buffer = mem::take(&mut set.p1().entity_buffer); + let (last_change_tick, info) = messages.send( &mut set.p3(), + &mut entity_buffer, last_change_tick, *replicon_tick, change_tick.this_run(), )?; // Return borrowed data back. - **set.p1() = clients_info; + *set.p1() = ClientsInfo { + info, + entity_buffer, + }; *set.p4() = last_change_tick; Ok(()) @@ -496,8 +505,46 @@ pub enum TickPolicy { } /// Stores meta-information about connected clients. -#[derive(Default, Resource, Deref, DerefMut)] -pub(super) struct ClientsInfo(Vec); +#[derive(Default, Resource)] +pub(super) struct ClientsInfo { + info: Vec, + + /// [`Vec`]'s from acknowledged update indexes from [`ClientInfo`]. + /// + /// All data is cleared before the insertion, used just to reuse allocated capacity. + entity_buffer: Vec>, +} + +impl ClientsInfo { + fn remove(&mut self, client_id: ClientId) { + let index = self + .info + .iter() + .position(|info| info.id == client_id) + .expect("clients info should contain all connected clients"); + let mut client_info = self.info.remove(index); + let old_entities = client_info + .update_entities + .drain() + .map(|(_, (_, mut entities))| { + entities.clear(); + entities + }); + self.entity_buffer.extend(old_entities); + } + + fn clear(&mut self) { + let old_entities = self + .info + .drain(..) 
+ .flat_map(|client_info| client_info.update_entities) + .map(|(_, (_, mut entities))| { + entities.clear(); + entities + }); + self.entity_buffer.extend(old_entities); + } +} pub(super) struct ClientInfo { id: ClientId, diff --git a/src/server/replication_messages.rs b/src/server/replication_messages.rs index 3a9a680b..09028d4a 100644 --- a/src/server/replication_messages.rs +++ b/src/server/replication_messages.rs @@ -14,7 +14,7 @@ use crate::replicon_core::{replicon_tick::RepliconTick, ReplicationChannel}; /// Reuses allocated memory from older messages. #[derive(Default)] pub(crate) struct ReplicationMessages { - clients_info: Vec, + info: Vec, data: Vec<(InitMessage, UpdateMessage)>, clients_count: usize, } @@ -28,14 +28,14 @@ impl ReplicationMessages { /// and iteration methods will not include them. pub(super) fn prepare( &mut self, - clients_info: Vec, + info: Vec, replicon_tick: RepliconTick, ) -> bincode::Result<()> { - self.clients_count = clients_info.len(); + self.clients_count = info.len(); self.data.reserve(self.clients_count); - for index in 0..clients_info.len() { + for index in 0..info.len() { if let Some((init_message, update_message)) = self.data.get_mut(index) { init_message.reset(replicon_tick)?; update_message.reset()?; @@ -45,7 +45,7 @@ impl ReplicationMessages { } } - self.clients_info = clients_info; + self.info = info; Ok(()) } @@ -62,7 +62,7 @@ impl ReplicationMessages { self.data .iter_mut() .take(self.clients_count) - .zip(&mut self.clients_info) + .zip(&mut self.info) .map(|((init_message, update_message), client_info)| { (init_message, update_message, client_info) }) @@ -76,6 +76,7 @@ impl ReplicationMessages { pub(super) fn send( &mut self, server: &mut RenetServer, + entity_buffer: &mut Vec>, mut last_change_tick: LastChangeTick, replicon_tick: RepliconTick, tick: Tick, @@ -90,13 +91,20 @@ impl ReplicationMessages { .data .iter_mut() .take(self.clients_count) - .zip(&mut self.clients_info) + .zip(&mut self.info) { 
init_message.send(server, client_info.id); - update_message.send(server, client_info, last_change_tick, replicon_tick, tick)?; + update_message.send( + server, + entity_buffer, + client_info, + last_change_tick, + replicon_tick, + tick, + )?; } - Ok((last_change_tick, mem::take(&mut self.clients_info))) + Ok((last_change_tick, mem::take(&mut self.info))) } } @@ -205,6 +213,7 @@ impl UpdateMessage { fn send( &mut self, server: &mut RenetServer, + entity_buffer: &mut Vec>, client_info: &mut ClientInfo, last_change_tick: LastChangeTick, replicon_tick: RepliconTick, @@ -221,7 +230,7 @@ impl UpdateMessage { bincode::serialize_into(&mut header[..], &(*last_change_tick, replicon_tick))?; let mut slice = self.buffer.as_slice(); - let mut entities = Vec::new(); + let mut entities = entity_buffer.pop().unwrap_or_default(); let mut message_size = 0; for &(entity, data_size) in &self.entities { // Try to pack back first, then try to pack forward. @@ -236,7 +245,7 @@ impl UpdateMessage { slice = remaining; message_size = data_size; - let update_index = client_info.register_update(tick, entities.clone()); + let update_index = client_info.register_update(tick, entities); bincode::serialize_into(&mut header[TICKS_SIZE..], &update_index)?; server.send_message( @@ -245,7 +254,7 @@ impl UpdateMessage { Bytes::from_iter(header.into_iter().chain(message.iter().copied())), ); - entities.clear(); + entities = entity_buffer.pop().unwrap_or_default(); } } From 305569bbc149f34b0db5b2f7f6ea148e441e7a46 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 16 Dec 2023 16:47:04 +0200 Subject: [PATCH 55/63] Clarify events despawned entities --- src/lib.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/lib.rs b/src/lib.rs index 4ce92507..c1b30ceb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -386,6 +386,8 @@ then the client is guaranteed to see the spawns at the same time, but the compon If a component is dependent on other data, updates to the component will only be applied to 
the client when that data has arrived. So if your component references another entity, updates to that component will only be applied when the referenced entity has been spawned on the client. +Updates for despawned entities will be discarded automatically, but events could reference despawned entities and users should discard them manually. + Clients should never assume their world state is the same as the server's on any given tick value-wise. World state on the client is only "eventually consistent" with the server's. From d7b27e3bbe2830e38224f4527c8130a9ad682901 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 16 Dec 2023 17:22:09 +0200 Subject: [PATCH 56/63] Fix old entities replication --- src/server.rs | 9 +++++++-- tests/replication.rs | 20 ++++++++++++++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/src/server.rs b/src/server.rs index b667462e..e9d01292 100644 --- a/src/server.rs +++ b/src/server.rs @@ -371,7 +371,8 @@ fn collect_changes( } } - for (init_message, _) in messages.iter_mut() { + for (init_message, _, client_info) in messages.iter_mut_with_info() { + client_info.just_connected = false; init_message.end_array()?; } @@ -392,7 +393,9 @@ fn collect_component_change( component: Ptr, ) -> bincode::Result<()> { for (init_message, update_message, client_info) in messages.iter_mut_with_info() { - if ticks.is_added(change_tick.last_run(), change_tick.this_run()) { + if client_info.just_connected + || ticks.is_added(change_tick.last_run(), change_tick.this_run()) + { init_message.write_component(replication_info, replication_id, component)?; } else { let tick = *client_info @@ -548,6 +551,7 @@ impl ClientsInfo { pub(super) struct ClientInfo { id: ClientId, + just_connected: bool, ticks: EntityHashMap, update_entities: HashMap)>, next_update_index: u16, @@ -557,6 +561,7 @@ impl ClientInfo { fn new(id: ClientId) -> Self { Self { id, + just_connected: true, ticks: Default::default(), update_entities: Default::default(), 
next_update_index: Default::default(), diff --git a/tests/replication.rs b/tests/replication.rs index b9606db1..9e541e33 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -545,6 +545,26 @@ fn despawn_replication() { assert!(entity_map.to_server().is_empty()); } +#[test] +fn old_entities_replication() { + let mut server_app = App::new(); + let mut client_app = App::new(); + for app in [&mut server_app, &mut client_app] { + app.add_plugins(( + MinimalPlugins, + ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), + )) + .replicate::(); + } + + // Spawn an entity before client connected. + server_app.world.spawn((Replication, TableComponent)); + + common::connect(&mut server_app, &mut client_app); + + assert_eq!(client_app.world.entities().len(), 1); +} + #[test] fn replication_into_scene() { let mut app = App::new(); From a06da4e822147abb95ced2d09e22aceb66265293 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 16 Dec 2023 17:24:16 +0200 Subject: [PATCH 57/63] Move update replication tests after init tests --- tests/replication.rs | 222 +++++++++++++++++++++---------------------- 1 file changed, 111 insertions(+), 111 deletions(-) diff --git a/tests/replication.rs b/tests/replication.rs index 9e541e33..5d121a52 100644 --- a/tests/replication.rs +++ b/tests/replication.rs @@ -200,6 +200,117 @@ fn insert_replication() { ); } +#[test] +fn removal_replication() { + let mut server_app = App::new(); + let mut client_app = App::new(); + for app in [&mut server_app, &mut client_app] { + app.add_plugins(( + MinimalPlugins, + ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), + )) + .replicate::(); + } + + common::connect(&mut server_app, &mut client_app); + + let server_entity = server_app + .world + .spawn((Replication, TableComponent, NonReplicatingComponent)) + .id(); + + server_app.update(); + + server_app + .world + .entity_mut(server_entity) + .remove::(); + + let client_entity = client_app + .world + 
.spawn((Replication, TableComponent, NonReplicatingComponent)) + .id(); + + client_app + .world + .resource_mut::() + .insert(server_entity, client_entity); + + server_app.update(); + client_app.update(); + + let client_entity = client_app.world.entity(client_entity); + assert!(!client_entity.contains::()); + assert!(client_entity.contains::()); +} + +#[test] +fn despawn_replication() { + let mut server_app = App::new(); + let mut client_app = App::new(); + for app in [&mut server_app, &mut client_app] { + app.add_plugins(( + MinimalPlugins, + ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), + )); + } + + common::connect(&mut server_app, &mut client_app); + + let server_child_entity = server_app.world.spawn(Replication).id(); + let server_entity = server_app + .world + .spawn(Replication) + .push_children(&[server_child_entity]) + .id(); + + server_app.update(); + + server_app.world.despawn(server_entity); + server_app.world.despawn(server_child_entity); + + let client_child_entity = client_app.world.spawn(Replication).id(); + let client_entity = client_app + .world + .spawn(Replication) + .push_children(&[client_child_entity]) + .id(); + + let mut entity_map = client_app.world.resource_mut::(); + entity_map.insert(server_entity, client_entity); + entity_map.insert(server_child_entity, client_child_entity); + + server_app.update(); + client_app.update(); + + assert!(client_app.world.get_entity(client_entity).is_none()); + assert!(client_app.world.get_entity(client_child_entity).is_none()); + + let entity_map = client_app.world.resource::(); + assert!(entity_map.to_client().is_empty()); + assert!(entity_map.to_server().is_empty()); +} + +#[test] +fn old_entities_replication() { + let mut server_app = App::new(); + let mut client_app = App::new(); + for app in [&mut server_app, &mut client_app] { + app.add_plugins(( + MinimalPlugins, + ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), + )) + .replicate::(); + } + + // Spawn an 
entity before client connected. + server_app.world.spawn((Replication, TableComponent)); + + common::connect(&mut server_app, &mut client_app); + + assert_eq!(client_app.world.entities().len(), 1); +} + #[test] fn update_replication() { let mut server_app = App::new(); @@ -454,117 +565,6 @@ fn update_replication_buffering() { assert!(component.0, "buffered update should be applied"); } -#[test] -fn removal_replication() { - let mut server_app = App::new(); - let mut client_app = App::new(); - for app in [&mut server_app, &mut client_app] { - app.add_plugins(( - MinimalPlugins, - ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), - )) - .replicate::(); - } - - common::connect(&mut server_app, &mut client_app); - - let server_entity = server_app - .world - .spawn((Replication, TableComponent, NonReplicatingComponent)) - .id(); - - server_app.update(); - - server_app - .world - .entity_mut(server_entity) - .remove::(); - - let client_entity = client_app - .world - .spawn((Replication, TableComponent, NonReplicatingComponent)) - .id(); - - client_app - .world - .resource_mut::() - .insert(server_entity, client_entity); - - server_app.update(); - client_app.update(); - - let client_entity = client_app.world.entity(client_entity); - assert!(!client_entity.contains::()); - assert!(client_entity.contains::()); -} - -#[test] -fn despawn_replication() { - let mut server_app = App::new(); - let mut client_app = App::new(); - for app in [&mut server_app, &mut client_app] { - app.add_plugins(( - MinimalPlugins, - ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), - )); - } - - common::connect(&mut server_app, &mut client_app); - - let server_child_entity = server_app.world.spawn(Replication).id(); - let server_entity = server_app - .world - .spawn(Replication) - .push_children(&[server_child_entity]) - .id(); - - server_app.update(); - - server_app.world.despawn(server_entity); - server_app.world.despawn(server_child_entity); - - let 
client_child_entity = client_app.world.spawn(Replication).id(); - let client_entity = client_app - .world - .spawn(Replication) - .push_children(&[client_child_entity]) - .id(); - - let mut entity_map = client_app.world.resource_mut::(); - entity_map.insert(server_entity, client_entity); - entity_map.insert(server_child_entity, client_child_entity); - - server_app.update(); - client_app.update(); - - assert!(client_app.world.get_entity(client_entity).is_none()); - assert!(client_app.world.get_entity(client_child_entity).is_none()); - - let entity_map = client_app.world.resource::(); - assert!(entity_map.to_client().is_empty()); - assert!(entity_map.to_server().is_empty()); -} - -#[test] -fn old_entities_replication() { - let mut server_app = App::new(); - let mut client_app = App::new(); - for app in [&mut server_app, &mut client_app] { - app.add_plugins(( - MinimalPlugins, - ReplicationPlugins.set(ServerPlugin::new(TickPolicy::EveryFrame)), - )) - .replicate::(); - } - - // Spawn an entity before client connected. - server_app.world.spawn((Replication, TableComponent)); - - common::connect(&mut server_app, &mut client_app); - - assert_eq!(client_app.world.entities().len(), 1); -} - #[test] fn replication_into_scene() { let mut app = App::new(); From 5e58ac094b5ad0ca7e5590cb66ecaafa35d867ac Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 16 Dec 2023 17:49:41 +0200 Subject: [PATCH 58/63] Move clients info into a separate module Logically better and it contains `next_update_index` that shouldn't be used or changed outside. 
--- src/server.rs | 77 +-------------------------------- src/server/clients_info.rs | 87 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 75 deletions(-) create mode 100644 src/server/clients_info.rs diff --git a/src/server.rs b/src/server.rs index e9d01292..0d7b28e6 100644 --- a/src/server.rs +++ b/src/server.rs @@ -1,3 +1,4 @@ +pub(super) mod clients_info; pub(super) mod replication_buffer; pub(super) mod replication_messages; @@ -26,6 +27,7 @@ use crate::replicon_core::{ replicon_tick::RepliconTick, ReplicationChannel, }; +use clients_info::{ClientInfo, ClientsInfo}; use replication_messages::ReplicationMessages; pub const SERVER_ID: ClientId = ClientId::from_raw(0); @@ -507,81 +509,6 @@ pub enum TickPolicy { Manual, } -/// Stores meta-information about connected clients. -#[derive(Default, Resource)] -pub(super) struct ClientsInfo { - info: Vec, - - /// [`Vec`]'s from acknowledged update indexes from [`ClientInfo`]. - /// - /// All data is cleared before the insertion, used just to reuse allocated capacity. - entity_buffer: Vec>, -} - -impl ClientsInfo { - fn remove(&mut self, client_id: ClientId) { - let index = self - .info - .iter() - .position(|info| info.id == client_id) - .expect("clients info should contain all connected clients"); - let mut client_info = self.info.remove(index); - let old_entities = client_info - .update_entities - .drain() - .map(|(_, (_, mut entities))| { - entities.clear(); - entities - }); - self.entity_buffer.extend(old_entities); - } - - fn clear(&mut self) { - let old_entities = self - .info - .drain(..) 
- .flat_map(|client_info| client_info.update_entities) - .map(|(_, (_, mut entities))| { - entities.clear(); - entities - }); - self.entity_buffer.extend(old_entities); - } -} - -pub(super) struct ClientInfo { - id: ClientId, - just_connected: bool, - ticks: EntityHashMap, - update_entities: HashMap)>, - next_update_index: u16, -} - -impl ClientInfo { - fn new(id: ClientId) -> Self { - Self { - id, - just_connected: true, - ticks: Default::default(), - update_entities: Default::default(), - next_update_index: Default::default(), - } - } - - /// Remembers `entities` and `tick` of an update message and returns its index. - /// - /// Used later to acknowledge updated entities. - #[must_use] - fn register_update(&mut self, tick: Tick, entities: Vec) -> u16 { - let update_index = self.next_update_index; - self.update_entities.insert(update_index, (tick, entities)); - - self.next_update_index = self.next_update_index.overflowing_add(1).0; - - update_index - } -} - /// Contains the last tick in which a replicated entity was spawned, despawned, or gained/lost a component. /// /// It should be included in update messages and server events instead of the current tick diff --git a/src/server/clients_info.rs b/src/server/clients_info.rs new file mode 100644 index 00000000..68650db4 --- /dev/null +++ b/src/server/clients_info.rs @@ -0,0 +1,87 @@ +use bevy::{ + ecs::component::Tick, + prelude::*, + utils::{EntityHashMap, HashMap}, +}; +use bevy_renet::renet::ClientId; + +/// Stores meta-information about connected clients. +#[derive(Default, Resource)] +pub(crate) struct ClientsInfo { + pub(super) info: Vec, + + /// [`Vec`]'s from acknowledged update indexes from [`ClientInfo`]. + /// + /// All data is cleared before the insertion, used just to reuse allocated capacity. + pub(super) entity_buffer: Vec>, +} + +impl ClientsInfo { + /// Removes info for the client. + /// + /// Keeps memory from update entities for reuse. 
+ pub(super) fn remove(&mut self, client_id: ClientId) { + let index = self + .info + .iter() + .position(|info| info.id == client_id) + .expect("clients info should contain all connected clients"); + let mut client_info = self.info.remove(index); + let old_entities = client_info + .update_entities + .drain() + .map(|(_, (_, mut entities))| { + entities.clear(); + entities + }); + self.entity_buffer.extend(old_entities); + } + + /// Clears information for all clients. + /// + /// Keeps memory from update entities for reuse. + pub(super) fn clear(&mut self) { + let old_entities = self + .info + .drain(..) + .flat_map(|client_info| client_info.update_entities) + .map(|(_, (_, mut entities))| { + entities.clear(); + entities + }); + self.entity_buffer.extend(old_entities); + } +} + +pub(super) struct ClientInfo { + pub(super) id: ClientId, + pub(super) just_connected: bool, + pub(super) ticks: EntityHashMap, + pub(super) update_entities: HashMap)>, + next_update_index: u16, +} + +impl ClientInfo { + pub(super) fn new(id: ClientId) -> Self { + Self { + id, + just_connected: true, + ticks: Default::default(), + update_entities: Default::default(), + next_update_index: Default::default(), + } + } + + /// Remembers `entities` and `tick` of an update message and returns its index. + /// + /// Used later to acknowledge updated entities. 
+ #[must_use] + pub(super) fn register_update(&mut self, tick: Tick, entities: Vec) -> u16 { + let update_index = self.next_update_index; + self.update_entities.insert(update_index, (tick, entities)); + + self.next_update_index = self.next_update_index.overflowing_add(1).0; + + update_index + } +} From 9becc1425ea6dae88d85536d6f45518abd41ea0e Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sat, 16 Dec 2023 20:40:51 +0200 Subject: [PATCH 59/63] Apply doc suggestion --- src/server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server.rs b/src/server.rs index 0d7b28e6..dfaf3a9c 100644 --- a/src/server.rs +++ b/src/server.rs @@ -239,7 +239,7 @@ impl ServerPlugin { } } -/// Collects and writes any new entity mappings happened in this tick. +/// Collects and writes any new entity mappings that happened in this tick. /// /// On deserialization mappings should be processed first, so all referenced entities after it will behave correctly. fn collect_mappings( From 4612fea7d0f70c853af49c9b4935c3db35db7957 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sun, 17 Dec 2023 00:33:38 +0200 Subject: [PATCH 60/63] Make systems dependent --- src/server.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/server.rs b/src/server.rs index dfaf3a9c..7e09ca59 100644 --- a/src/server.rs +++ b/src/server.rs @@ -54,7 +54,8 @@ impl Plugin for ServerPlugin { .configure_sets(PostUpdate, ServerSet::Send.before(RenetSend)) .add_systems( PreUpdate, - (Self::acks_receiving_system, Self::handle_connections_system) + (Self::handle_connections_system, Self::acks_receiving_system) + .chain() .in_set(ServerSet::Receive) .run_if(resource_exists::()), ) From 7f923a380deab31e023fa439ad1f05ff977b74fd Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sun, 17 Dec 2023 00:33:51 +0200 Subject: [PATCH 61/63] Improve docs --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 
c1b30ceb..4c328e31 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -386,7 +386,7 @@ then the client is guaranteed to see the spawns at the same time, but the compon If a component is dependent on other data, updates to the component will only be applied to the client when that data has arrived. So if your component references another entity, updates to that component will only be applied when the referenced entity has been spawned on the client. -Updates for despawned entities will be discarded automatically, but events could reference despawned entities and users should discard them manually. +Updates for despawned entities will be discarded automatically, but events or components may reference despawned entities and should be handled with that in mind. Clients should never assume their world state is the same as the server's on any given tick value-wise. World state on the client is only "eventually consistent" with the server's. From 6d2e80b6ac4e229b3597c2f0b3b93e3354e22f29 Mon Sep 17 00:00:00 2001 From: Hennadii Chernyshchyk Date: Sun, 17 Dec 2023 01:48:24 +0200 Subject: [PATCH 62/63] Keep memory for `ClientInfo` --- src/server.rs | 9 +++--- src/server/clients_info.rs | 64 +++++++++++++++++++++++++------------- 2 files changed, 47 insertions(+), 26 deletions(-) diff --git a/src/server.rs b/src/server.rs index 7e09ca59..13d02eba 100644 --- a/src/server.rs +++ b/src/server.rs @@ -118,7 +118,7 @@ impl ServerPlugin { clients_info.remove(client_id); } ServerEvent::ClientConnected { client_id } => { - clients_info.info.push(ClientInfo::new(client_id)); + clients_info.init(client_id); } } } @@ -132,6 +132,7 @@ impl ServerPlugin { let ClientsInfo { info, entity_buffer, + .. } = &mut *clients_info; for client_info in info.iter_mut() { @@ -220,10 +221,8 @@ impl ServerPlugin { )?; // Return borrowed data back. 
- *set.p1() = ClientsInfo { - info, - entity_buffer, - }; + set.p1().info = info; + set.p1().entity_buffer = entity_buffer; *set.p4() = last_change_tick; Ok(()) diff --git a/src/server/clients_info.rs b/src/server/clients_info.rs index 68650db4..cb5da984 100644 --- a/src/server/clients_info.rs +++ b/src/server/clients_info.rs @@ -12,14 +12,33 @@ pub(crate) struct ClientsInfo { /// [`Vec`]'s from acknowledged update indexes from [`ClientInfo`]. /// - /// All data is cleared before the insertion, used just to reuse allocated capacity. + /// All data is cleared before the insertion. + /// Stored to reuse allocated capacity. pub(super) entity_buffer: Vec>, + + /// Disconnected client's [`ClientsInfo`]. + /// + /// [`ClientInfo::clear`] is used before the insertion. + /// Stored to reuse allocated memory. + info_buffer: Vec, } impl ClientsInfo { + /// Initializes a new [`ClientInfo`] for this client. + pub(super) fn init(&mut self, client_id: ClientId) { + let client_info = if let Some(mut clinet_info) = self.info_buffer.pop() { + clinet_info.id = client_id; + clinet_info + } else { + ClientInfo::new(client_id) + }; + + self.info.push(client_info); + } + /// Removes info for the client. /// - /// Keeps memory from update entities for reuse. + /// Keeps allocated memory. pub(super) fn remove(&mut self, client_id: ClientId) { let index = self .info @@ -27,29 +46,18 @@ impl ClientsInfo { .position(|info| info.id == client_id) .expect("clients info should contain all connected clients"); let mut client_info = self.info.remove(index); - let old_entities = client_info - .update_entities - .drain() - .map(|(_, (_, mut entities))| { - entities.clear(); - entities - }); - self.entity_buffer.extend(old_entities); + self.entity_buffer.extend(client_info.reset()); + self.info_buffer.push(client_info); } /// Clears information for all clients. /// - /// Keeps memory from update entities for reuse. + /// Keeps allocated memory. 
pub(super) fn clear(&mut self) { - let old_entities = self - .info - .drain(..) - .flat_map(|client_info| client_info.update_entities) - .map(|(_, (_, mut entities))| { - entities.clear(); - entities - }); - self.entity_buffer.extend(old_entities); + for mut client_info in self.info.drain(..) { + self.entity_buffer.extend(client_info.reset()); + self.info_buffer.push(client_info); + } } } @@ -62,7 +70,7 @@ pub(super) struct ClientInfo { } impl ClientInfo { - pub(super) fn new(id: ClientId) -> Self { + fn new(id: ClientId) -> Self { Self { id, just_connected: true, @@ -72,6 +80,20 @@ impl ClientInfo { } } + /// Resets all data except `id` and drains all [`Vec`]s from update entities mapping. + /// + /// Drained data will be cleared. + /// Keeps allocated memory. + fn reset(&mut self) -> impl Iterator> + '_ { + self.just_connected = true; + self.ticks.clear(); + self.next_update_index = 0; + self.update_entities.drain().map(|(_, (_, mut entities))| { + entities.clear(); + entities + }) + } + /// Remembers `entities` and `tick` of an update message and returns its index. /// /// Used later to acknowledge updated entities. From 7834cd856100c0a30f2a88db9c0079b5e2af6d29 Mon Sep 17 00:00:00 2001 From: UkoeHB <37489173+UkoeHB@users.noreply.github.com> Date: Sat, 16 Dec 2023 18:13:11 -0600 Subject: [PATCH 63/63] fix typo --- src/server/clients_info.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/server/clients_info.rs b/src/server/clients_info.rs index cb5da984..63f0b3b9 100644 --- a/src/server/clients_info.rs +++ b/src/server/clients_info.rs @@ -26,9 +26,9 @@ pub(crate) struct ClientsInfo { impl ClientsInfo { /// Initializes a new [`ClientInfo`] for this client. 
pub(super) fn init(&mut self, client_id: ClientId) { - let client_info = if let Some(mut clinet_info) = self.info_buffer.pop() { - clinet_info.id = client_id; - clinet_info + let client_info = if let Some(mut client_info) = self.info_buffer.pop() { + client_info.id = client_id; + client_info } else { ClientInfo::new(client_id) };