From 8367537b3bd37e2290e933e43e36d4bd51754908 Mon Sep 17 00:00:00 2001
From: Pablo Murillo Nogales
Date: Fri, 7 Jul 2023 11:04:40 +0000
Subject: [PATCH 1/5] 5208 add ha flow support into server42

modify metadata for haSubFlow
fix server42-server stub
use src switchid as key to check if a flow exists in server42
use flow dst sw for server42 dst packet
add yPoint server42 forwarding rules
add server42 rules to haflow
add --no-hpet option to server42-server
---
 .../docker-compose/docker-compose.tmpl | 4 +-
 src-cpp/server42/.gitignore | 6 +-
 src-cpp/server42/README.md | 5 +-
 src-cpp/server42/docker_entrypoint.py | 2 +-
 .../server42/docker_install_requirements.sh | 2 +-
 src-cpp/server42/generate_java_protobuf.sh | 16 +-
 src-cpp/server42/local_loop.sh | 2 +-
 src-cpp/server42/src/Config.cpp | 17 +-
 src-cpp/server42/src/Config.h | 4 +-
 src-cpp/server42/src/Control.cpp | 10 +
 src-cpp/server42/src/FlowMetadata.h | 19 +-
 src-cpp/server42/src/FlowPoolTest.cpp | 6 +-
 src-cpp/server42/src/PacketGenerator.cpp | 7 +-
 src-cpp/server42/src/PacketGenerator.h | 7 +-
 src-cpp/server42/src/Workers.cpp | 8 +-
 src-cpp/server42/src/flow-rtt-control.proto | 1 +
 src-cpp/server42/src/server42.cpp | 11 +-
 .../flowhs/service/FlowPathBuilder.java | 4 +-
 .../org/openkilda/wfm/config/KafkaConfig.java | 1 +
 .../openkilda/wfm/config/ZookeeperConfig.java | 1 +
 ...gressFlowSegmentInstallFlowModFactory.java | 4 -
 .../utils/metadata/RoutingMetadata.java | 2 +-
 ...IngressServer42FlowInstallCommandTest.java | 26 +-
 .../utils/metadata/RoutingMetadataTest.java | 4 +-
 .../command/haflow/HaFlowRequest.java | 3 +-
 .../wfm/topology/flowhs/FlowHsTopology.java | 6 +
 .../flowhs/bolts/HaFlowCreateHubBolt.java | 10 +-
 .../flowhs/bolts/HaFlowDeleteHubBolt.java | 12 +-
 .../flowhs/bolts/HaFlowUpdateHubBolt.java | 22 +-
 .../BaseHaResourceAllocationAction.java | 2 +-
 .../create/actions/OnFinishedAction.java | 11 +-
 .../actions/ResourcesAllocationAction.java | 2 +-
 .../fsm/haflow/delete/HaFlowDeleteFsm.java | 2 +
 .../delete/actions/OnFinishedAction.java | 13 +-
 .../delete/actions/RemoveHaFlowAction.java | 2 +
 .../fsm/haflow/update/HaFlowUpdateFsm.java | 4 +
 .../update/actions/BuildNewRulesAction.java | 5 +
 .../update/actions/OnFinishedAction.java | 62 +--
 .../update/actions/RemoveOldRulesAction.java | 1 +
 .../topology/flowhs/model/RequestedFlow.java | 13 +
 .../validation/HaFlowValidationTestBase.java | 13 +-
 .../mapper/RequestedFlowMapperTest.java | 33 ++
 .../model/cookie/FlowSegmentCookie.java | 49 +-
 .../model/cookie/FlowSharedSegmentCookie.java | 27 +-
 .../openkilda/model/cookie/FlowSubType.java | 41 ++
 .../model/cookie/PortColourCookie.java | 29 +-
 .../model/cookie/FlowSegmentCookieTest.java | 24 +
 .../repositories/HaFlowRepository.java | 2 +
 .../repositories/HaSubFlowRepository.java | 3 +
 .../repositories/FermaHaFlowRepository.java | 11 +
 .../FermaHaSubFlowRepository.java | 13 +
 .../FermaHaSubFlowRepositoryTest.java | 14 +
 .../reroute/service/RerouteServiceTest.java | 2 +-
 .../org/openkilda/rulemanager/Constants.java | 1 +
 .../rulemanager/RuleManagerImpl.java | 62 ++-
 .../factory/FlowRulesGeneratorFactory.java | 49 ++
 .../flow/Server42IngressRuleGenerator.java | 139 ++++--
 .../flow/haflow/EgressHaRuleGenerator.java | 2 +-
 .../flow/haflow/IngressHaRuleGenerator.java | 2 +-
 ...Server42IngressForwardHaRuleGenerator.java | 74 +++
 .../flow/haflow/TransitHaRuleGenerator.java | 2 +-
 ...Server42HaFlowRttTransitRuleGenerator.java | 110 ++++
 .../rulemanager/utils/RoutingMetadata.java | 51 +-
 .../RuleManagerHaFlowRulesTest.java | 444 +++++------
.../java/org/openkilda/rulemanager/Utils.java | 4 + .../Server42IngressRuleGeneratorTest.java | 22 +- .../flow/haflow/HaFlowRulesBaseTest.java | 469 ++++++++++++++++++ .../flow/haflow/HaRuleGeneratorBaseTest.java | 2 +- ...er42IngressForwardHaRuleGeneratorTest.java | 337 +++++++++++++ ...ointForwardIngressHaRuleGeneratorTest.java | 1 + .../utils/RoutingMetadataTest.java | 71 +++ .../ActivateFlowMonitoringInfoData.java | 13 +- .../control/messaging/flowrtt/AddFlow.java | 5 +- .../messaging/flowrtt/FlowRttControl.java | 162 +++++- .../control/serverstub/ControlServer.java | 6 +- .../control/stormstub/api/AddFlowPayload.java | 3 + .../build.gradle | 8 + .../checkstyle/checkstyle-suppressions.xml | 3 +- .../control/topology/ControlTopology.java | 10 +- .../topology/service/FlowRttService.java | 62 ++- .../topology/service/IFlowCarrier.java | 4 +- .../topology/storm/bolt/flow/FlowHandler.java | 10 +- .../ActivateFlowMonitoringCommand.java | 10 +- .../topology/storm/bolt/router/Router.java | 7 +- .../topology/service/FlowRttServiceTest.java | 154 ++++++ .../server42/server42-control/build.gradle | 4 + .../server42/control/kafka/Gate.java | 13 +- .../server42/control/kafka/GateTest.java | 4 +- .../stats/service/KildaEntryCacheService.java | 3 +- .../topology/stats/StatsTopologyBaseTest.java | 9 +- .../stats/StatsTopologyHaFlowTest.java | 59 +-- .../SwitchManagerTopologyConfig.java | 2 +- 92 files changed, 2326 insertions(+), 667 deletions(-) create mode 100644 src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSubType.java create mode 100644 src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/SharedYServer42IngressForwardHaRuleGenerator.java create mode 100644 src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/YPointServer42HaFlowRttTransitRuleGenerator.java create mode 100644 src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/HaFlowRulesBaseTest.java create mode 100644 src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/SharedYServer42IngressForwardHaRuleGeneratorTest.java create mode 100644 src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/utils/RoutingMetadataTest.java create mode 100644 src-java/server42/server42-control-storm-topology/src/test/java/org/openkilda/server42/control/topology/service/FlowRttServiceTest.java diff --git a/confd/templates/docker-compose/docker-compose.tmpl b/confd/templates/docker-compose/docker-compose.tmpl index da48eb182fd..fae1ded4d98 100644 --- a/confd/templates/docker-compose/docker-compose.tmpl +++ b/confd/templates/docker-compose/docker-compose.tmpl @@ -780,7 +780,7 @@ services: context: docker dockerfile: server42/Dockerfile image: kilda/server42 - command: java -XX:+PrintFlagsFinal -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -jar server42-control-storm-stub.jar + command: /app/wait-for-it.sh -t 120 -h zookeeper.pendev -p 2181 -- java -XX:+PrintFlagsFinal -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap -jar server42-control-storm-stub.jar ports: - "9001:9001" depends_on: @@ -801,6 +801,8 @@ services: {{if not (exists "/no_server42_server")}} networks: server42_to_lab: + driver_opts: + com.docker.network.bridge.name: server42_to_lab {{end}} volumes: diff --git a/src-cpp/server42/.gitignore 
b/src-cpp/server42/.gitignore index a101e484d57..7fc056a4ff4 100644 --- a/src-cpp/server42/.gitignore +++ b/src-cpp/server42/.gitignore @@ -1,2 +1,6 @@ cmake-build-* -src/*.pb.* \ No newline at end of file +src/*.pb.* +.vscode +cmake +pipework +tools diff --git a/src-cpp/server42/README.md b/src-cpp/server42/README.md index c8854301f5a..e08f38ba540 100644 --- a/src-cpp/server42/README.md +++ b/src-cpp/server42/README.md @@ -14,8 +14,9 @@ wget https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework && chmo `sudo dmidecode | grep Interleaved -- memory channels` # connect to kilda setup -`sudo ./pipework br_kilda_int -i eth1 server42-dpdk 10.0.77.2/24 -sudo ./pipework br_kilda_test -i eth2 server42-dpdk 10.0.88.1/24 +` +sudo ./pipework br_kilda_int -i eth1 server42-server 10.0.77.2/24 +sudo ./pipework br_kilda_test -i eth2 server42-server 10.0.88.1/24 sudo ./pipework br_kilda_int -i eth1 server42-control 10.0.77.1/24 sudo ./pipework br_kilda_int -i eth1 server42-stats 10.0.77.3/24 ` diff --git a/src-cpp/server42/docker_entrypoint.py b/src-cpp/server42/docker_entrypoint.py index 38cbbad7860..04c57e10928 100755 --- a/src-cpp/server42/docker_entrypoint.py +++ b/src-cpp/server42/docker_entrypoint.py @@ -34,4 +34,4 @@ raise SystemExit(2) -subprocess.run(["./server42" , "-c", "0x1f", "--log-level=lib.eal:8" , f"--vdev=net_pcap0,iface={NETWORK_LAB_IFACE}", "--no-huge", "--", "--debug"], stderr=sys.stderr, stdout=sys.stdout) +subprocess.run(["./server42" , "-c", "0x1f", "--log-level=lib.eal:8" , f"--vdev=net_pcap0,iface={NETWORK_LAB_IFACE}", "--no-huge", "--", "--debug", "--no-hpet"], stderr=sys.stderr, stdout=sys.stdout) diff --git a/src-cpp/server42/docker_install_requirements.sh b/src-cpp/server42/docker_install_requirements.sh index 8a295319a77..69b3a6b91c5 100755 --- a/src-cpp/server42/docker_install_requirements.sh +++ b/src-cpp/server42/docker_install_requirements.sh @@ -11,4 +11,4 @@ wget -nc https://github.com/Kitware/CMake/releases/download/v3.15.3/cmake-3.15.3 tar -xzvf cmake-3.15.3-Linux-x86_64.tar.gz && \ cd - && \ ln -sf tools/cmake/cmake-3.15.3-Linux-x86_64/bin/cmake . && \ -pip3 install "docker==4.4.4" netifaces==0.11.0 \ No newline at end of file +pip3 install "urllib3<2" "docker==4.4.4" netifaces==0.11.0 diff --git a/src-cpp/server42/generate_java_protobuf.sh b/src-cpp/server42/generate_java_protobuf.sh index 56f9e8bbd2e..ae85ec25ac4 100755 --- a/src-cpp/server42/generate_java_protobuf.sh +++ b/src-cpp/server42/generate_java_protobuf.sh @@ -11,7 +11,8 @@ if [ -z $(docker images -q kilda/server42dpdk-protobuf:latest) ]; then cd - fi -if [ ! -f "src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/Control.java" ]; then +if [ ! -f "src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/Control.java" ]; then + echo "Generating protobuf java classes for Control" docker run -it --rm \ --user $(id -u):$(id -g) \ -v $(pwd)/src-java/server42/server42-control-messaging/src/main/java:/src-java/server42/server42-control-messaging/src/main/java \ @@ -22,7 +23,20 @@ if [ ! -f "src-java/server42/server42-control-messaging/src/main/java/org/openki fi +if [ ! 
-f "src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/FlowRttControl.java" ]; then + echo "Generating protobuf java classes for FlowRttControl" + docker run -it --rm \ + --user $(id -u):$(id -g) \ + -v $(pwd)/src-java/server42/server42-control-messaging/src/main/java:/src-java/server42/server42-control-messaging/src/main/java \ + -v $(pwd)/src-cpp/server42/src:/src-cpp/server42/src \ + kilda/server42dpdk-protobuf:latest \ + protoc --java_out src-java/server42/server42-control-messaging/src/main/java --proto_path src-cpp/server42/src \ + src-cpp/server42/src/flow-rtt-control.proto +fi + + if [ ! -f "src-java/server42/server42-stats-messaging/src/main/java/org/openkilda/server42/stats/messaging/flowrtt/Statistics.java" ]; then + echo "Generating protobuf java classes for Statistics" docker run -it --rm \ --user $(id -u):$(id -g) \ -v $(pwd)/src-java/server42/server42-stats-messaging/src/main/java:/src-java/server42/server42-stats-messaging/src/main/java \ diff --git a/src-cpp/server42/local_loop.sh b/src-cpp/server42/local_loop.sh index 75b54d7584c..94aecfedc32 100755 --- a/src-cpp/server42/local_loop.sh +++ b/src-cpp/server42/local_loop.sh @@ -4,4 +4,4 @@ ip link add eth42 type veth peer name eth24 ip link set eth24 up ip link set eth42 up -./server42 -c 0x1f --vdev=net_pcap0,iface=eth42 --vdev=net_pcap1,iface=eth24 --no-huge -- --debug +./server42 -c 0x1f --vdev=net_pcap0,iface=eth42 --vdev=net_pcap1,iface=eth24 --no-huge -- --debug --no-hpet diff --git a/src-cpp/server42/src/Config.cpp b/src-cpp/server42/src/Config.cpp index 9476c10854c..2ed8f879910 100644 --- a/src-cpp/server42/src/Config.cpp +++ b/src-cpp/server42/src/Config.cpp @@ -28,6 +28,7 @@ namespace org::openkilda { uint32_t first_stats_port; uint32_t control_port; bool debug; + bool hpet; public: @@ -50,6 +51,8 @@ namespace org::openkilda { process_control_port(vm); process_debug(vm); + + process_hpet(vm); } uint32_t get_core_mask() const override { @@ -80,6 +83,10 @@ namespace org::openkilda { return debug; } + virtual bool is_hpet() const override { + return hpet; + } + constexpr static const char *arg_core = "c"; constexpr static const char *arg_master_core = "master-lcore"; constexpr static const char *arg_mbuf_pool_size_per_device = "kilda-mbuf-pool-size-per-device"; @@ -87,6 +94,7 @@ namespace org::openkilda { constexpr static const char *arg_first_stats_port = "first-stats-port"; constexpr static const char *arg_control_port = "control-port"; constexpr static const char *arg_debug = "debug"; + constexpr static const char *arg_no_hpet = "no-hpet"; private: @@ -132,7 +140,8 @@ namespace org::openkilda { "Size of queue from read thread and send thread Must be power of 2") (arg_first_stats_port, po::value()->default_value(5556), "First stats port for zmq") (arg_control_port, po::value()->default_value(5555), "Control port for zmq") - (arg_debug, po::bool_switch()); + (arg_debug, po::bool_switch()) + (arg_no_hpet, po::bool_switch()); return desc; } @@ -191,13 +200,17 @@ namespace org::openkilda { BOOST_LOG_TRIVIAL(info) << "debug " << debug; } + void process_hpet(const po::variables_map &vm) { + hpet = !vm[arg_no_hpet].as(); + BOOST_LOG_TRIVIAL(info) << "hpet " << hpet; + } }; int create_config_from_cmd(int argc, char **argv, Config::ptr &config) { auto p = boost::make_shared(); try { p->fill_from_cmd(argc, argv); - } catch (InvalidCmd _) { + } catch (InvalidCmd const&) { return 1; } config = std::move(p); diff --git a/src-cpp/server42/src/Config.h 
b/src-cpp/server42/src/Config.h index a066547e53d..200c2bd932a 100644 --- a/src-cpp/server42/src/Config.h +++ b/src-cpp/server42/src/Config.h @@ -34,7 +34,9 @@ namespace org::openkilda { virtual uint32_t get_control_port() const = 0; virtual bool is_debug() const = 0; - + + virtual bool is_hpet() const = 0; + using ptr = boost::shared_ptr; using cref_ptr = const ptr&; diff --git a/src-cpp/server42/src/Control.cpp b/src-cpp/server42/src/Control.cpp index f3f08230424..ca951f0a2f0 100644 --- a/src-cpp/server42/src/Control.cpp +++ b/src-cpp/server42/src/Control.cpp @@ -49,6 +49,7 @@ namespace org::openkilda { FlowCreateArgument arg = { .flow_pool = flow_pool, .device = device, + .switch_id = addFlow.flow().switch_id(), .dst_mac = addFlow.flow().dst_mac(), .tunnel_id = addFlow.flow().tunnel_id(), .inner_tunnel_id = addFlow.flow().inner_tunnel_id(), @@ -65,6 +66,7 @@ namespace org::openkilda { void remove_flow(org::openkilda::server42::control::messaging::flowrtt::RemoveFlow &remove_flow, org::openkilda::flow_pool_t &flow_pool) { + BOOST_LOG_TRIVIAL(info) << "Remove flow: " << remove_flow.flow().flow_id() << ":" << remove_flow.flow().direction(); flow_pool.remove_packet(make_flow_endpoint(remove_flow.flow().flow_id(), remove_flow.flow().direction())); } @@ -92,6 +94,7 @@ namespace org::openkilda { } buffer_t get_list_flows(const CommandPacket &command_packet, flow_pool_t &pool) { + BOOST_LOG_TRIVIAL(info) << "Get list flows"; CommandPacketResponse response; response.set_communication_id(command_packet.communication_id()); @@ -100,13 +103,16 @@ namespace org::openkilda { const google::protobuf::Any &any = command_packet.command(0); any.UnpackTo(&filter); auto flow_list = pool.get_metadata_db()->get_endpoint_from_switch(filter.dst_mac()); + BOOST_LOG_TRIVIAL(debug) << "Flow list size: " << flow_list.size() << " flows for switch " << filter.dst_mac(); for (auto f : flow_list) { Flow flow; flow.set_flow_id(std::get(f)); flow.set_direction(std::get(f)); + BOOST_LOG_TRIVIAL(debug) << "Flow: " << flow.flow_id() << ":" << flow.direction(); response.add_response()->PackFrom(flow); } } else { + BOOST_LOG_TRIVIAL(debug) << "Flow list size: " << pool.get_packetbyendpoint_table().size(); for (const flow_endpoint_t &flow_endpoint : pool.get_packetbyendpoint_table()) { Flow flow; flow.set_flow_id(std::get(flow_endpoint)); @@ -143,6 +149,7 @@ namespace org::openkilda { void remove_isl(org::openkilda::server42::control::messaging::islrtt::RemoveIsl &remove_isl, org::openkilda::isl_pool_t &isl_pool) { + BOOST_LOG_TRIVIAL(info) << "Remove ISL: " << remove_isl.isl().switch_id() << ":" << remove_isl.isl().port(); isl_pool.remove_packet(make_isl_endpoint(remove_isl.isl().switch_id(), remove_isl.isl().port())); } @@ -249,6 +256,9 @@ namespace org::openkilda { return clear_isls(command_packet, ctx.isl_pool, ctx.pool_guard); case Command::CommandPacket_Type_LIST_ISLS: return get_list_isls(command_packet, ctx.isl_pool); + default: + BOOST_LOG_TRIVIAL(error) << "Unknown command type: " << command_packet.type(); + return error_response_from(command_packet.communication_id(), "Unknown command type"); } } } diff --git a/src-cpp/server42/src/FlowMetadata.h b/src-cpp/server42/src/FlowMetadata.h index f9dfbf10e0f..c23ad8b5cab 100644 --- a/src-cpp/server42/src/FlowMetadata.h +++ b/src-cpp/server42/src/FlowMetadata.h @@ -24,12 +24,12 @@ namespace org::openkilda { class FlowMetadata { flow_endpoint_t flow_endpoint; - std::string dst_mac; + std::string switch_id; int32_t hash; public: - FlowMetadata(std::string flow_id, bool 
direction, std::string dst_mac, int32_t hash) - : dst_mac(std::move(dst_mac)), hash(hash) { + FlowMetadata(std::string flow_id, bool direction, std::string switch_id, int32_t hash) + : switch_id(std::move(switch_id)), hash(hash) { flow_endpoint = org::openkilda::make_flow_endpoint(flow_id, direction); } @@ -47,8 +47,8 @@ namespace org::openkilda { return std::get(flow_endpoint); }; - const std::string &get_dst_mac() const { - return dst_mac; + const std::string &get_switch_id() const { + return switch_id; }; int32_t get_hash() const { @@ -62,7 +62,7 @@ namespace org::openkilda { std::shared_ptr, bmi::indexed_by< bmi::ordered_unique >, - bmi::ordered_non_unique> + bmi::ordered_non_unique> > > flow_metadata_set_t; @@ -96,13 +96,12 @@ namespace org::openkilda { } } - virtual std::list get_endpoint_from_switch(const std::string &dst_mac) const { - const flow_metadata_set_t::nth_index<1>::type &dst_mac_index = metadata_set.get<1>(); + virtual std::list get_endpoint_from_switch(const std::string &switch_id) const { + const flow_metadata_set_t::nth_index<1>::type &switch_id_index = metadata_set.get<1>(); flow_metadata_set_t::nth_index<1>::type::iterator its, ite; - std::tie(its, ite) = dst_mac_index.equal_range(dst_mac); + std::tie(its, ite) = switch_id_index.equal_range(switch_id); std::list result; - for (auto i = its; i != ite; ++i) { result.emplace_back(i->get()->get_flow_endpoint()); } diff --git a/src-cpp/server42/src/FlowPoolTest.cpp b/src-cpp/server42/src/FlowPoolTest.cpp index a98ba28399d..b2c5a8a2c98 100644 --- a/src-cpp/server42/src/FlowPoolTest.cpp +++ b/src-cpp/server42/src/FlowPoolTest.cpp @@ -132,7 +132,7 @@ namespace test { std::shared_ptr, bmi::indexed_by< bmi::ordered_unique>, - bmi::ordered_non_unique> + bmi::ordered_non_unique> > > metadata_set_t; @@ -165,7 +165,7 @@ BOOST_AUTO_TEST_CASE(flow_metadata_play) { test::metadata_set_t::nth_index<1>::type::iterator it2s, it2e; - std::tie(it2s, it2e) = dst_mac_index.equal_range(f1->get_dst_mac()); + std::tie(it2s, it2e) = dst_mac_index.equal_range(f1->get_switch_id()); BOOST_TEST(std::distance(it2s, it2e) == 2); @@ -179,7 +179,7 @@ BOOST_AUTO_TEST_CASE(flow_metadata_play) { BOOST_TEST(set.size() == 2); - std::tie(it2s, it2e) = dst_mac_index.equal_range(f1->get_dst_mac()); + std::tie(it2s, it2e) = dst_mac_index.equal_range(f1->get_switch_id()); dst_mac_index.erase(it2s, it2e); diff --git a/src-cpp/server42/src/PacketGenerator.cpp b/src-cpp/server42/src/PacketGenerator.cpp index 6681d0e0ec8..67c45f9e079 100644 --- a/src-cpp/server42/src/PacketGenerator.cpp +++ b/src-cpp/server42/src/PacketGenerator.cpp @@ -20,7 +20,7 @@ namespace org::openkilda { auto flow_meta = db->get(flow_id_key); if (flow_meta && flow_meta->get_hash() == arg.hash) { BOOST_LOG_TRIVIAL(debug) - << "skip add_flow command for " << arg.flow_id + << "skip add_flow command for " << arg.flow_id << " and switch_id " << arg.switch_id << " and direction " << arg.direction_str() << " with hash " << arg.hash << " already exists"; return; } @@ -75,6 +75,9 @@ namespace org::openkilda { auto packet = flow_pool_t::allocator_t::allocate(newPacket.getRawPacket(), arg.device); + BOOST_LOG_TRIVIAL(debug) << "add flow " << arg.flow_id << " and switch_id " << arg.switch_id + << " and direction " << arg.direction_str() << " and dst_mac " << arg.dst_mac; + if (flow_meta) { BOOST_LOG_TRIVIAL(debug) << "update flow " << arg.flow_id @@ -84,7 +87,7 @@ namespace org::openkilda { } auto meta = std::shared_ptr( - new FlowMetadata(arg.flow_id, arg.direction, arg.dst_mac, arg.hash)); + new 
FlowMetadata(arg.flow_id, arg.direction, arg.switch_id, arg.hash)); bool success = arg.flow_pool.add_packet(flow_id_key, packet, meta); diff --git a/src-cpp/server42/src/PacketGenerator.h b/src-cpp/server42/src/PacketGenerator.h index 7a7608c697d..37325933bf6 100644 --- a/src-cpp/server42/src/PacketGenerator.h +++ b/src-cpp/server42/src/PacketGenerator.h @@ -37,11 +37,12 @@ namespace org::openkilda { struct FlowCreateArgument{ flow_pool_t& flow_pool; pcpp::DpdkDevice* device; + const std::string& switch_id; const std::string& dst_mac; boost::int64_t tunnel_id; boost::int64_t inner_tunnel_id; boost::int64_t transit_tunnel_id; - boost::uint16_t udp_src_port; + boost::uint32_t udp_src_port; const std::string& flow_id; bool direction; boost::int32_t hash; @@ -62,9 +63,9 @@ namespace org::openkilda { pcpp::DpdkDevice* device; const std::string& dst_mac; boost::int64_t transit_tunnel_id; - boost::uint16_t udp_src_port; + boost::uint32_t udp_src_port; const std::string& switch_id; - boost::uint16_t port; + boost::uint32_t port; boost::int32_t hash; }; diff --git a/src-cpp/server42/src/Workers.cpp b/src-cpp/server42/src/Workers.cpp index 3b3d0857fe2..758e1e59577 100644 --- a/src-cpp/server42/src/Workers.cpp +++ b/src-cpp/server42/src/Workers.cpp @@ -48,7 +48,7 @@ namespace org::openkilda { org::openkilda::isl_pool_t& m_isl_pool, std::mutex& m_pool_guard) { - BOOST_LOG_TRIVIAL(info) << "tick isl_pool_size: " << m_isl_pool.table.size(); + BOOST_LOG_TRIVIAL(debug) << "tick isl_pool_size: " << m_isl_pool.table.size(); std::lock_guard guard(m_pool_guard); pcpp::MBufRawPacket **start = m_isl_pool.table.data(); @@ -104,7 +104,7 @@ namespace org::openkilda { while (alive.load()) { try { - BOOST_LOG_TRIVIAL(info) << "tick flow_pool_size: " << m_flow_pool.table.size(); + BOOST_LOG_TRIVIAL(debug) << "tick flow_pool_size: " << m_flow_pool.table.size(); uint64_t start_tsc = rte_get_timer_cycles(); { @@ -392,13 +392,13 @@ namespace org::openkilda { auto cmp = [](mbuf_container_t& left, mbuf_container_t& right) { return std::get<0>(left) > std::get<0>(right); }; std::priority_queue, decltype(cmp) > queue(cmp); - const uint64_t cycles_in_one_second = rte_get_hpet_hz(); + const uint64_t cycles_in_one_second = rte_get_timer_hz(); const uint64_t cycles_in_1_ms = cycles_in_one_second / 1000; while (alive.load()) { uint16_t num_of_packets = device->receivePackets(rx_mbuf, Config::chunk_size, 0); - uint64_t start_tsc = rte_get_hpet_cycles(); + uint64_t start_tsc = rte_get_timer_cycles(); for (uint16_t i = 0; i < num_of_packets; ++i) { diff --git a/src-cpp/server42/src/flow-rtt-control.proto b/src-cpp/server42/src/flow-rtt-control.proto index 186b7aa0556..9a6e3b0da8a 100644 --- a/src-cpp/server42/src/flow-rtt-control.proto +++ b/src-cpp/server42/src/flow-rtt-control.proto @@ -20,6 +20,7 @@ message Flow { uint32 udp_src_port = 8; int32 hash_code = 9; int64 inner_tunnel_id = 10; + string switch_id = 11; } message AddFlow { diff --git a/src-cpp/server42/src/server42.cpp b/src-cpp/server42/src/server42.cpp index c52167491a7..877f1a3319c 100644 --- a/src-cpp/server42/src/server42.cpp +++ b/src-cpp/server42/src/server42.cpp @@ -93,10 +93,13 @@ int init_dpdk(int argc, char *argv[], Config::cref_ptr config) { return 1; } - ret = rte_eal_hpet_init(true); - if (ret < 0) { - BOOST_LOG_TRIVIAL(fatal) << "failed to init HPET"; - return 1; + // Initialize HPET + if (config->is_hpet()) { + ret = rte_eal_hpet_init(true); + if (ret < 0) { + BOOST_LOG_TRIVIAL(fatal) << "failed to init HPET"; + return 1; + } } 
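+    // Minimal sketch of the timing fallback this option relies on: the
+    // rte_get_timer_hz()/rte_get_timer_cycles() calls that Workers.cpp now
+    // uses read the EAL default timer source -- the TSC, unless
+    // rte_eal_hpet_init(true) above has made HPET the default. So with
+    // --no-hpet the HPET initialization is skipped and the workers still
+    // get a valid cycle counter, e.g.:
+    //   uint64_t start = rte_get_timer_cycles();
+    //   double elapsed_sec = (rte_get_timer_cycles() - start)
+    //           / static_cast<double>(rte_get_timer_hz());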
pcpp::DpdkDeviceList::externalInitializationDpdk(config->get_core_mask(), diff --git a/src-java/base-topology/base-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/FlowPathBuilder.java b/src-java/base-topology/base-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/FlowPathBuilder.java index 8f722830dfd..cbe56d08ad4 100644 --- a/src-java/base-topology/base-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/FlowPathBuilder.java +++ b/src-java/base-topology/base-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/service/FlowPathBuilder.java @@ -16,7 +16,7 @@ package org.openkilda.wfm.topology.flowhs.service; import static java.lang.String.format; -import static org.openkilda.model.cookie.FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_TYPES; +import static org.openkilda.model.cookie.FlowSubType.HA_SUB_FLOW_TYPES; import org.openkilda.model.Flow; import org.openkilda.model.FlowPath; @@ -29,7 +29,7 @@ import org.openkilda.model.Switch; import org.openkilda.model.SwitchId; import org.openkilda.model.cookie.FlowSegmentCookie; -import org.openkilda.model.cookie.FlowSegmentCookie.FlowSubType; +import org.openkilda.model.cookie.FlowSubType; import org.openkilda.pce.HaPath; import org.openkilda.pce.Path; import org.openkilda.pce.Path.Segment; diff --git a/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/config/KafkaConfig.java b/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/config/KafkaConfig.java index dc5db930fad..8826bb13f5e 100644 --- a/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/config/KafkaConfig.java +++ b/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/config/KafkaConfig.java @@ -27,6 +27,7 @@ public interface KafkaConfig { int STATS_TOPOLOGY_TEST_KAFKA_PORT_3 = 2152; int ISL_LATENCY_TOPOLOGY_TEST_KAFKA_PORT = 2189; int FLOW_PING_TOPOLOGY_TEST_KAFKA_PORT = 2190; + int SERVER42_CONTROL_TOPOLOGY_TEST_KAFKA_PORT = 2191; @Key("hosts") String getHosts(); diff --git a/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/config/ZookeeperConfig.java b/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/config/ZookeeperConfig.java index 8bdbd395633..c084cb3c038 100644 --- a/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/config/ZookeeperConfig.java +++ b/src-java/base-topology/base-storm-topology/src/test/java/org/openkilda/wfm/config/ZookeeperConfig.java @@ -28,6 +28,7 @@ public interface ZookeeperConfig { int STATS_TOPOLOGY_TEST_ZOOKEEPER_PORT_3 = 9053; int ISL_LATENCY_TOPOLOGY_TEST_ZOOKEEPER_PORT = 9099; int FLOW_PING_TOPOLOGY_TEST_ZOOKEEPER_PORT = 9100; + int SERVER42_CONTROL_TOPOLOGY_TEST_ZOOKEEPER_PORT = 9101; @Key("hosts") String getHosts(); diff --git a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/command/flow/ingress/of/IngressFlowSegmentInstallFlowModFactory.java b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/command/flow/ingress/of/IngressFlowSegmentInstallFlowModFactory.java index 2fb61e7dcc7..06e61d369c9 100644 --- a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/command/flow/ingress/of/IngressFlowSegmentInstallFlowModFactory.java +++ b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/command/flow/ingress/of/IngressFlowSegmentInstallFlowModFactory.java @@ -79,11 +79,7 @@ protected 
List makeServer42IngressFlowTransformActions(List v switch (encapsulation.getType()) { case TRANSIT_VLAN: MacAddress ethSrc = MacAddress.of(sw.getId()); - MacAddress ethDst = MacAddress.of(command.getEgressSwitchId().toLong()); - actions.add(of.actions().setField(of.oxms().ethSrc(ethSrc))); - actions.add(of.actions().setField(of.oxms().ethDst(ethDst))); - if (!getCommand().getMetadata().isMultiTable()) { actions.add(of.actions() .setField(of.oxms().udpSrc(TransportPort.of(SERVER_42_FLOW_RTT_FORWARD_UDP_PORT)))); diff --git a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/RoutingMetadata.java b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/RoutingMetadata.java index c5daab33c2e..8c195d4aa6c 100644 --- a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/RoutingMetadata.java +++ b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/RoutingMetadata.java @@ -37,7 +37,7 @@ public class RoutingMetadata extends MetadataBase { // NOTE: port count was increased from 128 to 4096. At this moment only 1000 ports can be used // on Noviflow switches. But according to open flow specs port count could be up to 65536. // So we increased port count to maximum possible value. - private static final BitField INPUT_PORT_FIELD = new BitField(0x0000_0000_0FFF_0000L); + private static final BitField INPUT_PORT_FIELD = new BitField(0x0000_0000_0FFE_0000L); static final long MAX_INPUT_PORT = INPUT_PORT_FIELD.getMask() >> INPUT_PORT_FIELD.getOffset(); diff --git a/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/command/flow/ingress/IngressServer42FlowInstallCommandTest.java b/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/command/flow/ingress/IngressServer42FlowInstallCommandTest.java index 8e83f23e41b..52eb96ef054 100644 --- a/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/command/flow/ingress/IngressServer42FlowInstallCommandTest.java +++ b/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/command/flow/ingress/IngressServer42FlowInstallCommandTest.java @@ -60,7 +60,6 @@ import org.projectfloodlight.openflow.protocol.instruction.OFInstructionApplyActions; import org.projectfloodlight.openflow.protocol.match.MatchField; import org.projectfloodlight.openflow.protocol.oxm.OFOxm; -import org.projectfloodlight.openflow.protocol.oxm.OFOxmEthDst; import org.projectfloodlight.openflow.protocol.oxm.OFOxmEthSrc; import org.projectfloodlight.openflow.protocol.oxm.OFOxmVlanVid; import org.projectfloodlight.openflow.protocol.ver13.OFFactoryVer13; @@ -111,11 +110,10 @@ public void server42IngressFlowDoubleTagMultiTableVlan() throws Exception { assertMetadata(mod, VLAN_1, CUSTOMER_PORT); List applyActions = ((OFInstructionApplyActions) mod.getInstructions().get(0)).getActions(); - assertEquals(4, applyActions.size()); + assertEquals(3, applyActions.size()); assertSetField(applyActions.get(0), OFOxmEthSrc.class, MacAddress.of(INGRESS_SWITCH_ID.toMacAddress())); - assertSetField(applyActions.get(1), OFOxmEthDst.class, MacAddress.of(EGRESS_SWITCH_ID.toMacAddress())); - assertSetField(applyActions.get(2), OFOxmVlanVid.class, OFVlanVidMatch.ofVlan(VLAN_ENCAPSULATION.getId())); - assertOutputAction(applyActions.get(3)); + assertSetField(applyActions.get(1), OFOxmVlanVid.class, 
OFVlanVidMatch.ofVlan(VLAN_ENCAPSULATION.getId())); + assertOutputAction(applyActions.get(2)); } @Test @@ -129,12 +127,11 @@ public void server42IngressFlowSingleTagMultiTableVlan() throws Exception { assertMetadata(mod, VLAN_1, CUSTOMER_PORT); List applyActions = ((OFInstructionApplyActions) mod.getInstructions().get(0)).getActions(); - assertEquals(5, applyActions.size()); + assertEquals(4, applyActions.size()); assertSetField(applyActions.get(0), OFOxmEthSrc.class, MacAddress.of(INGRESS_SWITCH_ID.toMacAddress())); - assertSetField(applyActions.get(1), OFOxmEthDst.class, MacAddress.of(EGRESS_SWITCH_ID.toMacAddress())); - assertPushVlanAction(applyActions.get(2)); - assertSetField(applyActions.get(3), OFOxmVlanVid.class, OFVlanVidMatch.ofVlan(VLAN_ENCAPSULATION.getId())); - assertOutputAction(applyActions.get(4)); + assertPushVlanAction(applyActions.get(1)); + assertSetField(applyActions.get(2), OFOxmVlanVid.class, OFVlanVidMatch.ofVlan(VLAN_ENCAPSULATION.getId())); + assertOutputAction(applyActions.get(3)); } @Test @@ -148,12 +145,11 @@ public void server42IngressFlowDefaultMultiTableVlan() throws Exception { assertMetadata(mod, 0, CUSTOMER_PORT); List applyActions = ((OFInstructionApplyActions) mod.getInstructions().get(0)).getActions(); - assertEquals(5, applyActions.size()); + assertEquals(4, applyActions.size()); assertSetField(applyActions.get(0), OFOxmEthSrc.class, MacAddress.of(INGRESS_SWITCH_ID.toMacAddress())); - assertSetField(applyActions.get(1), OFOxmEthDst.class, MacAddress.of(EGRESS_SWITCH_ID.toMacAddress())); - assertPushVlanAction(applyActions.get(2)); - assertSetField(applyActions.get(3), OFOxmVlanVid.class, OFVlanVidMatch.ofVlan(VLAN_ENCAPSULATION.getId())); - assertOutputAction(applyActions.get(4)); + assertPushVlanAction(applyActions.get(1)); + assertSetField(applyActions.get(2), OFOxmVlanVid.class, OFVlanVidMatch.ofVlan(VLAN_ENCAPSULATION.getId())); + assertOutputAction(applyActions.get(3)); } @Test diff --git a/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/utils/metadata/RoutingMetadataTest.java b/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/utils/metadata/RoutingMetadataTest.java index 003edb0382c..7cd470c510c 100644 --- a/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/utils/metadata/RoutingMetadataTest.java +++ b/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/utils/metadata/RoutingMetadataTest.java @@ -31,8 +31,8 @@ public void testFieldsIntersection() { @Test public void testInputPortMetadata() { - int offset = 16; - for (int port = 0; port <= 4095; port++) { + int offset = 17; + for (int port = 0; port <= 2047; port++) { RoutingMetadata metadata = RoutingMetadata.builder().inputPort(port).build(new HashSet<>()); long withoutType = ~TYPE_FIELD.getMask() & metadata.getValue().getValue(); Assertions.assertEquals(port, withoutType >> offset); diff --git a/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/command/haflow/HaFlowRequest.java b/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/command/haflow/HaFlowRequest.java index a257bb84d86..e2663c38f00 100644 --- a/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/command/haflow/HaFlowRequest.java +++ b/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/command/haflow/HaFlowRequest.java @@ -70,7 +70,8 @@ public HaSubFlowDto getHaSubFlow(String 
haSubFlowId) { return subFlow; } } - throw new IllegalArgumentException(String.format("HA-sub flow %s not found. Valida ha-sub flows are: %s", + throw new IllegalArgumentException(String.format("HA-sub flow %s not found. Valid ha-sub flows are: %s", haSubFlowId, subFlows.stream().map(HaSubFlowDto::getFlowId).collect(Collectors.toList()))); } + } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopology.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopology.java index 13ef4c41f99..daa39db50fe 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopology.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/FlowHsTopology.java @@ -1087,6 +1087,12 @@ private void server42ControlTopologyOutput(TopologyBuilder topologyBuilder) { .shuffleGrouping(ComponentId.YFLOW_REROUTE_HUB.name(), Stream.HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name()) .shuffleGrouping(ComponentId.YFLOW_DELETE_HUB.name(), + Stream.HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name()) + .shuffleGrouping(ComponentId.HA_FLOW_CREATE_HUB.name(), + Stream.HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name()) + .shuffleGrouping(ComponentId.HA_FLOW_DELETE_HUB.name(), + Stream.HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name()) + .shuffleGrouping((ComponentId.HA_FLOW_UPDATE_HUB.name()), Stream.HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name()); } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowCreateHubBolt.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowCreateHubBolt.java index 9a370ce0171..25c71cb127f 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowCreateHubBolt.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowCreateHubBolt.java @@ -20,6 +20,7 @@ import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_METRICS_BOLT; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_NB_RESPONSE_SENDER; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_PING_SENDER; +import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_SPEAKER_WORKER; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_STATS_TOPOLOGY_SENDER; import static org.openkilda.wfm.topology.utils.KafkaRecordTranslator.FIELD_ID_PAYLOAD; @@ -43,6 +44,7 @@ import org.openkilda.rulemanager.RuleManager; import org.openkilda.rulemanager.RuleManagerConfig; import org.openkilda.rulemanager.RuleManagerImpl; +import org.openkilda.server42.control.messaging.flowrtt.ActivateFlowMonitoringInfoData; import org.openkilda.wfm.error.PipelineException; import org.openkilda.wfm.share.flow.resources.FlowResourcesConfig; import org.openkilda.wfm.share.flow.resources.FlowResourcesManager; @@ -53,6 +55,7 @@ import org.openkilda.wfm.share.zk.ZooKeeperBolt; import org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream; import org.openkilda.wfm.topology.flowhs.exception.DuplicateKeyException; +import org.openkilda.wfm.topology.flowhs.mapper.RequestedFlowMapper; import 
org.openkilda.wfm.topology.flowhs.model.RequestedFlow; import org.openkilda.wfm.topology.flowhs.service.haflow.HaFlowCreateService; import org.openkilda.wfm.topology.flowhs.service.haflow.HaFlowGenericCarrier; @@ -210,6 +213,7 @@ public void declareOutputFields(OutputFieldsDeclarer declarer) { declarer.declareStream(HUB_TO_HISTORY_TOPOLOGY_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); declarer.declareStream(HUB_TO_PING_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); declarer.declareStream(HUB_TO_FLOW_MONITORING_TOPOLOGY_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); + declarer.declareStream(HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); declarer.declareStream(HUB_TO_STATS_TOPOLOGY_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); declarer.declareStream(ZkStreams.ZK.toString(), new Fields(ZooKeeperBolt.FIELD_ID_STATE, ZooKeeperBolt.FIELD_ID_CONTEXT)); @@ -217,7 +221,11 @@ public void declareOutputFields(OutputFieldsDeclarer declarer) { @Override public void sendActivateFlowMonitoring(@NonNull RequestedFlow flow) { - //TODO: Implement logic during https://github.com/telstra/open-kilda/issues/5208 + ActivateFlowMonitoringInfoData payload = RequestedFlowMapper.INSTANCE.toActivateFlowMonitoringInfoData(flow); + Message message = new InfoMessage(payload, getCommandContext().getCreateTime(), + getCommandContext().getCorrelationId()); + emitWithContext(HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name(), getCurrentTuple(), + new Values(flow.getFlowId(), message)); } @Getter diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowDeleteHubBolt.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowDeleteHubBolt.java index ecd18bad100..5c6ef8df8c1 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowDeleteHubBolt.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowDeleteHubBolt.java @@ -20,6 +20,7 @@ import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_METRICS_BOLT; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_NB_RESPONSE_SENDER; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_PING_SENDER; +import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_SPEAKER_WORKER; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_STATS_TOPOLOGY_SENDER; import static org.openkilda.wfm.topology.utils.KafkaRecordTranslator.FIELD_ID_PAYLOAD; @@ -40,6 +41,7 @@ import org.openkilda.rulemanager.RuleManager; import org.openkilda.rulemanager.RuleManagerConfig; import org.openkilda.rulemanager.RuleManagerImpl; +import org.openkilda.server42.control.messaging.flowrtt.DeactivateFlowMonitoringInfoData; import org.openkilda.wfm.error.PipelineException; import org.openkilda.wfm.share.flow.resources.FlowResourcesConfig; import org.openkilda.wfm.share.flow.resources.FlowResourcesManager; @@ -185,8 +187,13 @@ public void cancelTimeoutCallback(String key) { } @Override - public void sendDeactivateFlowMonitoring(String flow, SwitchId srcSwitchId, SwitchId dstSwitchId) { - //TODO: Implement logic during https://github.com/telstra/open-kilda/issues/5208 + public void 
sendDeactivateFlowMonitoring(String haSubFlowId, SwitchId srcSwitchId, SwitchId dstSwitchId) { + DeactivateFlowMonitoringInfoData payload = DeactivateFlowMonitoringInfoData.builder() + .flowId(haSubFlowId).switchId(srcSwitchId).switchId(dstSwitchId).build(); + Message message = new InfoMessage(payload, getCommandContext().getCreateTime(), + getCommandContext().getCorrelationId()); + emitWithContext(HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name(), getCurrentTuple(), + new Values(haSubFlowId, message)); } @Override @@ -204,6 +211,7 @@ public void declareOutputFields(OutputFieldsDeclarer declarer) { declarer.declareStream(HUB_TO_PING_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); declarer.declareStream(HUB_TO_FLOW_MONITORING_TOPOLOGY_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); declarer.declareStream(HUB_TO_STATS_TOPOLOGY_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); + declarer.declareStream(HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); declarer.declareStream(ZkStreams.ZK.toString(), new Fields(ZooKeeperBolt.FIELD_ID_STATE, ZooKeeperBolt.FIELD_ID_CONTEXT)); } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowUpdateHubBolt.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowUpdateHubBolt.java index 2f6ce2c151a..a7e82dee3e9 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowUpdateHubBolt.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/bolts/HaFlowUpdateHubBolt.java @@ -20,6 +20,7 @@ import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_METRICS_BOLT; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_NB_RESPONSE_SENDER; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_PING_SENDER; +import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_SPEAKER_WORKER; import static org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream.HUB_TO_STATS_TOPOLOGY_SENDER; import static org.openkilda.wfm.topology.utils.KafkaRecordTranslator.FIELD_ID_PAYLOAD; @@ -40,6 +41,7 @@ import org.openkilda.messaging.info.InfoMessage; import org.openkilda.messaging.info.stats.RemoveHaFlowPathInfo; import org.openkilda.messaging.info.stats.UpdateHaFlowPathInfo; +import org.openkilda.model.SwitchId; import org.openkilda.pce.AvailableNetworkFactory; import org.openkilda.pce.PathComputer; import org.openkilda.pce.PathComputerConfig; @@ -48,6 +50,8 @@ import org.openkilda.rulemanager.RuleManager; import org.openkilda.rulemanager.RuleManagerConfig; import org.openkilda.rulemanager.RuleManagerImpl; +import org.openkilda.server42.control.messaging.flowrtt.ActivateFlowMonitoringInfoData; +import org.openkilda.server42.control.messaging.flowrtt.DeactivateFlowMonitoringInfoData; import org.openkilda.wfm.CommandContext; import org.openkilda.wfm.error.PipelineException; import org.openkilda.wfm.share.flow.resources.FlowResourcesConfig; @@ -60,6 +64,7 @@ import org.openkilda.wfm.topology.flowhs.FlowHsTopology.Stream; import org.openkilda.wfm.topology.flowhs.exception.DuplicateKeyException; import org.openkilda.wfm.topology.flowhs.exception.FlowProcessingException; +import 
org.openkilda.wfm.topology.flowhs.mapper.RequestedFlowMapper; import org.openkilda.wfm.topology.flowhs.model.RequestedFlow; import org.openkilda.wfm.topology.flowhs.service.haflow.HaFlowGenericCarrier; import org.openkilda.wfm.topology.flowhs.service.haflow.HaFlowUpdateService; @@ -247,11 +252,26 @@ public void declareOutputFields(OutputFieldsDeclarer declarer) { declarer.declareStream(ZkStreams.ZK.toString(), new Fields(ZooKeeperBolt.FIELD_ID_STATE, ZooKeeperBolt.FIELD_ID_CONTEXT)); declarer.declareStream(HUB_TO_STATS_TOPOLOGY_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); + declarer.declareStream(HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name(), MessageKafkaTranslator.STREAM_FIELDS); } @Override public void sendActivateFlowMonitoring(@NonNull RequestedFlow flow) { - //TODO: Implement logic during https://github.com/telstra/open-kilda/issues/5208 + ActivateFlowMonitoringInfoData payload = RequestedFlowMapper.INSTANCE.toActivateFlowMonitoringInfoData(flow); + Message message = new InfoMessage(payload, getCommandContext().getCreateTime(), + getCommandContext().getCorrelationId()); + emitWithContext(HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name(), getCurrentTuple(), + new Values(flow.getFlowId(), message)); + } + + @Override + public void sendDeactivateFlowMonitoring(String haSubFlowId, SwitchId srcSwitchId, SwitchId dstSwitchId) { + DeactivateFlowMonitoringInfoData payload = DeactivateFlowMonitoringInfoData.builder() + .flowId(haSubFlowId).switchId(srcSwitchId).switchId(dstSwitchId).build(); + Message message = new InfoMessage(payload, getCommandContext().getCreateTime(), + getCommandContext().getCorrelationId()); + emitWithContext(HUB_TO_SERVER42_CONTROL_TOPOLOGY_SENDER.name(), getCurrentTuple(), + new Values(haSubFlowId, message)); } private void sendErrorResponse(Exception exception, ErrorType errorType) { diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/common/actions/haflow/BaseHaResourceAllocationAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/common/actions/haflow/BaseHaResourceAllocationAction.java index b3a00bb189f..bf8d2b6a058 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/common/actions/haflow/BaseHaResourceAllocationAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/common/actions/haflow/BaseHaResourceAllocationAction.java @@ -31,7 +31,7 @@ import org.openkilda.model.SwitchId; import org.openkilda.model.cookie.FlowSegmentCookie; import org.openkilda.model.cookie.FlowSegmentCookie.FlowSegmentCookieBuilder; -import org.openkilda.model.cookie.FlowSegmentCookie.FlowSubType; +import org.openkilda.model.cookie.FlowSubType; import org.openkilda.pce.GetHaPathsResult; import org.openkilda.pce.HaPath; import org.openkilda.pce.Path; diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/create/actions/OnFinishedAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/create/actions/OnFinishedAction.java index e2353053f99..1d449da751e 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/create/actions/OnFinishedAction.java +++ 
b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/create/actions/OnFinishedAction.java @@ -22,6 +22,8 @@ import org.openkilda.wfm.topology.flowhs.fsm.haflow.create.HaFlowCreateFsm; import org.openkilda.wfm.topology.flowhs.fsm.haflow.create.HaFlowCreateFsm.Event; import org.openkilda.wfm.topology.flowhs.fsm.haflow.create.HaFlowCreateFsm.State; +import org.openkilda.wfm.topology.flowhs.mapper.HaFlowMapper; +import org.openkilda.wfm.topology.flowhs.model.RequestedFlow; import org.openkilda.wfm.topology.flowhs.service.history.FlowHistoryService; import org.openkilda.wfm.topology.flowhs.service.history.HaFlowHistory; @@ -37,8 +39,8 @@ public OnFinishedAction(FlowOperationsDashboardLogger dashboardLogger) { @Override public void perform(State from, State to, Event event, HaFlowCreateContext context, HaFlowCreateFsm stateMachine) { - //TODO activate server42 monitoring sendPeriodicPingNotification(stateMachine); + sendActivateFlowMonitoring(stateMachine); dashboardLogger.onSuccessfulHaFlowCreate(stateMachine.getHaFlowId()); FlowHistoryService.using(stateMachine.getCarrier()).save(HaFlowHistory @@ -52,4 +54,11 @@ private void sendPeriodicPingNotification(HaFlowCreateFsm stateMachine) { stateMachine.getCarrier().sendPeriodicPingNotification( requestedFlow.getHaFlowId(), requestedFlow.isPeriodicPings()); } + + private void sendActivateFlowMonitoring(HaFlowCreateFsm stateMachine) { + HaFlowRequest haFlowRequest = stateMachine.getTargetFlow(); + for (RequestedFlow requestedFlow : HaFlowMapper.INSTANCE.toRequestedFlows(haFlowRequest)) { + stateMachine.getCarrier().sendActivateFlowMonitoring(requestedFlow); + } + } } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/create/actions/ResourcesAllocationAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/create/actions/ResourcesAllocationAction.java index 02b42167914..3da8e5897f3 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/create/actions/ResourcesAllocationAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/create/actions/ResourcesAllocationAction.java @@ -30,7 +30,7 @@ import org.openkilda.model.PathId; import org.openkilda.model.cookie.FlowSegmentCookie; import org.openkilda.model.cookie.FlowSegmentCookie.FlowSegmentCookieBuilder; -import org.openkilda.model.cookie.FlowSegmentCookie.FlowSubType; +import org.openkilda.model.cookie.FlowSubType; import org.openkilda.pce.GetHaPathsResult; import org.openkilda.pce.Path; import org.openkilda.pce.PathComputer; diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/HaFlowDeleteFsm.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/HaFlowDeleteFsm.java index 629cbd4f943..b110441c28b 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/HaFlowDeleteFsm.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/HaFlowDeleteFsm.java @@ -17,6 +17,7 @@ import org.openkilda.messaging.error.ErrorType; import org.openkilda.model.FlowStatus; +import org.openkilda.model.HaFlow; import 
org.openkilda.persistence.PersistenceManager; import org.openkilda.rulemanager.RuleManager; import org.openkilda.wfm.CommandContext; @@ -67,6 +68,7 @@ public final class HaFlowDeleteFsm extends HaFlowProcessingFsm { private FlowStatus originalFlowStatus; + private HaFlow targetHaFlow; private final List haFlowResources = new ArrayList<>(); diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/OnFinishedAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/OnFinishedAction.java index eaa0f2be649..5008f2b3c74 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/OnFinishedAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/OnFinishedAction.java @@ -15,6 +15,7 @@ package org.openkilda.wfm.topology.flowhs.fsm.haflow.delete.actions; +import org.openkilda.model.HaSubFlow; import org.openkilda.wfm.share.logger.FlowOperationsDashboardLogger; import org.openkilda.wfm.topology.flowhs.fsm.common.actions.HistoryRecordingAction; import org.openkilda.wfm.topology.flowhs.fsm.haflow.delete.HaFlowDeleteContext; @@ -36,7 +37,7 @@ public OnFinishedAction(FlowOperationsDashboardLogger dashboardLogger) { @Override public void perform(State from, State to, Event event, HaFlowDeleteContext context, HaFlowDeleteFsm stateMachine) { - //TODO deactivate flow monitoring + sendDeactivateFlowMonitoring(stateMachine); stateMachine.getCarrier().sendPeriodicPingNotification(stateMachine.getHaFlowId(), false); dashboardLogger.onSuccessfulHaFlowDelete(stateMachine.getHaFlowId()); FlowHistoryService.using(stateMachine.getCarrier()).save(HaFlowHistory @@ -44,4 +45,14 @@ public void perform(State from, State to, Event event, HaFlowDeleteContext conte .withAction("HA-flow has been deleted successfully") .withHaFlowId(stateMachine.getHaFlowId())); } + + private void sendDeactivateFlowMonitoring(HaFlowDeleteFsm stateMachine) { + for (HaSubFlow haSubFlow : stateMachine.getTargetHaFlow().getHaSubFlows()) { + stateMachine.getCarrier().sendDeactivateFlowMonitoring( + haSubFlow.getHaSubFlowId(), + stateMachine.getTargetHaFlow().getSharedSwitchId(), + haSubFlow.getEndpointSwitchId()); + } + } + } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/RemoveHaFlowAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/RemoveHaFlowAction.java index a3b90022928..f81a1084670 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/RemoveHaFlowAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/delete/actions/RemoveHaFlowAction.java @@ -47,6 +47,8 @@ protected void perform( String affinityGroup = transactionManager.doInTransaction(() -> { HaFlow haFlow = getHaFlow(haFlowId); + // ha-flow is saved in the fsm to be used in OnFinishedAction + stateMachine.setTargetHaFlow(haFlow); log.debug("Removing the ha-flow {}", haFlowId); haFlowRepository.remove(haFlow); return haFlow.getAffinityGroupId(); diff --git 
a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java index 8c044255d00..66a8ec41c04 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/HaFlowUpdateFsm.java @@ -20,6 +20,7 @@ import org.openkilda.pce.PathComputer; import org.openkilda.persistence.PersistenceManager; import org.openkilda.rulemanager.RuleManager; +import org.openkilda.rulemanager.SpeakerData; import org.openkilda.wfm.CommandContext; import org.openkilda.wfm.share.flow.resources.FlowResourcesManager; import org.openkilda.wfm.share.history.model.HaFlowEventData; @@ -77,7 +78,9 @@ import org.squirrelframework.foundation.fsm.StateMachineBuilderFactory; import java.io.Serializable; +import java.util.ArrayList; import java.util.Collection; +import java.util.List; import java.util.concurrent.TimeUnit; @Getter @@ -87,6 +90,7 @@ public final class HaFlowUpdateFsm extends HaFlowPathSwappingFsm { private HaFlowRequest targetHaFlow; private FlowStatus newFlowStatus; + private List targetSpeakerDataCommands = new ArrayList<>(); public HaFlowUpdateFsm(@NonNull CommandContext commandContext, @NonNull HaFlowGenericCarrier carrier, @NonNull String flowId, diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/BuildNewRulesAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/BuildNewRulesAction.java index b9f133c1a48..80af86bed85 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/BuildNewRulesAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/BuildNewRulesAction.java @@ -42,6 +42,8 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; @Slf4j public class BuildNewRulesAction @@ -82,6 +84,9 @@ protected void perform(State from, State to, } } + stateMachine.setTargetSpeakerDataCommands(Stream.concat(ingressCommands.stream(), nonIngressCommands.stream()) + .collect(Collectors.toList())); + buildHaFlowInstallRequests(ingressCommands, stateMachine.getCommandContext(), true) .forEach(request -> stateMachine.getIngressCommands().put(request.getCommandId(), request)); buildHaFlowInstallRequests(nonIngressCommands, stateMachine.getCommandContext(), true) diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/OnFinishedAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/OnFinishedAction.java index e94920487a0..38327007e60 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/OnFinishedAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/OnFinishedAction.java @@ -18,6 +18,7 @@ import static 
java.lang.String.format; import org.openkilda.messaging.command.haflow.HaFlowRequest; +import org.openkilda.model.FlowEndpoint; import org.openkilda.model.FlowStatus; import org.openkilda.model.HaFlow; import org.openkilda.model.HaSubFlow; @@ -28,13 +29,12 @@ import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.HaFlowUpdateFsm.Event; import org.openkilda.wfm.topology.flowhs.fsm.haflow.update.HaFlowUpdateFsm.State; import org.openkilda.wfm.topology.flowhs.mapper.HaFlowMapper; +import org.openkilda.wfm.topology.flowhs.model.RequestedFlow; import org.openkilda.wfm.topology.flowhs.service.history.FlowHistoryService; import org.openkilda.wfm.topology.flowhs.service.history.HaFlowHistory; import lombok.extern.slf4j.Slf4j; -import java.util.stream.Collectors; - @Slf4j public class OnFinishedAction extends HistoryRecordingAction { public static final String DEGRADED_FAIL_REASON = "Not all paths meet the SLA"; @@ -77,49 +77,27 @@ private void sendPeriodicPingNotification(HaFlowUpdateFsm stateMachine) { private void updateFlowMonitoring(HaFlowUpdateFsm stateMachine) { HaFlow original = stateMachine.getOriginalHaFlow(); - HaFlow target = mapToTargetHaFlow(stateMachine.getTargetHaFlow()); - - for (HaSubFlow originalSubFlow : original.getHaSubFlows()) { - HaSubFlow targetSubFlow = target.getHaSubFlowOrThrowException(originalSubFlow.getHaSubFlowId()); - boolean originalNotSingle = !originalSubFlow.isOneSwitch(); - boolean targetNotSingle = !targetSubFlow.isOneSwitch(target.getSharedSwitchId()); - boolean srcUpdated = isSrcUpdated(original, target); - boolean dstUpdated = isDstUpdated(originalSubFlow, targetSubFlow); - - // clean up old if it is not single - //TODO: Review logic during https://github.com/telstra/open-kilda/issues/5208 - if (originalNotSingle && (srcUpdated || dstUpdated)) { - stateMachine.getCarrier().sendDeactivateFlowMonitoring(stateMachine.getFlowId(), - original.getSharedSwitchId(), originalSubFlow.getEndpointSwitchId()); - - } - // setup new if it is not single - //TODO: Review logic during https://github.com/telstra/open-kilda/issues/5208 - if (targetNotSingle && (srcUpdated || dstUpdated)) { - //stateMachine.getCarrier().sendActivateFlowMonitoring(null); + HaFlowRequest target = stateMachine.getTargetHaFlow(); + + for (RequestedFlow targetSubFlow : HaFlowMapper.INSTANCE.toRequestedFlows(target)) { + HaSubFlow originalHaSubFlow = original.getHaSubFlow(targetSubFlow.getFlowId()).orElseThrow( + () -> new IllegalArgumentException(format("target ha-subflow %s not found " + + "in the original ha-flow %s", + targetSubFlow.getFlowId(), original.getHaFlowId()))); + if (isEndpointUpdated(originalHaSubFlow.getEndpoint(), targetSubFlow.getDstEndpoint())) { + if (!originalHaSubFlow.isOneSwitch()) { + stateMachine.getCarrier().sendDeactivateFlowMonitoring(originalHaSubFlow.getHaSubFlowId(), + original.getSharedSwitchId(), originalHaSubFlow.getEndpointSwitchId()); + } + + if (!targetSubFlow.isOneSwitchFlow()) { + stateMachine.getCarrier().sendActivateFlowMonitoring(targetSubFlow); + } } } } - private boolean isSrcUpdated(HaFlow original, HaFlow target) { - return !(original.getSharedSwitchId().equals(target.getSharedSwitchId()) - && original.getSharedPort() == target.getSharedPort() - && original.getSharedOuterVlan() == target.getSharedOuterVlan() - && original.getSharedInnerVlan() == target.getSharedInnerVlan()); - } - - private boolean isDstUpdated(HaSubFlow originalSubFlow, HaSubFlow targetSubFlow) { - return 
!(originalSubFlow.getEndpointSwitchId().equals(targetSubFlow.getEndpointSwitchId()) - && originalSubFlow.getEndpointPort() == targetSubFlow.getEndpointPort() - && originalSubFlow.getEndpointVlan() == targetSubFlow.getEndpointVlan() - && originalSubFlow.getEndpointInnerVlan() == targetSubFlow.getEndpointInnerVlan()); - } - - private HaFlow mapToTargetHaFlow(HaFlowRequest targetHaFlowRequest) { - HaFlow target = HaFlowMapper.INSTANCE.toHaFlow(targetHaFlowRequest); - target.setHaSubFlows(targetHaFlowRequest.getSubFlows().stream() - .map(HaFlowMapper.INSTANCE::toSubFlow) - .collect(Collectors.toSet())); - return target; + private boolean isEndpointUpdated(FlowEndpoint originalEndpoint, FlowEndpoint targetEndpoint) { + return !originalEndpoint.equals(targetEndpoint); } } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/RemoveOldRulesAction.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/RemoveOldRulesAction.java index e54fab8f6d9..f24a794d595 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/RemoveOldRulesAction.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/update/actions/RemoveOldRulesAction.java @@ -87,6 +87,7 @@ protected void perform( .withHaFlowId(stateMachine.getHaFlowId())); stateMachine.fire(Event.RULES_REMOVED); } else { + commands.removeAll(stateMachine.getTargetSpeakerDataCommands()); Collection deleteRequests = buildHaFlowDeleteRequests( commands, stateMachine.getCommandContext()); diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/model/RequestedFlow.java b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/model/RequestedFlow.java index 640da1f3cf0..f11719ab7ce 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/model/RequestedFlow.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/main/java/org/openkilda/wfm/topology/flowhs/model/RequestedFlow.java @@ -16,6 +16,7 @@ package org.openkilda.wfm.topology.flowhs.model; import org.openkilda.model.FlowEncapsulationType; +import org.openkilda.model.FlowEndpoint; import org.openkilda.model.PathComputationStrategy; import org.openkilda.model.SwitchId; @@ -67,4 +68,16 @@ public class RequestedFlow { public boolean isOneSwitchFlow() { return Objects.equals(srcSwitch, destSwitch); } + + /** + * Returns dst endpoint. 
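+ * For example (illustrative values), a flow with destSwitch=00:00:00:00:00:00:00:02, destPort=10,
+ * destVlan=100 and destInnerVlan=200 yields an endpoint carrying the same switch, port, outer and
+ * inner VLAN, so callers such as {@code isEndpointUpdated} in the update OnFinishedAction can
+ * compare endpoints with {@code equals}.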
+ */ + public FlowEndpoint getDstEndpoint() { + return FlowEndpoint.builder() + .switchId(destSwitch) + .portNumber(destPort) + .outerVlanId(destVlan) + .innerVlanId(destInnerVlan) + .build(); + } } diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/validation/HaFlowValidationTestBase.java b/src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/validation/HaFlowValidationTestBase.java index ec597069dfc..68bb47eb1ed 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/validation/HaFlowValidationTestBase.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/fsm/haflow/validation/HaFlowValidationTestBase.java @@ -41,6 +41,7 @@ import org.openkilda.model.SwitchStatus; import org.openkilda.model.cookie.Cookie; import org.openkilda.model.cookie.FlowSegmentCookie; +import org.openkilda.model.cookie.FlowSubType; import org.openkilda.rulemanager.Field; import org.openkilda.rulemanager.FlowSpeakerData; import org.openkilda.rulemanager.GroupSpeakerData; @@ -86,19 +87,19 @@ public class HaFlowValidationTestBase { public static final FlowSegmentCookie FORWARD_COOKIE = FlowSegmentCookie.builder() .direction(FlowPathDirection.FORWARD).flowEffectiveId(1) - .subType(FlowSegmentCookie.FlowSubType.SHARED).build(); + .subType(FlowSubType.SHARED).build(); public static final FlowSegmentCookie REVERSE_COOKIE = FlowSegmentCookie.builder() .direction(FlowPathDirection.REVERSE).flowEffectiveId(1) - .subType(FlowSegmentCookie.FlowSubType.SHARED).build(); + .subType(FlowSubType.SHARED).build(); public static final FlowSegmentCookie FORWARD_SUB_COOKIE_1 = FORWARD_COOKIE.toBuilder() - .subType(FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1).build(); + .subType(FlowSubType.HA_SUB_FLOW_1).build(); public static final FlowSegmentCookie REVERSE_SUB_COOKIE_1 = REVERSE_COOKIE.toBuilder() - .subType(FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1).build(); + .subType(FlowSubType.HA_SUB_FLOW_1).build(); public static final FlowSegmentCookie FORWARD_SUB_COOKIE_2 = FORWARD_COOKIE.toBuilder() - .subType(FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2).build(); + .subType(FlowSubType.HA_SUB_FLOW_2).build(); public static final FlowSegmentCookie REVERSE_SUB_COOKIE_2 = REVERSE_COOKIE.toBuilder() - .subType(FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2).build(); + .subType(FlowSubType.HA_SUB_FLOW_2).build(); protected static Switch buildSwitch(SwitchId switchId) { diff --git a/src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/mapper/RequestedFlowMapperTest.java b/src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/mapper/RequestedFlowMapperTest.java index 1387e70e697..1ab41bf371b 100644 --- a/src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/mapper/RequestedFlowMapperTest.java +++ b/src-java/flowhs-topology/flowhs-storm-topology/src/test/java/org/openkilda/wfm/topology/flowhs/mapper/RequestedFlowMapperTest.java @@ -28,6 +28,7 @@ import org.openkilda.model.PathComputationStrategy; import org.openkilda.model.Switch; import org.openkilda.model.SwitchId; +import org.openkilda.server42.control.messaging.flowrtt.ActivateFlowMonitoringInfoData; import org.openkilda.wfm.topology.flowhs.model.DetectConnectedDevices; import org.openkilda.wfm.topology.flowhs.model.RequestedFlow; @@ -298,4 
+299,36 @@ public void mapSwapDtoToRequesterFlowTest() { assertEquals(DST_VLAN, requestedFlow.getDestVlan()); assertEquals(DST_INNER_VLAN, requestedFlow.getDestInnerVlan()); } + + @Test + public void requestedFlowToActivateFlowMonitoringInfoData() { + RequestedFlow requestedFlow = RequestedFlow.builder() + .flowId(FLOW_ID) + .srcSwitch(SRC_SWITCH_ID) + .srcPort(SRC_PORT) + .srcVlan(SRC_VLAN) + .srcInnerVlan(SRC_INNER_VLAN) + .destSwitch(DST_SWITCH_ID) + .destPort(DST_PORT) + .destVlan(DST_VLAN) + .destInnerVlan(DST_INNER_VLAN) + .haFlowId("ha_flow_id") + .build(); + + ActivateFlowMonitoringInfoData infoData = RequestedFlowMapper.INSTANCE + .toActivateFlowMonitoringInfoData(requestedFlow); + + assertEquals(FLOW_ID, infoData.getId()); + assertEquals(SRC_SWITCH_ID, infoData.getSource().getDatapath()); + assertEquals(SRC_PORT, (int) infoData.getSource().getPortNumber()); + assertEquals(SRC_VLAN, (int) infoData.getSource().getVlanId()); + assertEquals(SRC_INNER_VLAN, (int) infoData.getSource().getInnerVlanId()); + + assertEquals(DST_SWITCH_ID, infoData.getDestination().getDatapath()); + assertEquals(DST_PORT, (int) infoData.getDestination().getPortNumber()); + assertEquals(DST_VLAN, (int) infoData.getDestination().getVlanId()); + assertEquals(DST_INNER_VLAN, (int) infoData.getDestination().getInnerVlanId()); + + assertEquals("ha_flow_id", infoData.getHaFlowId()); + } } diff --git a/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSegmentCookie.java b/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSegmentCookie.java index e997dd986a4..2d76a95757c 100644 --- a/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSegmentCookie.java +++ b/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSegmentCookie.java @@ -18,7 +18,6 @@ import org.openkilda.exception.InvalidCookieException; import org.openkilda.model.FlowPathDirection; import org.openkilda.model.bitops.BitField; -import org.openkilda.model.bitops.NumericEnumField; import com.fasterxml.jackson.annotation.JsonCreator; import com.google.common.collect.ImmutableSet; @@ -41,12 +40,13 @@ public class FlowSegmentCookie extends Cookie { static final BitField FLOW_LOOP_FLAG = new BitField(0x0008_0000_0000_0000L); static final BitField FLOW_MIRROR_FLAG = new BitField(0x0004_0000_0000_0000L); static final BitField Y_FLOW_FLAG = new BitField(0x0002_0000_0000_0000L); - static final BitField HA_FLOW_FLAG = new BitField(0x0001_0000_0000_0000L); + static final BitField HA_SUB_FLOW_SERVER_42_FLAG = new BitField(0x0001_0000_0000_0000L); // used by unit tests to check fields intersections static final BitField[] ALL_FIELDS = ArrayUtils.addAll( CookieBase.ALL_FIELDS, FLOW_FORWARD_DIRECTION_FLAG, FLOW_REVERSE_DIRECTION_FLAG, FLOW_EFFECTIVE_ID_FIELD, - FLOW_LOOP_FLAG, FLOW_MIRROR_FLAG, Y_FLOW_FLAG, HA_FLOW_FLAG, STATS_VLAN_ID_FIELD, SUB_TYPE_FIELD); + FLOW_LOOP_FLAG, FLOW_MIRROR_FLAG, Y_FLOW_FLAG, HA_SUB_FLOW_SERVER_42_FLAG, STATS_VLAN_ID_FIELD, + SUB_TYPE_FIELD); private static final Set VALID_TYPES = ImmutableSet.of( CookieType.SERVICE_OR_FLOW_SEGMENT, @@ -59,7 +59,7 @@ public FlowSegmentCookie(long value) { } public FlowSegmentCookie(FlowPathDirection direction, long flowEffectiveId) { - this(CookieType.SERVICE_OR_FLOW_SEGMENT, direction, flowEffectiveId, false, false, false, 0, null); + this(CookieType.SERVICE_OR_FLOW_SEGMENT, direction, flowEffectiveId, false, false, false, false, 0, null); } FlowSegmentCookie(CookieType type, long value) { @@ -68,8 +68,10 @@ public 
FlowSegmentCookie(FlowPathDirection direction, long flowEffectiveId) { @Builder private FlowSegmentCookie(CookieType type, FlowPathDirection direction, long flowEffectiveId, boolean looped, - boolean mirror, boolean yFlow, int statsVlan, FlowSubType subType) { - super(makeValue(type, direction, flowEffectiveId, looped, mirror, yFlow, statsVlan, subType), type); + boolean mirror, boolean yFlow, boolean haSubFlowServer42, int statsVlan, + FlowSubType subType) { + super(makeValue(type, direction, flowEffectiveId, looped, mirror, yFlow, haSubFlowServer42, statsVlan, subType), + type); } @Override @@ -103,6 +105,8 @@ public FlowSegmentCookieBuilder toBuilder() { .looped(isLooped()) .mirror(isMirror()) .yFlow(isYFlow()) + .haSubFlowServer42(isHaSubFlowServer42()) + .subType(getFlowSubType()) .statsVlan(getStatsVlan()); } @@ -152,6 +156,10 @@ public boolean isYFlow() { return getField(Y_FLOW_FLAG) == 1; } + public boolean isHaSubFlowServer42() { + return getField(HA_SUB_FLOW_SERVER_42_FLAG) == 1; + } + public FlowSubType getFlowSubType() { long longValue = getField(SUB_TYPE_FIELD); return resolveEnum(FlowSubType.values(), longValue).orElse(FlowSubType.INVALID); @@ -163,7 +171,8 @@ public static FlowSegmentCookieBuilder builder() { } private static long makeValue(CookieType type, FlowPathDirection direction, long flowEffectiveId, - boolean looped, boolean mirror, boolean yFlow, int statsVlan, FlowSubType subType) { + boolean looped, boolean mirror, boolean yFlow, boolean haSubFlowServer42, + int statsVlan, FlowSubType subType) { if (!VALID_TYPES.contains(type)) { throw new IllegalArgumentException(formatIllegalTypeError(type, VALID_TYPES)); } @@ -183,6 +192,9 @@ private static long makeValue(CookieType type, FlowPathDirection direction, long if (yFlow) { result = setField(result, Y_FLOW_FLAG, 1); } + if (haSubFlowServer42) { + result = setField(result, HA_SUB_FLOW_SERVER_42_FLAG, 1); + } if (subType != null) { result = setField(result, SUB_TYPE_FIELD, subType.getValue()); } @@ -215,29 +227,6 @@ protected static long makeValueDirection(FlowPathDirection direction) { return setField(value, FLOW_REVERSE_DIRECTION_FLAG, reverse); } - // 2 bit long type field - public enum FlowSubType implements NumericEnumField { - SHARED(0x00), - HA_SUB_FLOW_1(0x01), - HA_SUB_FLOW_2(0x02), - - // This do not consume any value from allowed address space - you can define another field with -1 value. - // (must be last entry) - INVALID(-1); - - private final int value; - - FlowSubType(int value) { - this.value = value; - } - - public int getValue() { - return value; - } - - public static FlowSubType[] HA_SUB_FLOW_TYPES = new FlowSubType[] {HA_SUB_FLOW_1, HA_SUB_FLOW_2}; - } - /** * Need to declare builder inheritance, to be able to override {@code toBuilder()} method. 
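* A round trip is expected to preserve every field (illustrative values): building a cookie with
* {@code builder().direction(FORWARD).flowEffectiveId(10).subType(HA_SUB_FLOW_1).build()} and then
* calling {@code toBuilder().haSubFlowServer42(true).build()} keeps the direction, effective id and
* sub-type and only sets the server42 flag, as exercised by changingOfFlowSegmentCookieServer42Test.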
*/ diff --git a/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSharedSegmentCookie.java b/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSharedSegmentCookie.java index 8393d860354..39d11d4118c 100644 --- a/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSharedSegmentCookie.java +++ b/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSharedSegmentCookie.java @@ -29,11 +29,13 @@ public class FlowSharedSegmentCookie extends CookieBase { static final BitField SHARED_TYPE_FIELD = new BitField(0x000F_0000_0000_0000L); static final BitField PORT_NUMBER_FIELD = new BitField(0x0000_0000_0000_FFFFL); static final BitField VLAN_ID_FIELD = new BitField(0x0000_0000_0FFF_0000L); + static final BitField SUB_TYPE_FIELD = new BitField(0x0000_000F_0000_0000L); + // used by unit tests to check fields intersections static final BitField[] ALL_FIELDS = ArrayUtils.addAll( CookieBase.ALL_FIELDS, - SHARED_TYPE_FIELD, PORT_NUMBER_FIELD, VLAN_ID_FIELD); + SHARED_TYPE_FIELD, PORT_NUMBER_FIELD, VLAN_ID_FIELD, SUB_TYPE_FIELD); private static final CookieType VALID_TYPE = CookieType.SHARED_OF_FLOW; @@ -43,8 +45,9 @@ public FlowSharedSegmentCookie(long value) { } @Builder - protected FlowSharedSegmentCookie(CookieType type, SharedSegmentType segmentType, int portNumber, int vlanId) { - super(makeValue(type, segmentType, portNumber, vlanId), type); + protected FlowSharedSegmentCookie(CookieType type, SharedSegmentType segmentType, int portNumber, int vlanId, + FlowSubType subType) { + super(makeValue(type, segmentType, portNumber, vlanId, subType), type); } /** @@ -71,6 +74,11 @@ public int getVlanId() { return (int) getField(VLAN_ID_FIELD); } + public FlowSubType getFlowSubType() { + long longValue = getField(SUB_TYPE_FIELD); + return resolveEnum(FlowSubType.values(), longValue).orElse(FlowSubType.INVALID); + } + /** * Make builder. */ @@ -80,14 +88,19 @@ public static FlowSharedSegmentCookieBuilder builder(SharedSegmentType segmentTy .segmentType(segmentType); } - private static long makeValue(CookieType type, SharedSegmentType segmentType, int portNumber, int vlanId) { + private static long makeValue(CookieType type, SharedSegmentType segmentType, int portNumber, int vlanId, + FlowSubType subType) { if (type != VALID_TYPE) { throw new IllegalArgumentException(formatIllegalTypeError(type, VALID_TYPE)); } - long value = setField(0, SHARED_TYPE_FIELD, segmentType.getValue()); - value = setField(value, PORT_NUMBER_FIELD, portNumber); - return setField(value, VLAN_ID_FIELD, vlanId); + long result = setField(0, SHARED_TYPE_FIELD, segmentType.getValue()); + result = setField(result, PORT_NUMBER_FIELD, portNumber); + result = setField(result, VLAN_ID_FIELD, vlanId); + if (subType != null) { + result = setField(result, SUB_TYPE_FIELD, subType.getValue()); + } + return result; } public enum SharedSegmentType implements NumericEnumField { diff --git a/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSubType.java b/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSubType.java new file mode 100644 index 00000000000..947f7d47df8 --- /dev/null +++ b/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/FlowSubType.java @@ -0,0 +1,41 @@ +/* Copyright 2023 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.openkilda.model.cookie; + +import org.openkilda.model.bitops.NumericEnumField; + +import lombok.Getter; + +@Getter +public enum FlowSubType implements NumericEnumField { + // 2-bit-long type field + + SHARED(0x00), + HA_SUB_FLOW_1(0x01), + HA_SUB_FLOW_2(0x02), + + // This does not consume any value from the allowed address space - you can define another field with the -1 value. + // (must be last entry) + INVALID(-1); + + private final int value; + + FlowSubType(int value) { + this.value = value; + } + + public static final FlowSubType[] HA_SUB_FLOW_TYPES = new FlowSubType[] {HA_SUB_FLOW_1, HA_SUB_FLOW_2}; +} diff --git a/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/PortColourCookie.java b/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/PortColourCookie.java index 0ce2b32769f..68a617b6e57 100644 --- a/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/PortColourCookie.java +++ b/src-java/kilda-model/src/main/java/org/openkilda/model/cookie/PortColourCookie.java @@ -39,30 +39,41 @@ public class PortColourCookie extends CookieBase implements Comparable<PortColourCookie> { // used by generic cookie -> 0x9FF0_0000_0000_0000L - static final BitField PORT_FIELD = new BitField(0x0000_0000_FFFF_FFFFL); + static final BitField PORT_FIELD = new BitField(0x0000_0000_FFFF_FFFFL); + static final BitField SUB_TYPE_FIELD = new BitField(0x0000_000F_0000_0000L); // used by unit tests to check fields intersections - static final BitField[] ALL_FIELDS = ArrayUtils.addAll(CookieBase.ALL_FIELDS, PORT_FIELD); + static final BitField[] ALL_FIELDS = ArrayUtils.addAll(CookieBase.ALL_FIELDS, PORT_FIELD, SUB_TYPE_FIELD); public PortColourCookie(long value) { super(value); } @Builder + public PortColourCookie(CookieType type, int portNumber, FlowSubType subType) { + super(makeValue(type, portNumber, subType), type); + } + public PortColourCookie(CookieType type, int portNumber) { - super(makeValue(type, portNumber), type); + this(type, portNumber, null); } public int getPortNumber() { return (int) getField(PORT_FIELD); } + public FlowSubType getFlowSubType() { + long longValue = getField(SUB_TYPE_FIELD); + return resolveEnum(FlowSubType.values(), longValue).orElse(FlowSubType.INVALID); + } + /** * Convert an existing {@link PortColourCookie} instance into {@link PortColourCookieBuilder}. */ public PortColourCookieBuilder toBuilder() { return new PortColourCookieBuilder() .type(getType()) + .subType(getFlowSubType()) .portNumber(getPortNumber()); } @@ -71,12 +82,18 @@ public int compareTo(PortColourCookie other) { return cookieComparison(other); } - private static long makeValue(CookieType type, int portNumber) { + private static long makeValue(CookieType type, int portNumber, FlowSubType subType) {
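+ // Illustrative layout example: with portNumber = 8 and subType = HA_SUB_FLOW_1 (0x1),
+ // PORT_FIELD contributes 0x0000_0000_0000_0008L and SUB_TYPE_FIELD contributes
+ // 0x0000_0001_0000_0000L, so the port bits and the sub-type bits never overlap.
if (!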
allowedTypes.contains(type)) { throw new IllegalArgumentException(formatIllegalTypeError(type, allowedTypes)); } - long value = setField(0, SERVICE_FLAG, 1); - return setField(value, PORT_FIELD, portNumber); + long result = setField(0, SERVICE_FLAG, 1); + result = setField(result, PORT_FIELD, portNumber); + + if (subType != null) { + result = setField(result, SUB_TYPE_FIELD, subType.getValue()); + } + + return result; } } diff --git a/src-java/kilda-model/src/test/java/org/openkilda/model/cookie/FlowSegmentCookieTest.java b/src-java/kilda-model/src/test/java/org/openkilda/model/cookie/FlowSegmentCookieTest.java index 64a4c442fca..af24a1a70c5 100644 --- a/src-java/kilda-model/src/test/java/org/openkilda/model/cookie/FlowSegmentCookieTest.java +++ b/src-java/kilda-model/src/test/java/org/openkilda/model/cookie/FlowSegmentCookieTest.java @@ -16,6 +16,7 @@ package org.openkilda.model.cookie; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; import org.openkilda.model.FlowPathDirection; import org.openkilda.model.cookie.CookieBase.CookieType; @@ -53,4 +54,27 @@ public void changingOfFlowSegmentCookieTypeTest() { .build(); assertEquals(CookieType.SERVER_42_FLOW_RTT_INGRESS, server42Cookie.getType()); } + + @Test + public void changingOfFlowSegmentCookieServer42Test() { + + FlowSegmentCookie flowCookie = FlowSegmentCookie.builder() + .direction(FlowPathDirection.FORWARD) + .flowEffectiveId(10) + .subType(FlowSubType.HA_SUB_FLOW_1) + .build(); + + assertEquals(CookieType.SERVICE_OR_FLOW_SEGMENT, flowCookie.getType()); + assertEquals(FlowSubType.HA_SUB_FLOW_1, flowCookie.getFlowSubType()); + + FlowSegmentCookie server42Cookie = flowCookie.toBuilder() + .haSubFlowServer42(true) + .build(); + + assertTrue(server42Cookie.isHaSubFlowServer42()); + assertEquals(FlowSubType.HA_SUB_FLOW_1, server42Cookie.getFlowSubType()); + + + } } + diff --git a/src-java/kilda-persistence-api/src/main/java/org/openkilda/persistence/repositories/HaFlowRepository.java b/src-java/kilda-persistence-api/src/main/java/org/openkilda/persistence/repositories/HaFlowRepository.java index cbbca0642d6..09648a4d325 100644 --- a/src-java/kilda-persistence-api/src/main/java/org/openkilda/persistence/repositories/HaFlowRepository.java +++ b/src-java/kilda-persistence-api/src/main/java/org/openkilda/persistence/repositories/HaFlowRepository.java @@ -33,6 +33,8 @@ public interface HaFlowRepository extends Repository { Collection findByEndpoint(SwitchId switchId, int port, int vlan, int innerVLan); + Collection findBySharedEndpointSwitchId(SwitchId switchId); + Collection findWithPeriodicPingsEnabled(); Collection findInactive(); diff --git a/src-java/kilda-persistence-api/src/main/java/org/openkilda/persistence/repositories/HaSubFlowRepository.java b/src-java/kilda-persistence-api/src/main/java/org/openkilda/persistence/repositories/HaSubFlowRepository.java index 73095b4a1fb..b9340de5e41 100644 --- a/src-java/kilda-persistence-api/src/main/java/org/openkilda/persistence/repositories/HaSubFlowRepository.java +++ b/src-java/kilda-persistence-api/src/main/java/org/openkilda/persistence/repositories/HaSubFlowRepository.java @@ -16,6 +16,7 @@ package org.openkilda.persistence.repositories; import org.openkilda.model.HaSubFlow; +import org.openkilda.model.SwitchId; import java.util.Collection; import java.util.Optional; @@ -26,4 +27,6 @@ public interface HaSubFlowRepository extends Repository { boolean exists(String haSubFlowId); Optional findById(String haSubFlowId); + + 
Collection findByEndpointSwitchId(SwitchId switchId); } diff --git a/src-java/kilda-persistence-tinkerpop/src/main/java/org/openkilda/persistence/ferma/repositories/FermaHaFlowRepository.java b/src-java/kilda-persistence-tinkerpop/src/main/java/org/openkilda/persistence/ferma/repositories/FermaHaFlowRepository.java index 0c5fa66a2aa..913805e759f 100644 --- a/src-java/kilda-persistence-tinkerpop/src/main/java/org/openkilda/persistence/ferma/repositories/FermaHaFlowRepository.java +++ b/src-java/kilda-persistence-tinkerpop/src/main/java/org/openkilda/persistence/ferma/repositories/FermaHaFlowRepository.java @@ -126,6 +126,17 @@ public Collection findByEndpoint(SwitchId switchId, int port, int vlan, return result.values(); } + @Override + public Collection findBySharedEndpointSwitchId(SwitchId switchId) { + return framedGraph().traverse(g -> g.V() + .hasLabel(HaFlowFrame.FRAME_LABEL) + .has(HaFlowFrame.SHARED_ENDPOINT_SWITCH_ID_PROPERTY, + SwitchIdConverter.INSTANCE.toGraphProperty(switchId))) + .toListExplicit(HaFlowFrame.class).stream() + .map(HaFlow::new) + .collect(Collectors.toList()); + } + @Override public Collection findInactive() { String downStatus = FlowStatusConverter.INSTANCE.toGraphProperty(FlowStatus.DOWN); diff --git a/src-java/kilda-persistence-tinkerpop/src/main/java/org/openkilda/persistence/ferma/repositories/FermaHaSubFlowRepository.java b/src-java/kilda-persistence-tinkerpop/src/main/java/org/openkilda/persistence/ferma/repositories/FermaHaSubFlowRepository.java index 6d5631ac0a8..9b5a1b54cf1 100644 --- a/src-java/kilda-persistence-tinkerpop/src/main/java/org/openkilda/persistence/ferma/repositories/FermaHaSubFlowRepository.java +++ b/src-java/kilda-persistence-tinkerpop/src/main/java/org/openkilda/persistence/ferma/repositories/FermaHaSubFlowRepository.java @@ -17,10 +17,12 @@ import org.openkilda.model.HaSubFlow; import org.openkilda.model.HaSubFlow.HaSubFlowData; +import org.openkilda.model.SwitchId; import org.openkilda.persistence.exceptions.PersistenceException; import org.openkilda.persistence.ferma.FermaPersistentImplementation; import org.openkilda.persistence.ferma.frames.HaSubFlowFrame; import org.openkilda.persistence.ferma.frames.KildaBaseVertexFrame; +import org.openkilda.persistence.ferma.frames.converters.SwitchIdConverter; import org.openkilda.persistence.repositories.FlowStatsRepository; import org.openkilda.persistence.repositories.HaSubFlowRepository; @@ -69,6 +71,17 @@ public Optional findById(String haSubFlowId) { return HaSubFlowFrame.load(framedGraph(), haSubFlowId).map(HaSubFlow::new); } + @Override + public Collection findByEndpointSwitchId(SwitchId switchId) { + return framedGraph().traverse(g -> g.V() + .hasLabel(HaSubFlowFrame.FRAME_LABEL) + .has(HaSubFlowFrame.ENDPOINT_SWITCH_ID_PROPERTY, + SwitchIdConverter.INSTANCE.toGraphProperty(switchId))) + .toListExplicit(HaSubFlowFrame.class).stream() + .map(HaSubFlow::new) + .collect(Collectors.toList()); + } + @Override protected HaSubFlowFrame doAdd(HaSubFlowData data) { HaSubFlowFrame frame = KildaBaseVertexFrame.addNewFramedVertex(framedGraph(), HaSubFlowFrame.FRAME_LABEL, diff --git a/src-java/kilda-persistence-tinkerpop/src/test/java/org/openkilda/persistence/ferma/repositories/FermaHaSubFlowRepositoryTest.java b/src-java/kilda-persistence-tinkerpop/src/test/java/org/openkilda/persistence/ferma/repositories/FermaHaSubFlowRepositoryTest.java index 0f1649fb991..8b8a2283b8b 100644 --- 
a/src-java/kilda-persistence-tinkerpop/src/test/java/org/openkilda/persistence/ferma/repositories/FermaHaSubFlowRepositoryTest.java +++ b/src-java/kilda-persistence-tinkerpop/src/test/java/org/openkilda/persistence/ferma/repositories/FermaHaSubFlowRepositoryTest.java @@ -137,6 +137,20 @@ public void haSubFlowsExistTest() { assertFalse(haSubFlowRepository.exists(SUB_FLOW_ID_3)); } + @Test + public void findByEndpointSwitchId() { + HaSubFlow sub1 = createSubFlow(SUB_FLOW_ID_1, switch1, PORT_1, VLAN_1, INNER_VLAN_1, DESCRIPTION_1); + HaSubFlow sub2 = createSubFlow(SUB_FLOW_ID_2, switch2, PORT_2, VLAN_2, INNER_VLAN_2, DESCRIPTION_2); + haFlow.setHaSubFlows(Sets.newHashSet(sub1, sub2)); + + Collection subFlows = haSubFlowRepository.findByEndpointSwitchId(SWITCH_ID_1); + + assertEquals(1, subFlows.size()); + assertSubFlow(SUB_FLOW_ID_1, SWITCH_ID_1, PORT_1, VLAN_1, INNER_VLAN_1, haFlow, subFlows.iterator().next(), + DESCRIPTION_1); + } + + private void assertSubFlow( String subFlowId, SwitchId switchId, int port, int vlan, int innerVLan, HaFlow haFlow, HaSubFlow actualSubFlow, String description) { diff --git a/src-java/reroute-topology/reroute-storm-topology/src/test/java/org/openkilda/wfm/topology/reroute/service/RerouteServiceTest.java b/src-java/reroute-topology/reroute-storm-topology/src/test/java/org/openkilda/wfm/topology/reroute/service/RerouteServiceTest.java index 3e10d04addd..b6be20d348c 100644 --- a/src-java/reroute-topology/reroute-storm-topology/src/test/java/org/openkilda/wfm/topology/reroute/service/RerouteServiceTest.java +++ b/src-java/reroute-topology/reroute-storm-topology/src/test/java/org/openkilda/wfm/topology/reroute/service/RerouteServiceTest.java @@ -59,7 +59,7 @@ import org.openkilda.model.YFlow.SharedEndpoint; import org.openkilda.model.YSubFlow; import org.openkilda.model.cookie.FlowSegmentCookie; -import org.openkilda.model.cookie.FlowSegmentCookie.FlowSubType; +import org.openkilda.model.cookie.FlowSubType; import org.openkilda.persistence.PersistenceManager; import org.openkilda.persistence.exceptions.EntityNotFoundException; import org.openkilda.persistence.repositories.FlowPathRepository; diff --git a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/Constants.java b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/Constants.java index a4a0485a239..c39ef155d6b 100644 --- a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/Constants.java +++ b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/Constants.java @@ -104,6 +104,7 @@ public static final class Priority { public static final int SERVER_42_FLOW_RTT_VXLAN_TURNING_PRIORITY = VERIFICATION_RULE_VXLAN_PRIORITY + 1; public static final int SERVER_42_FLOW_RTT_OUTPUT_VLAN_PRIORITY = DISCOVERY_RULE_PRIORITY; public static final int SERVER_42_FLOW_RTT_OUTPUT_VXLAN_PRIORITY = DISCOVERY_RULE_PRIORITY; + public static final int SERVER_42_FLOW_RTT_INPUT_TRANSIT_HA_PRIORITY = SERVER_42_FLOW_RTT_INPUT_PRIORITY - 1; public static final int SERVER_42_ISL_RTT_INPUT_PRIORITY = DISCOVERY_RULE_PRIORITY; public static final int SERVER_42_ISL_RTT_TURNING_PRIORITY = DISCOVERY_RULE_PRIORITY; diff --git a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/RuleManagerImpl.java b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/RuleManagerImpl.java index 34ee36e70cd..78bc3badeb3 100644 --- 
a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/RuleManagerImpl.java +++ b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/RuleManagerImpl.java @@ -733,17 +733,25 @@ private List<SpeakerData> buildForwardYPointEgressOrTransitHaRules( int yPointInPort = getShortestSubPath(subPaths).getSegments().get(lastCommonSegmentId).getDestPort(); HaFlow haFlow = adapter.getHaFlow(haPath.getHaPathId()); - RuleGenerator generator; - if (subPaths.get(0).getDestSwitchId().equals(subPaths.get(1).getDestSwitchId()) - && subPaths.get(0).getDestSwitchId().equals(haPath.getYPointSwitchId())) { - generator = flowRulesFactory.getYPointForwardEgressHaRuleGenerator( - haFlow, haPath, subPaths, encapsulation, yPointInPort); + List<RuleGenerator> generators = new ArrayList<>(); + if (bothEndpointsAreTheSameAndAreYPoint(haPath)) { + generators.add(flowRulesFactory.getYPointForwardEgressHaRuleGenerator( + haFlow, haPath, subPaths, encapsulation, yPointInPort)); } else { Map<PathId, Integer> outPorts = getHaYPointOutPorts(lastCommonSegmentId, subPaths); - generator = flowRulesFactory.getYPointForwardTransitHaRuleGenerator( - haFlow, haPath, subPaths, encapsulation, yPointInPort, outPorts); + generators.add(flowRulesFactory.getYPointForwardTransitHaRuleGenerator( + haFlow, haPath, subPaths, encapsulation, yPointInPort, outPorts)); + generators.add(flowRulesFactory.getServer42FlowRttHaFlowTransitRuleGenerator( + subPaths, encapsulation, yPointInPort, outPorts)); } - return generateCommands(generator, haPath.getYPointSwitchId(), ignoreUnknownSwitches, adapter); + + return generateCommands(generators, haPath.getYPointSwitchId(), ignoreUnknownSwitches, adapter); + } + + private boolean bothEndpointsAreTheSameAndAreYPoint(HaFlowPath haPath) { + List<FlowPath> subPaths = haPath.getSubPaths(); + return subPaths.get(0).getDestSwitchId().equals(subPaths.get(1).getDestSwitchId()) + && subPaths.get(0).getDestSwitchId().equals(haPath.getYPointSwitchId()); } private List<SpeakerData> buildForwardIngressHaRules( @@ -756,9 +764,8 @@ private List<SpeakerData> buildForwardIngressHaRules( } if (haPath.getSharedSwitchId().equals(haPath.getYPointSwitchId())) { - RuleGenerator generator = flowRulesFactory.getYPointForwardIngressHaRuleGenerator( - haFlow, haPath, haPath.getSubPaths(), encapsulation, overlappingAdapters); - return generateCommands(generator, haPath.getSharedSwitchId(), ignoreUnknownSwitches, adapter); + return buildHaIngressRulesYPointEqualsSharedPoint( + haFlow, haPath, encapsulation, overlappingAdapters, ignoreUnknownSwitches, adapter); } else { FlowPath randomSubPath = haPath.getSubPaths().get(0); @@ -774,7 +781,30 @@ private List<SpeakerData> buildHaIngressRules( RuleGenerator generator = flowRulesFactory.getIngressHaRuleGenerator( haFlow, subPath, meterId, encapsulation, sharedPath, overlappingAdapters, externalMeterCommandUuid, generateMeterCommand); - return generateCommands(generator, subPath.getSrcSwitchId(), ignoreUnknownSwitches, adapter); + List<SpeakerData> commands = generateCommands(generator, subPath.getSrcSwitchId(), ignoreUnknownSwitches, + adapter); + + RuleGenerator server42Generator = flowRulesFactory.getServer42IngressHaRuleGenerator(subPath, haFlow, + encapsulation, adapter.getSwitchProperties(subPath.getSrcSwitchId()), overlappingAdapters); + + commands.addAll(generateCommands(server42Generator, subPath.getSrcSwitchId(), ignoreUnknownSwitches, adapter)); + return commands; + } + + private List<SpeakerData> buildHaIngressRulesYPointEqualsSharedPoint( + HaFlow haFlow, HaFlowPath haPath, FlowTransitEncapsulation encapsulation, + Set<FlowSideAdapter>
overlappingAdapters, boolean ignoreUnknownSwitches, DataAdapter adapter) { + + RuleGenerator generator = flowRulesFactory.getYPointForwardIngressHaRuleGenerator( + haFlow, haPath, haPath.getSubPaths(), encapsulation, overlappingAdapters); + + RuleGenerator server42Generator = flowRulesFactory.getSharedYServer42IngressHaRuleGenerator( + haPath.getSubPaths().get(0), haFlow, encapsulation, + adapter.getSwitchProperties(haPath.getSharedSwitchId()), overlappingAdapters); + + return generateCommands(Lists.newArrayList(generator, server42Generator), haPath.getSharedSwitchId(), + ignoreUnknownSwitches, adapter); } private List<SpeakerData> buildHaEgressRules( @@ -1021,6 +1051,14 @@ private SwitchPathSegments findPathSegmentsForSwitch(SwitchId switchId, List<PathSegment> + private List<SpeakerData> generateCommands( + List<RuleGenerator> generators, SwitchId switchId, boolean ignoreUnknownSwitch, DataAdapter adapter) { + return generators.stream() + .map(generator -> generateCommands(generator, switchId, ignoreUnknownSwitch, adapter)) + .flatMap(Collection::stream) + .collect(Collectors.toList()); + } + private List<SpeakerData> generateCommands( RuleGenerator generator, SwitchId switchId, boolean ignoreUnknownSwitch, DataAdapter adapter) { Switch sw = adapter.getSwitch(switchId); diff --git a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/FlowRulesGeneratorFactory.java b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/FlowRulesGeneratorFactory.java index 0b7c25285f8..b167c9eb51a 100644 --- a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/FlowRulesGeneratorFactory.java +++ b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/FlowRulesGeneratorFactory.java @@ -40,10 +40,12 @@ import org.openkilda.rulemanager.factory.generator.flow.VlanStatsRuleGenerator; import org.openkilda.rulemanager.factory.generator.flow.haflow.EgressHaRuleGenerator; import org.openkilda.rulemanager.factory.generator.flow.haflow.IngressHaRuleGenerator; +import org.openkilda.rulemanager.factory.generator.flow.haflow.SharedYServer42IngressForwardHaRuleGenerator; import org.openkilda.rulemanager.factory.generator.flow.haflow.TransitHaRuleGenerator; import org.openkilda.rulemanager.factory.generator.flow.haflow.YPointForwardEgressHaRuleGenerator; import org.openkilda.rulemanager.factory.generator.flow.haflow.YPointForwardIngressHaRuleGenerator; import org.openkilda.rulemanager.factory.generator.flow.haflow.YPointForwardTransitHaRuleGenerator; +import org.openkilda.rulemanager.factory.generator.flow.haflow.YPointServer42HaFlowRttTransitRuleGenerator; import org.openkilda.rulemanager.factory.generator.flow.loop.FlowLoopIngressRuleGenerator; import org.openkilda.rulemanager.factory.generator.flow.loop.FlowLoopTransitRuleGenerator; import org.openkilda.rulemanager.factory.generator.flow.mirror.EgressMirrorRuleGenerator; @@ -96,6 +98,39 @@ public RuleGenerator getServer42IngressRuleGenerator( .build(); } + /** + * Get shared Y point server42 ingress rule generator for HA flow.
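+ * A usage sketch (illustrative): callers pass one forward sub-path of the shared switch, e.g.
+ * {@code getSharedYServer42IngressHaRuleGenerator(haPath.getSubPaths().get(0), haFlow, encapsulation,}
+ * {@code switchProperties, overlappingAdapters)}, and the returned generator emits its rules via
+ * {@code generateCommands(sharedSwitch)}, skipping one-switch sub-paths.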
+ */ + public RuleGenerator getSharedYServer42IngressHaRuleGenerator( + FlowPath flowPath, HaFlow haFlow, FlowTransitEncapsulation encapsulation, + SwitchProperties switchProperties, Set overlappingIngressAdapters) { + return SharedYServer42IngressForwardHaRuleGenerator.builder() + .config(config) + .flowPath(flowPath) + .haFlow(haFlow) + .encapsulation(encapsulation) + .overlappingIngressAdapters(overlappingIngressAdapters) + .switchProperties(switchProperties) + .build(); + } + + /** + * Get ingress Server42 rule generator for HA flow. + */ + public RuleGenerator getServer42IngressHaRuleGenerator( + FlowPath flowPath, HaFlow haFlow, FlowTransitEncapsulation encapsulation, + SwitchProperties switchProperties, Set overlappingIngressAdapters) { + + return Server42IngressRuleGenerator.builder() + .config(config) + .flowPath(flowPath) + .haFlow(haFlow) + .encapsulation(encapsulation) + .overlappingIngressAdapters(overlappingIngressAdapters) + .switchProperties(switchProperties) + .build(); + } + /** * Get vlan stats rule generator. */ @@ -347,6 +382,20 @@ public RuleGenerator getYPointForwardEgressHaRuleGenerator( .build(); } + /** + * Get server42 flow rtt ha flow transit rule generator. + */ + public RuleGenerator getServer42FlowRttHaFlowTransitRuleGenerator( + List subPaths, FlowTransitEncapsulation encapsulation, int inPort, + Map outPorts) { + return YPointServer42HaFlowRttTransitRuleGenerator.builder() + .subPaths(subPaths) + .encapsulation(encapsulation) + .inPort(inPort) + .outPorts(outPorts) + .build(); + } + /** * Get ingress ha rule generator. */ diff --git a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/Server42IngressRuleGenerator.java b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/Server42IngressRuleGenerator.java index da75ca54647..d2de9db9835 100644 --- a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/Server42IngressRuleGenerator.java +++ b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/Server42IngressRuleGenerator.java @@ -26,7 +26,6 @@ import static org.openkilda.rulemanager.Constants.Priority.SERVER_42_PRE_INGRESS_FLOW_PRIORITY; import static org.openkilda.rulemanager.Constants.SERVER_42_FLOW_RTT_FORWARD_UDP_PORT; import static org.openkilda.rulemanager.utils.Utils.buildPushVxlan; -import static org.openkilda.rulemanager.utils.Utils.getOutPort; import static org.openkilda.rulemanager.utils.Utils.makeVlanReplaceActions; import static org.openkilda.rulemanager.utils.Utils.mapMetadata; @@ -35,12 +34,14 @@ import org.openkilda.model.FlowEndpoint; import org.openkilda.model.FlowPath; import org.openkilda.model.FlowTransitEncapsulation; +import org.openkilda.model.HaFlow; import org.openkilda.model.Switch; import org.openkilda.model.SwitchFeature; import org.openkilda.model.SwitchId; import org.openkilda.model.SwitchProperties; import org.openkilda.model.cookie.CookieBase.CookieType; import org.openkilda.model.cookie.FlowSegmentCookie; +import org.openkilda.model.cookie.FlowSegmentCookie.FlowSegmentCookieBuilder; import org.openkilda.model.cookie.FlowSharedSegmentCookie; import org.openkilda.model.cookie.FlowSharedSegmentCookie.SharedSegmentType; import org.openkilda.model.cookie.PortColourCookie; @@ -55,6 +56,7 @@ import org.openkilda.rulemanager.OfVersion; import org.openkilda.rulemanager.ProtoConstants.EthType; 
import org.openkilda.rulemanager.ProtoConstants.IpProto; +import org.openkilda.rulemanager.ProtoConstants.PortNumber; import org.openkilda.rulemanager.RuleManagerConfig; import org.openkilda.rulemanager.SpeakerData; import org.openkilda.rulemanager.action.Action; @@ -66,6 +68,8 @@ import org.openkilda.rulemanager.factory.RuleGenerator; import org.openkilda.rulemanager.match.FieldMatch; import org.openkilda.rulemanager.utils.RoutingMetadata; +import org.openkilda.rulemanager.utils.RoutingMetadata.HaSubFlowType; +import org.openkilda.rulemanager.utils.RoutingMetadata.RoutingMetadataBuilder; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Lists; @@ -93,17 +97,23 @@ public class Server42IngressRuleGenerator implements RuleGenerator { protected final Flow flow; protected final FlowTransitEncapsulation encapsulation; protected final SwitchProperties switchProperties; + protected final HaFlow haFlow; @Override public List generateCommands(Switch sw) { List result = new ArrayList<>(); - if (switchProperties == null || !switchProperties.isServer42FlowRtt() - || flowPath.isOneSwitchPath()) { + if (switchProperties == null || !switchProperties.isServer42FlowRtt()) { return result; } + if (flowPath != null && flowPath.isOneSwitchPath()) { + return result; + } + if (flow == null && haFlow == null) { + throw new IllegalArgumentException("Flow and HaFlow are null"); + } FlowEndpoint ingressEndpoint = getIngressEndpoint(sw.getSwitchId()); - result.add(buildServer42IngressCommand(sw, ingressEndpoint)); + result.add(buildServer42IngressCommand(sw, ingressEndpoint, flowPath, null)); if (needToBuildServer42PreIngressRule(ingressEndpoint)) { result.add(buildServer42PreIngressCommand(sw, ingressEndpoint)); } @@ -114,7 +124,7 @@ public List generateCommands(Switch sw) { } @VisibleForTesting - boolean needToBuildServer42PreIngressRule(FlowEndpoint ingressEndpoint) { + protected boolean needToBuildServer42PreIngressRule(FlowEndpoint ingressEndpoint) { if (!isVlanIdSet(ingressEndpoint.getOuterVlanId())) { // Full port flows do not need pre ingress shared rule return false; @@ -130,7 +140,7 @@ boolean needToBuildServer42PreIngressRule(FlowEndpoint ingressEndpoint) { } @VisibleForTesting - boolean needToBuildServer42InputRule(FlowEndpoint ingressEndpoint) { + protected boolean needToBuildServer42InputRule(FlowEndpoint ingressEndpoint) { for (FlowSideAdapter overlappingIngressAdapter : overlappingIngressAdapters) { if (overlappingIngressAdapter.getEndpoint().getPortNumber().equals(ingressEndpoint.getPortNumber()) && !overlappingIngressAdapter.isOneSwitchFlow()) { @@ -141,7 +151,7 @@ boolean needToBuildServer42InputRule(FlowEndpoint ingressEndpoint) { return true; } - private FlowSpeakerData buildServer42PreIngressCommand(Switch sw, FlowEndpoint endpoint) { + protected FlowSpeakerData buildServer42PreIngressCommand(Switch sw, FlowEndpoint endpoint) { FlowSharedSegmentCookie cookie = FlowSharedSegmentCookie.builder(SharedSegmentType.SERVER42_QINQ_OUTER_VLAN) .portNumber(switchProperties.getServer42Port()) .vlanId(endpoint.getOuterVlanId()) @@ -169,52 +179,60 @@ private FlowSpeakerData buildServer42PreIngressCommand(Switch sw, FlowEndpoint e return builder.build(); } - private FlowSpeakerData buildServer42IngressDoubleVlanCommand(Switch sw, FlowEndpoint ingressEndpoint) { - RoutingMetadata metadata = RoutingMetadata.builder() - .inputPort(ingressEndpoint.getPortNumber()) + private FlowSpeakerData buildServer42IngressDoubleVlanCommand(Switch sw, FlowEndpoint ingressEndpoint, + 
FlowPath flowPath, + HaSubFlowType haSubFlowType) { + RoutingMetadata metadata = getRoutingMetadataBuilderBase(ingressEndpoint, haSubFlowType) .outerVlanId(ingressEndpoint.getOuterVlanId()) .build(sw.getFeatures()); + Set match = Sets.newHashSet( FieldMatch.builder().field(Field.IN_PORT).value(switchProperties.getServer42Port()).build(), FieldMatch.builder().field(Field.METADATA).value(metadata.getValue()).mask(metadata.getMask()).build(), FieldMatch.builder().field(Field.VLAN_VID).value(ingressEndpoint.getInnerVlanId()).build()); - return buildServer42IngressCommand(sw, ingressEndpoint, match, SERVER_42_INGRESS_DOUBLE_VLAN_FLOW_PRIORITY); + return buildServer42IngressCommand(sw, ingressEndpoint, match, + SERVER_42_INGRESS_DOUBLE_VLAN_FLOW_PRIORITY, flowPath); } - private FlowSpeakerData buildServer42IngressSingleVlanCommand(Switch sw, FlowEndpoint ingressEndpoint) { - RoutingMetadata metadata = RoutingMetadata.builder() - .inputPort(ingressEndpoint.getPortNumber()) + private FlowSpeakerData buildServer42IngressSingleVlanCommand(Switch sw, FlowEndpoint ingressEndpoint, + FlowPath flowPath, + HaSubFlowType haSubFlowType) { + RoutingMetadata metadata = getRoutingMetadataBuilderBase(ingressEndpoint, haSubFlowType) .outerVlanId(ingressEndpoint.getOuterVlanId()) .build(sw.getFeatures()); Set match = Sets.newHashSet( FieldMatch.builder().field(Field.IN_PORT).value(switchProperties.getServer42Port()).build(), FieldMatch.builder().field(Field.METADATA).value(metadata.getValue()).mask(metadata.getMask()).build()); - return buildServer42IngressCommand(sw, ingressEndpoint, match, SERVER_42_INGRESS_SINGLE_VLAN_FLOW_PRIORITY); + return buildServer42IngressCommand(sw, ingressEndpoint, match, + SERVER_42_INGRESS_SINGLE_VLAN_FLOW_PRIORITY, flowPath); } - private FlowSpeakerData buildServer42IngressFullPortCommand(Switch sw, FlowEndpoint ingressEndpoint) { - RoutingMetadata metadata = RoutingMetadata.builder() - .inputPort(ingressEndpoint.getPortNumber()) + private FlowSpeakerData buildServer42IngressFullPortCommand(Switch sw, FlowEndpoint ingressEndpoint, + FlowPath flowPath, + HaSubFlowType haSubFlowType) { + RoutingMetadata metadata = getRoutingMetadataBuilderBase(ingressEndpoint, haSubFlowType) .build(sw.getFeatures()); + Set match = Sets.newHashSet( FieldMatch.builder().field(Field.IN_PORT).value(switchProperties.getServer42Port()).build(), FieldMatch.builder().field(Field.METADATA).value(metadata.getValue()).mask(metadata.getMask()).build()); - return buildServer42IngressCommand(sw, ingressEndpoint, match, SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY); + return buildServer42IngressCommand(sw, ingressEndpoint, match, + SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY, flowPath); } - private FlowSpeakerData buildServer42IngressCommand( - Switch sw, FlowEndpoint ingressEndpoint, Set match, int priority) { - FlowSegmentCookie cookie = new FlowSegmentCookie(flowPath.getCookie().getValue()).toBuilder() - .type(CookieType.SERVER_42_FLOW_RTT_INGRESS) - .build(); + private FlowSpeakerData buildServer42IngressCommand(Switch sw, FlowEndpoint ingressEndpoint, Set match, + int priority, FlowPath flowPath) { + FlowSegmentCookieBuilder cookieBuilder = new FlowSegmentCookie(flowPath.getCookie().getValue()).toBuilder() + .type(CookieType.SERVER_42_FLOW_RTT_INGRESS); + FlowSpeakerDataBuilder builder = FlowSpeakerData.builder() .switchId(ingressEndpoint.getSwitchId()) .ofVersion(OfVersion.of(sw.getOfVersion())) - .cookie(cookie) + .cookie(cookieBuilder.build()) .table(OfTable.INGRESS) .priority(priority) .match(match) - 
.instructions(buildIngressInstructions(sw, ingressEndpoint.getInnerVlanId())); + .instructions(buildIngressInstructions(sw, ingressEndpoint.getInnerVlanId(), flowPath)); if (sw.getFeatures().contains(SwitchFeature.RESET_COUNTS_FLAG)) { builder.flags(Sets.newHashSet(OfFlowFlag.RESET_COUNTERS)); @@ -222,34 +240,33 @@ private FlowSpeakerData buildServer42IngressCommand( return builder.build(); } - private FlowSpeakerData buildServer42IngressCommand(Switch sw, FlowEndpoint ingressEndpoint) { + protected FlowSpeakerData buildServer42IngressCommand(Switch sw, FlowEndpoint ingressEndpoint, FlowPath flowPath, + HaSubFlowType haSubFlowType) { if (FlowEndpoint.isVlanIdSet(ingressEndpoint.getInnerVlanId())) { - return buildServer42IngressDoubleVlanCommand(sw, ingressEndpoint); + return buildServer42IngressDoubleVlanCommand(sw, ingressEndpoint, flowPath, haSubFlowType); } else if (FlowEndpoint.isVlanIdSet(ingressEndpoint.getOuterVlanId())) { - return buildServer42IngressSingleVlanCommand(sw, ingressEndpoint); + return buildServer42IngressSingleVlanCommand(sw, ingressEndpoint, flowPath, haSubFlowType); } else { - return buildServer42IngressFullPortCommand(sw, ingressEndpoint); + return buildServer42IngressFullPortCommand(sw, ingressEndpoint, flowPath, haSubFlowType); } } - private Instructions buildIngressInstructions(Switch sw, int innerVlan) { - List applyActions = new ArrayList<>(buildTransformActions(innerVlan, sw.getFeatures())); - applyActions.add(new PortOutAction(getOutPort(flowPath, flow))); + protected Instructions buildIngressInstructions(Switch sw, int vlan, FlowPath flowPath) { + List applyActions = new ArrayList<>(buildTransformActions(vlan, sw.getFeatures(), flowPath)); + applyActions.add(new PortOutAction(getOutPort(flowPath))); return Instructions.builder() .applyActions(applyActions) .build(); } @VisibleForTesting - List buildTransformActions(int innerVlan, Set features) { + List buildTransformActions(int innerVlan, Set features, FlowPath flowPath) { List actions = new ArrayList<>(); List currentStack = makeVlanStack(innerVlan); switch (encapsulation.getType()) { case TRANSIT_VLAN: actions.add(SetFieldAction.builder().field(Field.ETH_SRC) .value(flowPath.getSrcSwitchId().toMacAddressAsLong()).build()); - actions.add(SetFieldAction.builder().field(Field.ETH_DST) - .value(flowPath.getDestSwitchId().toMacAddressAsLong()).build()); actions.addAll(makeVlanReplaceActions(currentStack, makeVlanStack(encapsulation.getId()))); break; case VXLAN: @@ -264,6 +281,12 @@ List buildTransformActions(int innerVlan, Set features) { } private FlowSpeakerData buildServer42InputCommand(Switch sw, int inPort) { + return buildServer42InputCommand(sw, inPort, null, null); + } + + protected FlowSpeakerData buildServer42InputCommand(Switch sw, int inPort, FlowPath flowPath, + HaSubFlowType haSubFlowType) { + int udpSrcPort = inPort + config.getServer42FlowRttUdpPortOffset(); Set match = Sets.newHashSet( FieldMatch.builder().field(Field.IN_PORT).value(switchProperties.getServer42Port()).build(), @@ -273,7 +296,6 @@ private FlowSpeakerData buildServer42InputCommand(Switch sw, int inPort) { FieldMatch.builder().field(Field.IP_PROTO).value(IpProto.UDP).build(), FieldMatch.builder().field(Field.UDP_SRC).value(udpSrcPort).build()); - PortColourCookie cookie = new PortColourCookie(CookieType.SERVER_42_FLOW_RTT_INPUT, inPort); List applyActions = Lists.newArrayList( SetFieldAction.builder().field(Field.UDP_SRC).value(SERVER_42_FLOW_RTT_FORWARD_UDP_PORT).build(), @@ -282,16 +304,30 @@ private FlowSpeakerData 
buildServer42InputCommand(Switch sw, int inPort) { applyActions.add(buildServer42CopyFirstTimestamp()); } + RoutingMetadataBuilder routingMetadataBuilder = RoutingMetadata.builder() + .inputPort(inPort) + .haSubFlowType(haSubFlowType); + Instructions instructions = Instructions.builder() .applyActions(applyActions) .goToTable(OfTable.PRE_INGRESS) - .writeMetadata(mapMetadata(RoutingMetadata.builder().inputPort(inPort).build(sw.getFeatures()))) + .writeMetadata(mapMetadata(routingMetadataBuilder.build(sw.getFeatures()))) .build(); + PortColourCookie.PortColourCookieBuilder cookieBuilder = PortColourCookie.builder() + .type(CookieType.SERVER_42_FLOW_RTT_INPUT) + .portNumber(inPort); + + if (flowPath != null) { + match.add(FieldMatch.builder().field(Field.ETH_DST).value(flowPath.getDestSwitchId().toMacAddressAsLong()) + .build()); + cookieBuilder.subType(flowPath.getCookie().getFlowSubType()); + } + return FlowSpeakerData.builder() .switchId(sw.getSwitchId()) .ofVersion(OfVersion.of(sw.getOfVersion())) - .cookie(cookie) + .cookie(cookieBuilder.build()) .table(OfTable.INPUT) .priority(Priority.SERVER_42_FLOW_RTT_INPUT_PRIORITY) .match(match) @@ -299,7 +335,6 @@ private FlowSpeakerData buildServer42InputCommand(Switch sw, int inPort) { .build(); } - private CopyFieldAction buildServer42CopyFirstTimestamp() { return CopyFieldAction.builder() .numberOfBits(NOVIFLOW_TIMESTAMP_SIZE_IN_BITS) @@ -310,8 +345,13 @@ private CopyFieldAction buildServer42CopyFirstTimestamp() { .build(); } - private FlowEndpoint getIngressEndpoint(SwitchId switchId) { - FlowEndpoint ingressEndpoint = FlowSideAdapter.makeIngressAdapter(flow, flowPath).getEndpoint(); + protected FlowEndpoint getIngressEndpoint(SwitchId switchId) { + FlowEndpoint ingressEndpoint; + if (haFlow != null) { + ingressEndpoint = FlowSideAdapter.makeIngressAdapter(haFlow, flowPath).getEndpoint(); + } else { + ingressEndpoint = FlowSideAdapter.makeIngressAdapter(flow, flowPath).getEndpoint(); + } if (!ingressEndpoint.getSwitchId().equals(switchId)) { throw new IllegalArgumentException(format("Path %s has ingress endpoint %s with switchId %s. 
But switchId " + "must be equal to target switchId %s", flowPath.getPathId(), ingressEndpoint, @@ -319,4 +359,19 @@ private FlowEndpoint getIngressEndpoint(SwitchId switchId) { } return ingressEndpoint; } + + private PortNumber getOutPort(FlowPath flowPath) { + if (flowPath.getSegments().isEmpty()) { + throw new IllegalStateException(format("Multi-switch flow path %s has no segments", flowPath.getPathId())); + } + return new PortNumber(flowPath.getSegments().get(0).getSrcPort()); + } + + private RoutingMetadataBuilder getRoutingMetadataBuilderBase(FlowEndpoint ingressEndpoint, + HaSubFlowType haSubFlowType) { + return RoutingMetadata.builder() + .inputPort(ingressEndpoint.getPortNumber()) + .haSubFlowType(haSubFlowType); + } + } diff --git a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/EgressHaRuleGenerator.java b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/EgressHaRuleGenerator.java index c794a173825..dfc116d8651 100644 --- a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/EgressHaRuleGenerator.java +++ b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/EgressHaRuleGenerator.java @@ -31,7 +31,7 @@ import org.openkilda.model.Switch; import org.openkilda.model.SwitchId; import org.openkilda.model.cookie.FlowSegmentCookie; -import org.openkilda.model.cookie.FlowSegmentCookie.FlowSubType; +import org.openkilda.model.cookie.FlowSubType; import org.openkilda.rulemanager.Constants.Priority; import org.openkilda.rulemanager.FlowSpeakerData; import org.openkilda.rulemanager.Instructions; diff --git a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/IngressHaRuleGenerator.java b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/IngressHaRuleGenerator.java index 7df1643f492..10dd5c57fbe 100644 --- a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/IngressHaRuleGenerator.java +++ b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/IngressHaRuleGenerator.java @@ -36,7 +36,7 @@ import org.openkilda.model.SwitchFeature; import org.openkilda.model.SwitchId; import org.openkilda.model.cookie.FlowSegmentCookie; -import org.openkilda.model.cookie.FlowSegmentCookie.FlowSubType; +import org.openkilda.model.cookie.FlowSubType; import org.openkilda.rulemanager.Constants.Priority; import org.openkilda.rulemanager.FlowSpeakerData; import org.openkilda.rulemanager.Instructions; diff --git a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/SharedYServer42IngressForwardHaRuleGenerator.java b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/SharedYServer42IngressForwardHaRuleGenerator.java new file mode 100644 index 00000000000..39e0d1ec955 --- /dev/null +++ b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/SharedYServer42IngressForwardHaRuleGenerator.java @@ -0,0 +1,74 @@ +/* Copyright 2023 Telstra Open Source + * + * Licensed under the 
Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.openkilda.rulemanager.factory.generator.flow.haflow;
+
+import org.openkilda.model.FlowEndpoint;
+import org.openkilda.model.FlowPath;
+import org.openkilda.model.Switch;
+import org.openkilda.model.cookie.FlowSubType;
+import org.openkilda.rulemanager.SpeakerData;
+import org.openkilda.rulemanager.factory.generator.flow.Server42IngressRuleGenerator;
+import org.openkilda.rulemanager.utils.RoutingMetadata.HaSubFlowType;
+
+import com.google.common.collect.ImmutableMap;
+import lombok.Getter;
+import lombok.experimental.SuperBuilder;
+import lombok.extern.slf4j.Slf4j;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+@SuperBuilder
+@Getter
+@Slf4j
+public class SharedYServer42IngressForwardHaRuleGenerator extends Server42IngressRuleGenerator {
+
+    @Override
+    public List<SpeakerData> generateCommands(Switch sw) {
+        if (switchProperties == null || !switchProperties.isServer42FlowRtt()
+                || flow != null || haFlow == null) {
+            return Collections.emptyList();
+        }
+
+        List<FlowPath> flowPaths = haFlow.getForwardPath().getSubPaths();
+        if (flowPaths.size() > 2) {
+            return Collections.emptyList();
+        }
+
+        Map<FlowSubType, HaSubFlowType> cookieFlowSubTypeToMetadataHaSubFlowType = ImmutableMap.of(
+                FlowSubType.HA_SUB_FLOW_1, HaSubFlowType.HA_SUB_FLOW_1,
+                FlowSubType.HA_SUB_FLOW_2, HaSubFlowType.HA_SUB_FLOW_2
+        );
+
+        FlowEndpoint ingressEndpoint = getIngressEndpoint(sw.getSwitchId());
+        List<SpeakerData> result = new ArrayList<>();
+        if (needToBuildServer42PreIngressRule(ingressEndpoint)) {
+            result.add(buildServer42PreIngressCommand(sw, ingressEndpoint));
+        }
+        flowPaths.stream().filter(flowPath -> !flowPath.isOneSwitchPath()).forEach(flowPath -> {
+            HaSubFlowType haSubFlowType =
+                    cookieFlowSubTypeToMetadataHaSubFlowType.get(flowPath.getCookie().getFlowSubType());
+            result.add(buildServer42IngressCommand(sw, ingressEndpoint, flowPath, haSubFlowType));
+
+            if (needToBuildServer42InputRule(ingressEndpoint)) {
+                result.add(buildServer42InputCommand(sw, ingressEndpoint.getPortNumber(), flowPath, haSubFlowType));
+            }
+        });
+        return result;
+    }
+}
diff --git a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/TransitHaRuleGenerator.java b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/TransitHaRuleGenerator.java
index 6615e17d8f0..03d24a5b4e6 100644
--- a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/TransitHaRuleGenerator.java
+++ b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/TransitHaRuleGenerator.java
@@ -20,7 +20,7 @@
 import org.openkilda.model.MeterId;
 import org.openkilda.model.Switch;
 import org.openkilda.model.cookie.FlowSegmentCookie;
-import org.openkilda.model.cookie.FlowSegmentCookie.FlowSubType;
+import org.openkilda.model.cookie.FlowSubType;
 import org.openkilda.rulemanager.Constants.Priority;
 import org.openkilda.rulemanager.FlowSpeakerData;
 import org.openkilda.rulemanager.Instructions;
diff --git a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/YPointServer42HaFlowRttTransitRuleGenerator.java b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/YPointServer42HaFlowRttTransitRuleGenerator.java
new file mode 100644
index 00000000000..5fc7f4465dd
--- /dev/null
+++ b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/factory/generator/flow/haflow/YPointServer42HaFlowRttTransitRuleGenerator.java
@@ -0,0 +1,110 @@
+/* Copyright 2023 Telstra Open Source
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.openkilda.rulemanager.factory.generator.flow.haflow;
+
+import static java.lang.String.format;
+import static org.openkilda.rulemanager.Constants.Priority.SERVER_42_FLOW_RTT_INPUT_TRANSIT_HA_PRIORITY;
+import static org.openkilda.rulemanager.Constants.SERVER_42_FLOW_RTT_FORWARD_UDP_PORT;
+import static org.openkilda.rulemanager.Constants.SERVER_42_FLOW_RTT_REVERSE_UDP_PORT;
+import static org.openkilda.rulemanager.Constants.SERVER_42_FLOW_RTT_REVERSE_UDP_VXLAN_PORT;
+import static org.openkilda.rulemanager.Constants.VXLAN_UDP_DST;
+
+import org.openkilda.model.FlowPath;
+import org.openkilda.model.FlowTransitEncapsulation;
+import org.openkilda.model.PathId;
+import org.openkilda.model.Switch;
+import org.openkilda.rulemanager.Field;
+import org.openkilda.rulemanager.FlowSpeakerData;
+import org.openkilda.rulemanager.Instructions;
+import org.openkilda.rulemanager.OfTable;
+import org.openkilda.rulemanager.OfVersion;
+import org.openkilda.rulemanager.ProtoConstants.EthType;
+import org.openkilda.rulemanager.ProtoConstants.IpProto;
+import org.openkilda.rulemanager.ProtoConstants.PortNumber;
+import org.openkilda.rulemanager.SpeakerData;
+import org.openkilda.rulemanager.action.PortOutAction;
+import org.openkilda.rulemanager.factory.RuleGenerator;
+import org.openkilda.rulemanager.factory.generator.flow.NotIngressRuleGenerator;
+import org.openkilda.rulemanager.match.FieldMatch;
+
+import com.google.common.collect.Sets;
+import lombok.experimental.SuperBuilder;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+@SuperBuilder
+public class YPointServer42HaFlowRttTransitRuleGenerator extends NotIngressRuleGenerator implements RuleGenerator {
+
+    private final List<FlowPath> subPaths;
+    private final int inPort;
+    private final Map<PathId, Integer> outPorts;
+    private final FlowTransitEncapsulation encapsulation;
+
+    @Override
+    public List<SpeakerData> generateCommands(Switch sw) {
+        return subPaths.stream()
+                .map(subPath -> FlowSpeakerData.builder()
+                        .switchId(sw.getSwitchId())
+                        .ofVersion(OfVersion.of(sw.getOfVersion()))
+                        .cookie(subPath.getCookie().toBuilder().haSubFlowServer42(true).build())
+                        .table(OfTable.INPUT)
+                        .priority(SERVER_42_FLOW_RTT_INPUT_TRANSIT_HA_PRIORITY)
+                        .match(buildMatch(sw, subPath.getDestSwitchId().toMacAddressAsLong()))
+                        .instructions(buildInstructions(subPath))
+                        .build())
+                .collect(Collectors.toList());
+    }
+
+    private Instructions buildInstructions(FlowPath subPath) {
+        return Instructions.builder()
+                .applyActions(Collections.singletonList(
+                        new PortOutAction(new PortNumber(outPorts.get(subPath.getPathId())))))
+                .build();
+    }
+
+    private Set<FieldMatch> buildMatch(Switch sw, long dstMacAddress) {
+        int udpSrc;
+        int udpDst;
+        switch (encapsulation.getType()) {
+            case TRANSIT_VLAN:
+                udpSrc = SERVER_42_FLOW_RTT_REVERSE_UDP_PORT;
+                udpDst = SERVER_42_FLOW_RTT_FORWARD_UDP_PORT;
+                break;
+            case VXLAN:
+                udpSrc = SERVER_42_FLOW_RTT_REVERSE_UDP_VXLAN_PORT;
+                udpDst = VXLAN_UDP_DST;
+                break;
+            default:
+                throw new IllegalArgumentException(format("Unknown encapsulation type %s", encapsulation.getType()));
+        }
+
+        Set<FieldMatch> match = makeTransitMatch(sw, inPort, encapsulation);
+        match.addAll(Sets.newHashSet(
+                FieldMatch.builder().field(Field.ETH_DST).value(dstMacAddress).build(),
+                FieldMatch.builder().field(Field.ETH_TYPE).value(EthType.IPv4).build(),
+                FieldMatch.builder().field(Field.IP_PROTO).value(IpProto.UDP).build(),
+                FieldMatch.builder().field(Field.UDP_SRC).value(udpSrc).build(),
+                FieldMatch.builder().field(Field.UDP_DST).value(udpDst).build()
+        ));
+        return match;
+    }
+}
diff --git a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/utils/RoutingMetadata.java b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/utils/RoutingMetadata.java
index a692440fb90..3515efbaed1 100644
--- a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/utils/RoutingMetadata.java
+++ b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/utils/RoutingMetadata.java
@@ -36,28 +36,31 @@ public class RoutingMetadata {
     private static final BitField ONE_SWITCH_FLOW_FLAG = new BitField(0x0000_0000_0000_0002L);
     private static final BitField ARP_MARKER_FLAG = new BitField(0x0000_0000_0000_0004L);
     private static final BitField OUTER_VLAN_PRESENCE_FLAG = new BitField(0x0000_0000_0000_0008L);
+    private static final BitField HA_SUB_FLOW_TYPE_FIELD = new BitField(0x0000_0000_0001_0000L);
     private static final BitField OUTER_VLAN_FIELD = new BitField(0x0000_0000_0000_FFF0L);
-    // NOTE: port count was increased from 128 to 4096. At this moment only 1000 ports can be used
-    // on Noviflow switches. But according to open flow specs port count could be up to 65536.
-    // So we increased port count to maximum possible value.
+    // NOTE: the input port field now holds 2048 values: bit 16 was taken from it for
+    // HA_SUB_FLOW_TYPE_FIELD. Only 1000 ports can currently be used on Noviflow switches,
+    // so 2048 values are still sufficient, although the OpenFlow spec allows up to 65536 ports.
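// Editor's sketch (not part of the patch): a worked example of the layout above, assuming
// the existing setField(long value, long fieldValue, BitField field) helper shifts
// fieldValue to the field's offset. For inputPort = 5 and haSubFlowType = HA_SUB_FLOW_2
// (numeric value 1):
//   HA_SUB_FLOW_TYPE_FIELD, bit 16     -> 1L << 16 = 0x0000_0000_0001_0000L
//   INPUT_PORT_FIELD, bits 17..27      -> 5L << 17 = 0x0000_0000_000A_0000L
//   packed value (before TYPE_FIELD)   ->            0x0000_0000_000B_0000L
// MAX_INPUT_PORT = 0x0FFE_0000 >> 17 = 2047, i.e. 2048 distinct input port values.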
- private static final BitField INPUT_PORT_FIELD = new BitField(0x0000_0000_0FFF_0000L); + private static final BitField INPUT_PORT_FIELD = new BitField(0x0000_0000_0FFE_0000L); public static final int FULL_MASK = -1; static final long MAX_INPUT_PORT = INPUT_PORT_FIELD.getMask() >> INPUT_PORT_FIELD.getOffset(); static final BitField[] ALL_FIELDS = new BitField[] { TYPE_FIELD, LLDP_MARKER_FLAG, ONE_SWITCH_FLOW_FLAG, ARP_MARKER_FLAG, OUTER_VLAN_PRESENCE_FLAG, - OUTER_VLAN_FIELD, INPUT_PORT_FIELD}; + HA_SUB_FLOW_TYPE_FIELD, OUTER_VLAN_FIELD, INPUT_PORT_FIELD}; private final long value; private final long mask; @Builder protected RoutingMetadata( - Boolean lldpFlag, Boolean arpFlag, Boolean oneSwitchFlowFlag, Integer outerVlanId, Integer inputPort) { - this.value = setField(makeValue(lldpFlag, arpFlag, oneSwitchFlowFlag, outerVlanId, inputPort), - MetadataType.ROUTING.getValue(), TYPE_FIELD); - this.mask = setField(makeMask(lldpFlag, arpFlag, oneSwitchFlowFlag, outerVlanId, inputPort), + Boolean lldpFlag, Boolean arpFlag, Boolean oneSwitchFlowFlag, Integer outerVlanId, Integer inputPort, + HaSubFlowType haSubFlowType) { + this.value = setField(makeValue(lldpFlag, arpFlag, oneSwitchFlowFlag, outerVlanId, inputPort, haSubFlowType), + MetadataType.ROUTING.getValue(), + TYPE_FIELD); + this.mask = setField(makeMask(lldpFlag, arpFlag, oneSwitchFlowFlag, outerVlanId, inputPort, haSubFlowType), FULL_MASK, TYPE_FIELD); } @@ -84,8 +87,25 @@ public int getValue() { } } + public enum HaSubFlowType implements NumericEnumField { + HA_SUB_FLOW_1(0), + HA_SUB_FLOW_2(1); + + private final int value; + + HaSubFlowType(int value) { + this.value = value; + } + + @Override + public int getValue() { + return value; + } + } + private static long makeValue( - Boolean lldpFlag, Boolean arpFlag, Boolean oneSwitchFlowFlag, Integer outerVlanId, Integer inputPort) { + Boolean lldpFlag, Boolean arpFlag, Boolean oneSwitchFlowFlag, Integer outerVlanId, Integer inputPort, + HaSubFlowType haSubFlowType) { long result = 0; if (lldpFlag != null) { result = setField(result, lldpFlag ? 
1 : 0, LLDP_MARKER_FLAG); @@ -107,11 +127,15 @@ private static long makeValue( } result = setField(result, inputPort, INPUT_PORT_FIELD); } + + if (haSubFlowType != null) { + result = setField(result, haSubFlowType.getValue(), HA_SUB_FLOW_TYPE_FIELD); + } return result; } - private static long makeMask( - Boolean lldpFlag, Boolean arpFlag, Boolean oneSwitchFlowFlag, Integer outerVlanId, Integer inputPort) { + private static long makeMask(Boolean lldpFlag, Boolean arpFlag, Boolean oneSwitchFlowFlag, + Integer outerVlanId, Integer inputPort, HaSubFlowType haSubFlowType) { long result = 0; if (lldpFlag != null) { result = setField(result, FULL_MASK, LLDP_MARKER_FLAG); @@ -129,6 +153,9 @@ private static long makeMask( if (inputPort != null) { result = setField(result, FULL_MASK, INPUT_PORT_FIELD); } + if (haSubFlowType != null) { + result = setField(result, FULL_MASK, HA_SUB_FLOW_TYPE_FIELD); + } return result; } @@ -146,11 +173,11 @@ public RoutingMetadata build(Set features) { private RoutingMetadata buildTruncatedTo32Bits() { // todo fix 32-bits metadata - return new RoutingMetadata(lldpFlag, arpFlag, oneSwitchFlowFlag, outerVlanId, inputPort); + return new RoutingMetadata(lldpFlag, arpFlag, oneSwitchFlowFlag, outerVlanId, inputPort, haSubFlowType); } private RoutingMetadata buildGeneric() { - return new RoutingMetadata(lldpFlag, arpFlag, oneSwitchFlowFlag, outerVlanId, inputPort); + return new RoutingMetadata(lldpFlag, arpFlag, oneSwitchFlowFlag, outerVlanId, inputPort, haSubFlowType); } } diff --git a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/RuleManagerHaFlowRulesTest.java b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/RuleManagerHaFlowRulesTest.java index 77c50b3688f..ad773f1aa7b 100644 --- a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/RuleManagerHaFlowRulesTest.java +++ b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/RuleManagerHaFlowRulesTest.java @@ -15,103 +15,27 @@ package org.openkilda.rulemanager; -import static java.util.function.Function.identity; -import static java.util.stream.Collectors.toMap; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import static org.openkilda.model.SwitchFeature.METERS; -import static org.openkilda.model.SwitchFeature.NOVIFLOW_PUSH_POP_VXLAN; -import static org.openkilda.model.SwitchFeature.RESET_COUNTS_FLAG; import static org.openkilda.rulemanager.OfTable.EGRESS; import static org.openkilda.rulemanager.OfTable.INGRESS; import static org.openkilda.rulemanager.OfTable.INPUT; +import static org.openkilda.rulemanager.OfTable.PRE_INGRESS; import static org.openkilda.rulemanager.OfTable.TRANSIT; -import static org.openkilda.rulemanager.Utils.buildSwitch; -import org.openkilda.model.FlowEncapsulationType; -import org.openkilda.model.FlowPath; -import org.openkilda.model.FlowPathDirection; -import org.openkilda.model.FlowTransitEncapsulation; -import org.openkilda.model.GroupId; import org.openkilda.model.HaFlow; -import org.openkilda.model.HaFlowPath; -import org.openkilda.model.HaSubFlow; -import org.openkilda.model.MeterId; -import org.openkilda.model.PathId; -import org.openkilda.model.PathSegment; -import org.openkilda.model.Switch; -import org.openkilda.model.SwitchFeature; import org.openkilda.model.SwitchId; -import org.openkilda.model.cookie.FlowSegmentCookie; -import org.openkilda.model.cookie.FlowSegmentCookie.FlowSubType; -import 
org.openkilda.rulemanager.adapter.InMemoryDataAdapter; +import org.openkilda.rulemanager.factory.generator.flow.haflow.HaFlowRulesBaseTest; -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; -import org.apache.commons.lang3.ArrayUtils; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -public class RuleManagerHaFlowRulesTest { - - public static final String HA_FLOW_ID = "ha_flow_id"; - public static final String SUB_FLOW_1 = "sub_flow_1"; - public static final String SUB_FLOW_2 = "sub_flow_2"; - public static final PathId PATH_ID_1 = new PathId("path_id_1"); - public static final PathId PATH_ID_2 = new PathId("path_id_2"); - public static final PathId PATH_ID_3 = new PathId("path_id_3"); - public static final PathId PATH_ID_4 = new PathId("path_id_4"); - public static final PathId PATH_ID_5 = new PathId("path_id_5"); - public static final PathId PATH_ID_6 = new PathId("path_id_6"); - public static final MeterId SHARED_POINT_METER_ID = new MeterId(1); - public static final MeterId Y_POINT_METER_ID = new MeterId(2); - public static final MeterId SUB_FLOW_1_METER_ID = new MeterId(3); - public static final MeterId SUB_FLOW_2_METER_ID = new MeterId(4); - public static final GroupId GROUP_ID = new GroupId(5); - public static final Set FEATURES = Sets.newHashSet( - RESET_COUNTS_FLAG, METERS, NOVIFLOW_PUSH_POP_VXLAN); - public static final SwitchId SWITCH_ID_1 = new SwitchId(1); - public static final SwitchId SWITCH_ID_2 = new SwitchId(2); - public static final SwitchId SWITCH_ID_3 = new SwitchId(3); - public static final SwitchId SWITCH_ID_4 = new SwitchId(4); - public static final SwitchId SWITCH_ID_5 = new SwitchId(5); - public static final SwitchId SWITCH_ID_6 = new SwitchId(6); - public static final SwitchId SWITCH_ID_7 = new SwitchId(7); - public static final Switch SWITCH_1 = buildSwitch(SWITCH_ID_1, FEATURES); - public static final Switch SWITCH_2 = buildSwitch(SWITCH_ID_2, FEATURES); - public static final Switch SWITCH_3 = buildSwitch(SWITCH_ID_3, FEATURES); - public static final Switch SWITCH_4 = buildSwitch(SWITCH_ID_4, FEATURES); - public static final Switch SWITCH_5 = buildSwitch(SWITCH_ID_5, FEATURES); - public static final Switch SWITCH_6 = buildSwitch(SWITCH_ID_6, FEATURES); - public static final Switch SWITCH_7 = buildSwitch(SWITCH_ID_7, FEATURES); - - public static final FlowTransitEncapsulation VLAN_ENCAPSULATION = new FlowTransitEncapsulation( - 14, FlowEncapsulationType.TRANSIT_VLAN); - - public static final FlowSegmentCookie FORWARD_COOKIE = FlowSegmentCookie.builder() - .direction(FlowPathDirection.FORWARD).flowEffectiveId(1).subType(FlowSubType.SHARED).build(); - public static final FlowSegmentCookie REVERSE_COOKIE = FlowSegmentCookie.builder() - .direction(FlowPathDirection.REVERSE).flowEffectiveId(1).subType(FlowSubType.SHARED).build(); - - public static final FlowSegmentCookie FORWARD_SUB_COOKIE_1 = FORWARD_COOKIE.toBuilder() - .subType(FlowSubType.HA_SUB_FLOW_1).build(); - public static final FlowSegmentCookie REVERSE_SUB_COOKIE_1 = REVERSE_COOKIE.toBuilder() - .subType(FlowSubType.HA_SUB_FLOW_1).build(); - public static final FlowSegmentCookie FORWARD_SUB_COOKIE_2 = FORWARD_COOKIE.toBuilder() - 
.subType(FlowSubType.HA_SUB_FLOW_2).build(); - public static final FlowSegmentCookie REVERSE_SUB_COOKIE_2 = REVERSE_COOKIE.toBuilder() - .subType(FlowSubType.HA_SUB_FLOW_2).build(); + +public class RuleManagerHaFlowRulesTest extends HaFlowRulesBaseTest { + private RuleManagerImpl ruleManager; @@ -133,7 +57,7 @@ public void buildYShapedHaFlowForwardCommands() { List forwardSpeakerData = ruleManager.buildRulesHaFlowPath( haFlow.getForwardPath(), false, adapter); - Assertions.assertEquals(7, forwardSpeakerData.size()); + Assertions.assertEquals(9, forwardSpeakerData.size()); Map> forwardCommands = groupBySwitchId(forwardSpeakerData); Assertions.assertEquals(3, forwardCommands.get(SWITCH_ID_1).size()); @@ -141,10 +65,10 @@ public void buildYShapedHaFlowForwardCommands() { Assertions.assertEquals(1, getMeterCount(forwardCommands.get(SWITCH_ID_1))); assertFlowTables(forwardCommands.get(SWITCH_ID_1), INPUT, INGRESS); - Assertions.assertEquals(2, forwardCommands.get(SWITCH_ID_2).size()); - Assertions.assertEquals(1, getFlowCount(forwardCommands.get(SWITCH_ID_2))); + Assertions.assertEquals(4, forwardCommands.get(SWITCH_ID_2).size()); + Assertions.assertEquals(3, getFlowCount(forwardCommands.get(SWITCH_ID_2))); Assertions.assertEquals(1, getGroupCount(forwardCommands.get(SWITCH_ID_2))); - assertFlowTables(forwardCommands.get(SWITCH_ID_2), TRANSIT); + assertFlowTables(forwardCommands.get(SWITCH_ID_2), INPUT, INPUT, TRANSIT); Assertions.assertEquals(1, forwardCommands.get(SWITCH_ID_3).size()); Assertions.assertEquals(1, getFlowCount(forwardCommands.get(SWITCH_ID_3))); @@ -191,7 +115,7 @@ public void buildLongYShapedHaFlowForwardCommands() { DataAdapter adapter = buildAdapter(haFlow); List forwardSpeakerData = ruleManager.buildRulesHaFlowPath( haFlow.getForwardPath(), false, adapter); - Assertions.assertEquals(10, forwardSpeakerData.size()); + Assertions.assertEquals(12, forwardSpeakerData.size()); Map> switchCommandMap = groupBySwitchId(forwardSpeakerData); Assertions.assertEquals(3, switchCommandMap.get(SWITCH_ID_1).size()); @@ -203,10 +127,10 @@ public void buildLongYShapedHaFlowForwardCommands() { Assertions.assertEquals(1, getFlowCount(switchCommandMap.get(SWITCH_ID_2))); assertFlowTables(switchCommandMap.get(SWITCH_ID_2), TRANSIT); - Assertions.assertEquals(2, switchCommandMap.get(SWITCH_ID_3).size()); - Assertions.assertEquals(1, getFlowCount(switchCommandMap.get(SWITCH_ID_3))); + Assertions.assertEquals(4, switchCommandMap.get(SWITCH_ID_3).size()); + Assertions.assertEquals(3, getFlowCount(switchCommandMap.get(SWITCH_ID_3))); Assertions.assertEquals(1, getGroupCount(switchCommandMap.get(SWITCH_ID_3))); - assertFlowTables(switchCommandMap.get(SWITCH_ID_3), TRANSIT); + assertFlowTables(switchCommandMap.get(SWITCH_ID_3), INPUT, INPUT, TRANSIT); Assertions.assertEquals(1, switchCommandMap.get(SWITCH_ID_4).size()); Assertions.assertEquals(1, getFlowCount(switchCommandMap.get(SWITCH_ID_4))); @@ -273,7 +197,7 @@ public void buildISharedDifferentLengthHaFlowForwardCommands() { List forwardSpeakerData = ruleManager.buildRulesHaFlowPath( haFlow.getForwardPath(), false, adapter); - Assertions.assertEquals(7, forwardSpeakerData.size()); + Assertions.assertEquals(9, forwardSpeakerData.size()); Map> forwardCommands = groupBySwitchId(forwardSpeakerData); Assertions.assertEquals(3, forwardCommands.get(SWITCH_ID_1).size()); @@ -285,10 +209,10 @@ public void buildISharedDifferentLengthHaFlowForwardCommands() { Assertions.assertEquals(1, getFlowCount(forwardCommands.get(SWITCH_ID_2))); 
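// Editor's note (hedged reading of the hunks above): each forward-path total grows by two
// because the Y-point switch now also installs one INPUT-table rule per HA sub-path, built
// by the new YPointServer42HaFlowRttTransitRuleGenerator, alongside the pre-existing
// TRANSIT rule; hence assertFlowTables(..., INPUT, INPUT, TRANSIT) at the Y-point switch.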
assertFlowTables(forwardCommands.get(SWITCH_ID_2), TRANSIT); - Assertions.assertEquals(2, forwardCommands.get(SWITCH_ID_3).size()); - Assertions.assertEquals(1, getFlowCount(forwardCommands.get(SWITCH_ID_3))); + Assertions.assertEquals(4, forwardCommands.get(SWITCH_ID_3).size()); + Assertions.assertEquals(3, getFlowCount(forwardCommands.get(SWITCH_ID_3))); Assertions.assertEquals(1, getGroupCount(forwardCommands.get(SWITCH_ID_3))); - assertFlowTables(forwardCommands.get(SWITCH_ID_3), TRANSIT); + assertFlowTables(forwardCommands.get(SWITCH_ID_3), INPUT, INPUT, TRANSIT); Assertions.assertEquals(1, forwardCommands.get(SWITCH_ID_4).size()); Assertions.assertEquals(1, getFlowCount(forwardCommands.get(SWITCH_ID_4))); @@ -516,6 +440,7 @@ public void buildIShapedOneSwitchHaFlowForwardCommands() { Assertions.assertEquals(1, forwardCommands.get(SWITCH_ID_3).size()); Assertions.assertEquals(1, getFlowCount(forwardCommands.get(SWITCH_ID_3))); + assertFlowTables(forwardCommands.get(SWITCH_ID_3), EGRESS); } @@ -544,265 +469,126 @@ public void buildIShapedOneSwitchHaFlowReverseCommands() { assertFlowTables(switchCommandMap.get(SWITCH_ID_3), INPUT, INGRESS); } - private HaFlow buildYShapedHaFlow() { - // HA-flow 3 - // / - // 1------2 - // \ - // 4 - - HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_3, SWITCH_4); - HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get(); - HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get(); - FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1, - subFlow1, SWITCH_1, SWITCH_2, SWITCH_3); - FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2, - subFlow2, SWITCH_1, SWITCH_2, SWITCH_4); - setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2); - setYPoint(haFlow, SWITCH_ID_2); - return haFlow; - } + @Test + public void buildIShapedOneSwitchHaFlowServer42ForwardCommands() { + HaFlow haFlow = buildIShapedOneSwitchHaFlowServer42(); + DataAdapter adapter = buildAdapter(haFlow); + List forwardSpeakerData = ruleManager.buildRulesHaFlowPath( + haFlow.getForwardPath(), false, adapter); - private HaFlow buildLongYShapedHaFlow() { - // HA-flow 4-----5 - // / - // 1------2-----3 - // \ - // 6-----7 - - HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_5, SWITCH_7); - HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get(); - HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get(); - FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1, - subFlow1, SWITCH_1, SWITCH_2, SWITCH_3, SWITCH_4, SWITCH_5); - FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2, - subFlow2, SWITCH_1, SWITCH_2, SWITCH_3, SWITCH_6, SWITCH_7); - setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2); - setYPoint(haFlow, SWITCH_ID_3); - return haFlow; - } + Assertions.assertEquals(9, forwardSpeakerData.size()); - private HaFlow buildIShapedDifferentLengthHaFlow() { - // HA-flow 4 - // / - // 1------2-----3 - // ^ - // Y-point - - HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_3, SWITCH_4); - HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get(); - HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get(); - FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1, - subFlow1, SWITCH_1, SWITCH_2, SWITCH_3); - FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2, - subFlow2, 
SWITCH_1, SWITCH_2, SWITCH_3, SWITCH_4); - setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2); - setYPoint(haFlow, SWITCH_ID_3); - return haFlow; - } + Map> forwardCommands = groupBySwitchId(forwardSpeakerData); + Assertions.assertEquals(8, forwardCommands.get(SWITCH_ID_8_SERVER42).size()); + Assertions.assertEquals(6, getFlowCount(forwardCommands.get(SWITCH_ID_8_SERVER42))); + Assertions.assertEquals(1, getMeterCount(forwardCommands.get(SWITCH_ID_8_SERVER42))); + Assertions.assertEquals(1, getGroupCount(forwardCommands.get(SWITCH_ID_8_SERVER42))); + assertFlowTables(forwardCommands.get(SWITCH_ID_8_SERVER42), INPUT, INPUT, PRE_INGRESS, PRE_INGRESS, + INGRESS, INGRESS); - private HaFlow buildIShapedEqualLengthHaFlow() { - // HA-flow - // - // 1------2-----3 - // ^ - // Y-point - - HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_3, SWITCH_3); - HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get(); - HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get(); - FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1, - subFlow1, SWITCH_1, SWITCH_2, SWITCH_3); - FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2, - subFlow2, SWITCH_1, SWITCH_2, SWITCH_3); - setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2); - setYPoint(haFlow, SWITCH_ID_3); - return haFlow; - } + Assertions.assertEquals(1, forwardCommands.get(SWITCH_ID_9_SERVER42).size()); + Assertions.assertEquals(1, getFlowCount(forwardCommands.get(SWITCH_ID_9_SERVER42))); - private HaFlow buildIShapedEqualLengthDifferentIslsHaFlow() { - // HA-flow - // - // 1======2 - // ^ - // Y-point - - HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_2, SWITCH_2); - HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get(); - HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get(); - FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1, - subFlow1, SWITCH_1, SWITCH_2); - FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2, - subFlow2, SWITCH_1, SWITCH_2); - setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2); - setYPoint(haFlow, SWITCH_ID_1); - return haFlow; + assertFlowTables(forwardCommands.get(SWITCH_ID_9_SERVER42), EGRESS); } - private HaFlow buildIShapedOneSwitchHaFlow() { - // HA-flow - // - // 1------2-----3 - // ^ - // Y-point - - HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_1, SWITCH_3); - HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get(); - HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get(); - FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1, - subFlow1, SWITCH_1); - FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2, - subFlow2, SWITCH_1, SWITCH_2, SWITCH_3); - setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2); - setYPoint(haFlow, SWITCH_ID_1); - return haFlow; - } + @Test + public void buildIShapedOneSwitchHaFlowServer42ReverseCommands() { + HaFlow haFlow = buildIShapedOneSwitchHaFlowServer42(); + DataAdapter adapter = buildAdapter(haFlow); + List reverseSpeakerData = ruleManager.buildRulesHaFlowPath( + haFlow.getReversePath(), false, adapter); - private static void setYPoint(HaFlow haFlow, SwitchId switchId3) { - haFlow.getForwardPath().setYPointSwitchId(switchId3); - haFlow.getReversePath().setYPointSwitchId(switchId3); - } + 
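// Editor's note (hedged): in the reverse direction the HA sub-flow endpoints act as the
// ingress switches, so the server42 INPUT/INGRESS rule pair lands on switch 9 next to the
// customer-facing rules, while the shared switch 8 mainly terminates reverse traffic
// (EGRESS) plus the one-switch sub-flow's own rules, which is what the assertions below count.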
Assertions.assertEquals(9, reverseSpeakerData.size()); - private void setMainPaths(HaFlow haFlow, PathId forwardId, PathId reverseId, - FlowPath[] firstSubPaths, FlowPath[] secondSubPaths) { - firstSubPaths[1].setMeterId(SUB_FLOW_1_METER_ID); - secondSubPaths[1].setMeterId(SUB_FLOW_2_METER_ID); - - HaFlowPath forwardHaPath = HaFlowPath.builder() - .cookie(FORWARD_COOKIE) - .sharedPointMeterId(SHARED_POINT_METER_ID) - .yPointMeterId(null) - .yPointGroupId(GROUP_ID) - .haPathId(forwardId) - .sharedSwitch(haFlow.getSharedSwitch()) - .build(); - forwardHaPath.setHaSubFlows(haFlow.getHaSubFlows()); - forwardHaPath.setSubPaths(Lists.newArrayList(firstSubPaths[0], secondSubPaths[0])); - haFlow.setForwardPath(forwardHaPath); - - HaFlowPath reverseHaPath = HaFlowPath.builder() - .cookie(REVERSE_COOKIE) - .sharedPointMeterId(null) - .yPointMeterId(Y_POINT_METER_ID) - .yPointGroupId(null) - .haPathId(reverseId) - .sharedSwitch(haFlow.getSharedSwitch()) - .build(); - reverseHaPath.setHaSubFlows(haFlow.getHaSubFlows()); - reverseHaPath.setSubPaths(Lists.newArrayList(firstSubPaths[1], secondSubPaths[1])); - haFlow.setReversePath(reverseHaPath); - } + Map> switchCommandMap = groupBySwitchId(reverseSpeakerData); + Assertions.assertEquals(4, switchCommandMap.get(SWITCH_ID_8_SERVER42).size()); + Assertions.assertEquals(3, getFlowCount(switchCommandMap.get(SWITCH_ID_8_SERVER42))); + Assertions.assertEquals(1, getMeterCount(switchCommandMap.get(SWITCH_ID_8_SERVER42))); + assertFlowTables(switchCommandMap.get(SWITCH_ID_8_SERVER42), INPUT, INGRESS, EGRESS); - private FlowPath[] buildSubPathPair( - PathId forwardId, PathId reverseId, FlowSegmentCookie forwardCookie, FlowSegmentCookie reverseCookie, - HaSubFlow haSubFlow, Switch... switches) { - Switch[] reverseSwitches = Arrays.copyOf(switches, switches.length); - ArrayUtils.reverse(reverseSwitches); - return new FlowPath[]{ - buildSubPath(forwardId, haSubFlow, forwardCookie, switches), - buildSubPath(reverseId, haSubFlow, reverseCookie, reverseSwitches) - }; + Assertions.assertEquals(5, switchCommandMap.get(SWITCH_ID_9_SERVER42).size()); + Assertions.assertEquals(4, getFlowCount(switchCommandMap.get(SWITCH_ID_9_SERVER42))); + Assertions.assertEquals(1, getMeterCount(switchCommandMap.get(SWITCH_ID_9_SERVER42))); + assertFlowTables(switchCommandMap.get(SWITCH_ID_9_SERVER42), INPUT, INPUT, INGRESS, INGRESS); } - private HaFlow buildHaFlow(Switch sharedSwitch, Switch endpointSwitch1, Switch endpointSwitch2) { - HaFlow haFlow = HaFlow.builder() - .haFlowId(HA_FLOW_ID) - .sharedSwitch(sharedSwitch) - .build(); - haFlow.setHaSubFlows(Lists.newArrayList( - buildHaSubFlow(endpointSwitch1, SUB_FLOW_1), buildHaSubFlow(endpointSwitch2, SUB_FLOW_2))); - return haFlow; - } + @Test + public void buildYShapedYEqualsSharedHaFlowHaFlowForwardCommands() { + HaFlow haFlow = buildYShapedYEqualsSharedHaFlow(); + DataAdapter adapter = buildAdapter(haFlow); + List forwardSpeakerData = ruleManager.buildRulesHaFlowPath( + haFlow.getForwardPath(), false, adapter); + Map> forwardCommands = groupBySwitchId(forwardSpeakerData); - private HaSubFlow buildHaSubFlow(Switch sw, String subFlowId) { - return HaSubFlow.builder() - .haSubFlowId(subFlowId) - .endpointSwitch(sw) - .build(); - } + Assertions.assertEquals(6, forwardSpeakerData.size()); - private FlowPath buildSubPath(PathId pathId, HaSubFlow haSubFlow, FlowSegmentCookie cookie, Switch... 
switches) { - FlowPath subPath = FlowPath.builder() - .cookie(cookie) - .pathId(pathId) - .srcSwitch(switches[0]) - .destSwitch(switches[switches.length - 1]) - .build(); - List segments = new ArrayList<>(); - for (int i = 1; i < switches.length; i++) { - segments.add(PathSegment.builder() - .pathId(pathId) - .srcSwitch(switches[i - 1]) - .destSwitch(switches[i]) - .build()); - } - subPath.setSegments(segments); - subPath.setHaSubFlow(haSubFlow); - return subPath; - } + Assertions.assertEquals(4, forwardCommands.get(SWITCH_ID_1).size()); + Assertions.assertEquals(2, getFlowCount(forwardCommands.get(SWITCH_ID_1))); + Assertions.assertEquals(1, getGroupCount(forwardCommands.get(SWITCH_ID_1))); + Assertions.assertEquals(1, getMeterCount(forwardCommands.get(SWITCH_ID_1))); + assertFlowTables(forwardCommands.get(SWITCH_ID_1), INPUT, INGRESS); - private DataAdapter buildAdapter(HaFlow haFlow) { - List subPaths = haFlow.getPaths().stream().flatMap(path -> path.getSubPaths().stream()) - .collect(Collectors.toList()); - - Set switches = Sets.newHashSet(haFlow.getSharedSwitch()); - haFlow.getHaSubFlows().stream().map(HaSubFlow::getEndpointSwitch).forEach(switches::add); - subPaths.stream() - .flatMap(path -> path.getSegments().stream()) - .flatMap(segment -> Stream.of(segment.getSrcSwitch(), segment.getDestSwitch())) - .forEach(switches::add); - - Map haFlowMap = haFlow.getPaths().stream() - .collect(Collectors.toMap(HaFlowPath::getHaPathId, HaFlowPath::getHaFlow)); - for (FlowPath subPath : subPaths) { - haFlowMap.put(subPath.getPathId(), subPath.getHaFlowPath().getHaFlow()); - } - - Map encapsulationMap = new HashMap<>(); - for (HaFlowPath haFlowPath : haFlow.getPaths()) { - encapsulationMap.put(haFlowPath.getHaPathId(), VLAN_ENCAPSULATION); - } - - return InMemoryDataAdapter.builder() - .commonFlowPaths(new HashMap<>()) - .haFlowSubPaths(subPaths.stream().collect(toMap(FlowPath::getPathId, identity()))) - .transitEncapsulations(encapsulationMap) - .switches(switches.stream().collect(Collectors.toMap(Switch::getSwitchId, identity()))) - .haFlowMap(haFlowMap) - .haFlowPathMap(haFlow.getPaths().stream().collect(toMap(HaFlowPath::getHaPathId, identity()))) - .build(); + Assertions.assertEquals(1, forwardCommands.get(SWITCH_ID_2).size()); + Assertions.assertEquals(1, getFlowCount(forwardCommands.get(SWITCH_ID_2))); + assertFlowTables(forwardCommands.get(SWITCH_ID_2), EGRESS); + + Assertions.assertEquals(1, forwardCommands.get(SWITCH_ID_3).size()); + Assertions.assertEquals(1, getFlowCount(forwardCommands.get(SWITCH_ID_3))); + assertFlowTables(forwardCommands.get(SWITCH_ID_3), EGRESS); } + @Test + public void buildYShapedYEqualsSharedHaFlowServer42HaFlowForwardCommands() { + HaFlow haFlow = buildYShapedYEqualsSharedHaFlowServer42(); + DataAdapter adapter = buildAdapter(haFlow); + List forwardSpeakerData = ruleManager.buildRulesHaFlowPath( + haFlow.getForwardPath(), false, adapter); - private void assertFlowTables(Collection commands, OfTable... 
expectedTables) { - List actualTables = commands.stream() - .filter(FlowSpeakerData.class::isInstance) - .map(FlowSpeakerData.class::cast) - .map(FlowSpeakerData::getTable) - .sorted() - .collect(Collectors.toList()); + Map> forwardCommands = groupBySwitchId(forwardSpeakerData); - Arrays.sort(expectedTables); - Assertions.assertEquals(Arrays.asList(expectedTables), actualTables); - } + Assertions.assertEquals(12, forwardSpeakerData.size()); - private int getFlowCount(Collection commands) { - return getCommandCount(commands, FlowSpeakerData.class); - } + Assertions.assertEquals(10, forwardCommands.get(SWITCH_ID_8_SERVER42).size()); + Assertions.assertEquals(8, getFlowCount(forwardCommands.get(SWITCH_ID_8_SERVER42))); + Assertions.assertEquals(1, getGroupCount(forwardCommands.get(SWITCH_ID_8_SERVER42))); + Assertions.assertEquals(1, getMeterCount(forwardCommands.get(SWITCH_ID_8_SERVER42))); + assertFlowTables(forwardCommands.get(SWITCH_ID_8_SERVER42), INPUT, INPUT, INPUT, PRE_INGRESS, + PRE_INGRESS, INGRESS, INGRESS, INGRESS); - private int getMeterCount(Collection commands) { - return getCommandCount(commands, MeterSpeakerData.class); - } + Assertions.assertEquals(1, forwardCommands.get(SWITCH_ID_9_SERVER42).size()); + Assertions.assertEquals(1, getFlowCount(forwardCommands.get(SWITCH_ID_9_SERVER42))); + assertFlowTables(forwardCommands.get(SWITCH_ID_9_SERVER42), EGRESS); - private int getGroupCount(Collection commands) { - return getCommandCount(commands, GroupSpeakerData.class); + Assertions.assertEquals(1, forwardCommands.get(SWITCH_ID_10_SERVER42).size()); + Assertions.assertEquals(1, getFlowCount(forwardCommands.get(SWITCH_ID_10_SERVER42))); + assertFlowTables(forwardCommands.get(SWITCH_ID_10_SERVER42), EGRESS); } - private int getCommandCount(Collection commands, Class clazz) { - return (int) commands.stream().filter(clazz::isInstance).count(); - } + @Test + public void buildYShapedHaFlowReverseServer42Commands() { + HaFlow haFlow = buildYShapedYEqualsSharedHaFlowServer42(); + DataAdapter adapter = buildAdapter(haFlow); + List reverseSpeakerData = ruleManager.buildRulesHaFlowPath( + haFlow.getReversePath(), false, adapter); + + Map> switchCommandMap = groupBySwitchId(reverseSpeakerData); + Assertions.assertEquals(13, reverseSpeakerData.size()); + + Assertions.assertEquals(3, switchCommandMap.get(SWITCH_ID_8_SERVER42).size()); + Assertions.assertEquals(2, getFlowCount(switchCommandMap.get(SWITCH_ID_8_SERVER42))); + Assertions.assertEquals(1, getMeterCount(switchCommandMap.get(SWITCH_ID_8_SERVER42))); + assertFlowTables(switchCommandMap.get(SWITCH_ID_8_SERVER42), EGRESS, EGRESS); - private Map> groupBySwitchId(Collection commands) { - return commands.stream().collect(Collectors.groupingBy(SpeakerData::getSwitchId)); + Assertions.assertEquals(5, switchCommandMap.get(SWITCH_ID_9_SERVER42).size()); + Assertions.assertEquals(4, getFlowCount(switchCommandMap.get(SWITCH_ID_9_SERVER42))); + Assertions.assertEquals(1, getMeterCount(switchCommandMap.get(SWITCH_ID_9_SERVER42))); + assertFlowTables(switchCommandMap.get(SWITCH_ID_9_SERVER42), INPUT, INPUT, INGRESS, INGRESS); + + Assertions.assertEquals(5, switchCommandMap.get(SWITCH_ID_10_SERVER42).size()); + Assertions.assertEquals(4, getFlowCount(switchCommandMap.get(SWITCH_ID_10_SERVER42))); + Assertions.assertEquals(1, getMeterCount(switchCommandMap.get(SWITCH_ID_10_SERVER42))); + assertFlowTables(switchCommandMap.get(SWITCH_ID_10_SERVER42), INPUT, INPUT, INGRESS, INGRESS); } + } diff --git 
a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/Utils.java b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/Utils.java index 8fecc27d2e7..b31b3d3b8e5 100644 --- a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/Utils.java +++ b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/Utils.java @@ -116,6 +116,10 @@ public static SwitchProperties buildSwitchProperties( .build(); } + public static SwitchProperties buildSwitchPropertiesServer42(Switch sw) { + return buildSwitchProperties(sw, false, false, true, RttState.DISABLED); + } + /** * Find Speaker Command Data of specific type. */ diff --git a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/Server42IngressRuleGeneratorTest.java b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/Server42IngressRuleGeneratorTest.java index 575b916e628..4a218335ddd 100644 --- a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/Server42IngressRuleGeneratorTest.java +++ b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/Server42IngressRuleGeneratorTest.java @@ -152,10 +152,9 @@ public void setup() { public void buildTransformActionsVlanEncapsulationDoubleVlanTest() { Flow flow = buildFlow(PATH, OUTER_VLAN_ID_1, INNER_VLAN_ID_1); Server42IngressRuleGenerator generator = buildGenerator(PATH, flow, VLAN_ENCAPSULATION); - List transformActions = generator.buildTransformActions(INNER_VLAN_ID_1, FEATURES); + List transformActions = generator.buildTransformActions(INNER_VLAN_ID_1, FEATURES, PATH); List expectedActions = newArrayList( SetFieldAction.builder().field(Field.ETH_SRC).value(SWITCH_ID_1.toMacAddressAsLong()).build(), - SetFieldAction.builder().field(Field.ETH_DST).value(SWITCH_ID_2.toMacAddressAsLong()).build(), SetFieldAction.builder().field(Field.VLAN_VID).value(TRANSIT_VLAN_ID).build()); Assertions.assertEquals(expectedActions, transformActions); } @@ -164,10 +163,9 @@ public void buildTransformActionsVlanEncapsulationDoubleVlanTest() { public void buildTransformActionsVlanEncapsulationSingleVlanTest() { Flow flow = buildFlow(PATH, OUTER_VLAN_ID_1, 0); Server42IngressRuleGenerator generator = buildGenerator(PATH, flow, VLAN_ENCAPSULATION); - List transformActions = generator.buildTransformActions(0, FEATURES); + List transformActions = generator.buildTransformActions(0, FEATURES, PATH); List expectedActions = newArrayList( SetFieldAction.builder().field(Field.ETH_SRC).value(SWITCH_ID_1.toMacAddressAsLong()).build(), - SetFieldAction.builder().field(Field.ETH_DST).value(SWITCH_ID_2.toMacAddressAsLong()).build(), new PushVlanAction(), SetFieldAction.builder().field(Field.VLAN_VID).value(TRANSIT_VLAN_ID).build()); Assertions.assertEquals(expectedActions, transformActions); @@ -177,10 +175,9 @@ public void buildTransformActionsVlanEncapsulationSingleVlanTest() { public void buildTransformActionsVlanEncapsulationFullPortTest() { Flow flow = buildFlow(PATH, 0, 0); Server42IngressRuleGenerator generator = buildGenerator(PATH, flow, VLAN_ENCAPSULATION); - List transformActions = generator.buildTransformActions(0, FEATURES); + List transformActions = generator.buildTransformActions(0, FEATURES, PATH); List expectedActions = newArrayList( 
SetFieldAction.builder().field(Field.ETH_SRC).value(SWITCH_ID_1.toMacAddressAsLong()).build(), - SetFieldAction.builder().field(Field.ETH_DST).value(SWITCH_ID_2.toMacAddressAsLong()).build(), new PushVlanAction(), SetFieldAction.builder().field(Field.VLAN_VID).value(TRANSIT_VLAN_ID).build()); Assertions.assertEquals(expectedActions, transformActions); @@ -190,10 +187,9 @@ public void buildTransformActionsVlanEncapsulationFullPortTest() { public void buildTransformActionsVlanEncapsulationInnerVlanEqualTransitVlanTest() { Flow flow = buildFlow(PATH, OUTER_VLAN_ID_1, TRANSIT_VLAN_ID); Server42IngressRuleGenerator generator = buildGenerator(PATH, flow, VLAN_ENCAPSULATION); - List transformActions = generator.buildTransformActions(TRANSIT_VLAN_ID, FEATURES); + List transformActions = generator.buildTransformActions(TRANSIT_VLAN_ID, FEATURES, PATH); List expectedActions = newArrayList( - SetFieldAction.builder().field(Field.ETH_SRC).value(SWITCH_ID_1.toMacAddressAsLong()).build(), - SetFieldAction.builder().field(Field.ETH_DST).value(SWITCH_ID_2.toMacAddressAsLong()).build()); + SetFieldAction.builder().field(Field.ETH_SRC).value(SWITCH_ID_1.toMacAddressAsLong()).build()); Assertions.assertEquals(expectedActions, transformActions); } @@ -201,7 +197,7 @@ public void buildTransformActionsVlanEncapsulationInnerVlanEqualTransitVlanTest( public void buildTransformActionsVxlanEncapsulationDoubleVlanTest() { Flow flow = buildFlow(PATH, OUTER_VLAN_ID_1, INNER_VLAN_ID_1); Server42IngressRuleGenerator generator = buildGenerator(PATH, flow, VXLAN_ENCAPSULATION); - List transformActions = generator.buildTransformActions(INNER_VLAN_ID_1, FEATURES); + List transformActions = generator.buildTransformActions(INNER_VLAN_ID_1, FEATURES, PATH); List expectedActions = newArrayList(new PopVlanAction(), buildPushVxlan()); Assertions.assertEquals(expectedActions, transformActions); } @@ -210,7 +206,7 @@ public void buildTransformActionsVxlanEncapsulationDoubleVlanTest() { public void buildTransformActionsVxlanEncapsulationSingleVlanTest() { Flow flow = buildFlow(PATH, OUTER_VLAN_ID_1, 0); Server42IngressRuleGenerator generator = buildGenerator(PATH, flow, VXLAN_ENCAPSULATION); - List transformActions = generator.buildTransformActions(0, FEATURES); + List transformActions = generator.buildTransformActions(0, FEATURES, PATH); List expectedActions = newArrayList(buildPushVxlan()); Assertions.assertEquals(expectedActions, transformActions); } @@ -219,7 +215,7 @@ public void buildTransformActionsVxlanEncapsulationSingleVlanTest() { public void buildTransformActionsVxlanEncapsulationFullPortTest() { Flow flow = buildFlow(PATH, 0, 0); Server42IngressRuleGenerator generator = buildGenerator(PATH, flow, VXLAN_ENCAPSULATION); - List transformActions = generator.buildTransformActions(0, FEATURES); + List transformActions = generator.buildTransformActions(0, FEATURES, PATH); List expectedActions = newArrayList(buildPushVxlan()); Assertions.assertEquals(expectedActions, transformActions); } @@ -274,7 +270,6 @@ public void buildCommandsVlanEncapsulationDoubleVlanTest() { .value(ingressMetadata.getValue()).mask(ingressMetadata.getMask()).build()); List expectedIngressActions = newArrayList( SetFieldAction.builder().field(Field.ETH_SRC).value(SWITCH_ID_1.toMacAddressAsLong()).build(), - SetFieldAction.builder().field(Field.ETH_DST).value(SWITCH_ID_2.toMacAddressAsLong()).build(), SetFieldAction.builder().field(Field.VLAN_VID).value(TRANSIT_VLAN_ID).build(), new PortOutAction(new PortNumber(PORT_NUMBER_2)) ); @@ -328,7 +323,6 @@ 
public void buildCommandsVlanEncapsulationFullPortTest() { .value(ingressMetadata.getValue()).mask(ingressMetadata.getMask()).build()); List expectedIngressActions = newArrayList( SetFieldAction.builder().field(Field.ETH_SRC).value(SWITCH_ID_1.toMacAddressAsLong()).build(), - SetFieldAction.builder().field(Field.ETH_DST).value(SWITCH_ID_2.toMacAddressAsLong()).build(), new PushVlanAction(), SetFieldAction.builder().field(Field.VLAN_VID).value(TRANSIT_VLAN_ID).build(), new PortOutAction(new PortNumber(PORT_NUMBER_2))); diff --git a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/HaFlowRulesBaseTest.java b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/HaFlowRulesBaseTest.java new file mode 100644 index 00000000000..f1126382fc1 --- /dev/null +++ b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/HaFlowRulesBaseTest.java @@ -0,0 +1,469 @@ +/* Copyright 2023 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.openkilda.rulemanager.factory.generator.flow.haflow; + +import static java.util.function.Function.identity; +import static java.util.stream.Collectors.toMap; +import static org.openkilda.model.SwitchFeature.METERS; +import static org.openkilda.model.SwitchFeature.NOVIFLOW_PUSH_POP_VXLAN; +import static org.openkilda.model.SwitchFeature.RESET_COUNTS_FLAG; +import static org.openkilda.rulemanager.Utils.buildSwitch; +import static org.openkilda.rulemanager.Utils.buildSwitchProperties; +import static org.openkilda.rulemanager.Utils.buildSwitchPropertiesServer42; + +import org.openkilda.model.FlowEncapsulationType; +import org.openkilda.model.FlowPath; +import org.openkilda.model.FlowPathDirection; +import org.openkilda.model.FlowTransitEncapsulation; +import org.openkilda.model.GroupId; +import org.openkilda.model.HaFlow; +import org.openkilda.model.HaFlowPath; +import org.openkilda.model.HaSubFlow; +import org.openkilda.model.MeterId; +import org.openkilda.model.PathId; +import org.openkilda.model.PathSegment; +import org.openkilda.model.Switch; +import org.openkilda.model.SwitchFeature; +import org.openkilda.model.SwitchId; +import org.openkilda.model.SwitchProperties; +import org.openkilda.model.cookie.FlowSegmentCookie; +import org.openkilda.model.cookie.FlowSubType; +import org.openkilda.rulemanager.DataAdapter; +import org.openkilda.rulemanager.FlowSpeakerData; +import org.openkilda.rulemanager.GroupSpeakerData; +import org.openkilda.rulemanager.MeterSpeakerData; +import org.openkilda.rulemanager.OfTable; +import org.openkilda.rulemanager.SpeakerData; +import org.openkilda.rulemanager.adapter.InMemoryDataAdapter; + +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import org.apache.commons.lang3.ArrayUtils; +import org.junit.jupiter.api.Assertions; + 
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+public abstract class HaFlowRulesBaseTest {
+
+    public static final String HA_FLOW_ID = "ha_flow_id";
+    public static final String SUB_FLOW_1 = "sub_flow_1";
+    public static final String SUB_FLOW_2 = "sub_flow_2";
+    public static final PathId PATH_ID_1 = new PathId("path_id_1");
+    public static final PathId PATH_ID_2 = new PathId("path_id_2");
+    public static final PathId PATH_ID_3 = new PathId("path_id_3");
+    public static final PathId PATH_ID_4 = new PathId("path_id_4");
+    public static final PathId PATH_ID_5 = new PathId("path_id_5");
+    public static final PathId PATH_ID_6 = new PathId("path_id_6");
+    public static final int PORT_NUMBER_1 = 1;
+    public static final MeterId SHARED_POINT_METER_ID = new MeterId(1);
+    public static final MeterId Y_POINT_METER_ID = new MeterId(2);
+    public static final MeterId SUB_FLOW_1_METER_ID = new MeterId(3);
+    public static final MeterId SUB_FLOW_2_METER_ID = new MeterId(4);
+    public static final GroupId GROUP_ID = new GroupId(5);
+    public static final Set<SwitchFeature> FEATURES = Sets.newHashSet(
+            RESET_COUNTS_FLAG, METERS, NOVIFLOW_PUSH_POP_VXLAN);
+    public static final SwitchId SWITCH_ID_1 = new SwitchId(1);
+    public static final SwitchId SWITCH_ID_2 = new SwitchId(2);
+    public static final SwitchId SWITCH_ID_3 = new SwitchId(3);
+    public static final SwitchId SWITCH_ID_4 = new SwitchId(4);
+    public static final SwitchId SWITCH_ID_5 = new SwitchId(5);
+    public static final SwitchId SWITCH_ID_6 = new SwitchId(6);
+    public static final SwitchId SWITCH_ID_7 = new SwitchId(7);
+    public static final SwitchId SWITCH_ID_8_SERVER42 = new SwitchId(8);
+    public static final SwitchId SWITCH_ID_9_SERVER42 = new SwitchId(9);
+    public static final SwitchId SWITCH_ID_10_SERVER42 = new SwitchId(10);
+
+    public static final Switch SWITCH_1 = buildSwitch(SWITCH_ID_1, FEATURES);
+    public static final Switch SWITCH_2 = buildSwitch(SWITCH_ID_2, FEATURES);
+    public static final Switch SWITCH_3 = buildSwitch(SWITCH_ID_3, FEATURES);
+    public static final Switch SWITCH_4 = buildSwitch(SWITCH_ID_4, FEATURES);
+    public static final Switch SWITCH_5 = buildSwitch(SWITCH_ID_5, FEATURES);
+    public static final Switch SWITCH_6 = buildSwitch(SWITCH_ID_6, FEATURES);
+    public static final Switch SWITCH_7 = buildSwitch(SWITCH_ID_7, FEATURES);
+    public static final Switch SWITCH_8_SERVER42 = buildSwitch(SWITCH_ID_8_SERVER42, FEATURES);
+    public static final Switch SWITCH_9_SERVER42 = buildSwitch(SWITCH_ID_9_SERVER42, FEATURES);
+    public static final Switch SWITCH_10_SERVER42 = buildSwitch(SWITCH_ID_10_SERVER42, FEATURES);
+
+    public static final Map<SwitchId, SwitchProperties> SWITCH_PROPERTIES_MAP = ImmutableMap.of(
+            SWITCH_ID_1, buildSwitchProperties(SWITCH_1),
+            SWITCH_ID_2, buildSwitchProperties(SWITCH_2),
+            SWITCH_ID_3, buildSwitchProperties(SWITCH_3),
+            SWITCH_ID_4, buildSwitchProperties(SWITCH_4),
+            SWITCH_ID_5, buildSwitchProperties(SWITCH_5),
+            SWITCH_ID_7, buildSwitchProperties(SWITCH_7),
+            SWITCH_ID_8_SERVER42, buildSwitchPropertiesServer42(SWITCH_8_SERVER42),
+            SWITCH_ID_9_SERVER42, buildSwitchPropertiesServer42(SWITCH_9_SERVER42),
+            SWITCH_ID_10_SERVER42, buildSwitchPropertiesServer42(SWITCH_10_SERVER42));
+
+    public static final FlowTransitEncapsulation VLAN_ENCAPSULATION = new FlowTransitEncapsulation(
+            14, FlowEncapsulationType.TRANSIT_VLAN);
+
+    public static final FlowSegmentCookie FORWARD_COOKIE = FlowSegmentCookie.builder()
+            .direction(FlowPathDirection.FORWARD).flowEffectiveId(1).subType(FlowSubType.SHARED).build();
+    public static final FlowSegmentCookie REVERSE_COOKIE = FlowSegmentCookie.builder()
+            .direction(FlowPathDirection.REVERSE).flowEffectiveId(1).subType(FlowSubType.SHARED).build();
+
+    public static final FlowSegmentCookie FORWARD_SUB_COOKIE_1 = FORWARD_COOKIE.toBuilder()
+            .subType(FlowSubType.HA_SUB_FLOW_1).build();
+    public static final FlowSegmentCookie REVERSE_SUB_COOKIE_1 = REVERSE_COOKIE.toBuilder()
+            .subType(FlowSubType.HA_SUB_FLOW_1).build();
+    public static final FlowSegmentCookie FORWARD_SUB_COOKIE_2 = FORWARD_COOKIE.toBuilder()
+            .subType(FlowSubType.HA_SUB_FLOW_2).build();
+    public static final FlowSegmentCookie REVERSE_SUB_COOKIE_2 = REVERSE_COOKIE.toBuilder()
+            .subType(FlowSubType.HA_SUB_FLOW_2).build();
+
+    public static final int SHARED_INNER_VLAN = 100;
+
+    protected HaFlow buildYShapedYEqualsSharedHaFlowServer42() {
+        // HA-flow      9
+        //             /
+        // shared_sw 8
+        //             \
+        //              10
+
+        HaFlow haFlow = buildHaFlow(SWITCH_8_SERVER42, SWITCH_9_SERVER42, SWITCH_10_SERVER42, SHARED_INNER_VLAN);
+        HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get();
+        HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get();
+        FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1,
+                subFlow1, SWITCH_8_SERVER42, SWITCH_9_SERVER42);
+        FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2,
+                subFlow2, SWITCH_8_SERVER42, SWITCH_10_SERVER42);
+        setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2);
+        setYPoint(haFlow, SWITCH_ID_8_SERVER42);
+        return haFlow;
+    }
+
+    protected HaFlow buildYShapedYEqualsSharedHaFlow() {
+        // HA-flow      2
+        //             /
+        // shared_sw 1
+        //             \
+        //              3
+
+        HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_2, SWITCH_3);
+        HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get();
+        HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get();
+        FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1,
+                subFlow1, SWITCH_1, SWITCH_2);
+        FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2,
+                subFlow2, SWITCH_1, SWITCH_3);
+        setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2);
+        setYPoint(haFlow, SWITCH_ID_1);
+        return haFlow;
+    }
+
+    protected HaFlow buildLongYShapedHaFlow() {
+        // HA-flow        4-----5
+        //               /
+        // 1------2-----3
+        //               \
+        //                6-----7
+
+        HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_5, SWITCH_7);
+        HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get();
+        HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get();
+        FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1,
+                subFlow1, SWITCH_1, SWITCH_2, SWITCH_3, SWITCH_4, SWITCH_5);
+        FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2,
+                subFlow2, SWITCH_1, SWITCH_2, SWITCH_3, SWITCH_6, SWITCH_7);
+        setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2);
+        setYPoint(haFlow, SWITCH_ID_3);
+        return haFlow;
+    }
+
+    protected HaFlow buildIShapedDifferentLengthHaFlow() {
+        // HA-flow            4
+        //                   /
+        // 1------2-----3
+        //              ^
+        //           Y-point
+
+        HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_3, SWITCH_4);
+        HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get();
+        HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get();
+        FlowPath[]
subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1, + subFlow1, SWITCH_1, SWITCH_2, SWITCH_3); + FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2, + subFlow2, SWITCH_1, SWITCH_2, SWITCH_3, SWITCH_4); + setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2); + setYPoint(haFlow, SWITCH_ID_3); + return haFlow; + } + + protected HaFlow buildIShapedEqualLengthHaFlow() { + // HA-flow + // + // 1------2-----3 + // ^ + // Y-point + + HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_3, SWITCH_3); + HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get(); + HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get(); + FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1, + subFlow1, SWITCH_1, SWITCH_2, SWITCH_3); + FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2, + subFlow2, SWITCH_1, SWITCH_2, SWITCH_3); + setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2); + setYPoint(haFlow, SWITCH_ID_3); + return haFlow; + } + + protected HaFlow buildIShapedEqualLengthDifferentIslsHaFlow() { + // HA-flow + // + // 1======2 + // ^ + // Y-point + + HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_2, SWITCH_2); + HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get(); + HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get(); + FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1, + subFlow1, SWITCH_1, SWITCH_2); + FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2, + subFlow2, SWITCH_1, SWITCH_2); + setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2); + setYPoint(haFlow, SWITCH_ID_1); + return haFlow; + } + + protected HaFlow buildIShapedOneSwitchHaFlow() { + // HA-flow + // + // 1------2-----3 + // ^ + // Y-point + + HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_1, SWITCH_3); + HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get(); + HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get(); + FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1, + subFlow1, SWITCH_1); + FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2, + subFlow2, SWITCH_1, SWITCH_2, SWITCH_3); + setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2); + setYPoint(haFlow, SWITCH_ID_1); + return haFlow; + } + + protected HaFlow buildIShapedOneSwitchHaFlowServer42() { + // HA-flow + // + // 8------9 + // ^ + // Y-point + + HaFlow haFlow = buildHaFlow(SWITCH_8_SERVER42, SWITCH_8_SERVER42, SWITCH_9_SERVER42, SHARED_INNER_VLAN); + HaSubFlow subFlow1 = haFlow.getHaSubFlow(SUB_FLOW_1).get(); + HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get(); + FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1, + subFlow1, SWITCH_8_SERVER42); + FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2, + subFlow2, SWITCH_8_SERVER42, SWITCH_9_SERVER42); + setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2); + setYPoint(haFlow, SWITCH_ID_8_SERVER42); + return haFlow; + } + + protected HaFlow buildYShapedHaFlow() { + // HA-flow 3 + // / + // 1------2 + // \ + // 4 + + HaFlow haFlow = buildHaFlow(SWITCH_1, SWITCH_3, SWITCH_4); + HaSubFlow subFlow1 = 
haFlow.getHaSubFlow(SUB_FLOW_1).get();
+        HaSubFlow subFlow2 = haFlow.getHaSubFlow(SUB_FLOW_2).get();
+        FlowPath[] subPaths1 = buildSubPathPair(PATH_ID_1, PATH_ID_2, FORWARD_SUB_COOKIE_1, REVERSE_SUB_COOKIE_1,
+                subFlow1, SWITCH_1, SWITCH_2, SWITCH_3);
+        FlowPath[] subPaths2 = buildSubPathPair(PATH_ID_3, PATH_ID_4, FORWARD_SUB_COOKIE_2, REVERSE_SUB_COOKIE_2,
+                subFlow2, SWITCH_1, SWITCH_2, SWITCH_4);
+        setMainPaths(haFlow, PATH_ID_5, PATH_ID_6, subPaths1, subPaths2);
+        setYPoint(haFlow, SWITCH_ID_2);
+        return haFlow;
+    }
+
+    protected static void setYPoint(HaFlow haFlow, SwitchId yPointSwitchId) {
+        haFlow.getForwardPath().setYPointSwitchId(yPointSwitchId);
+        haFlow.getReversePath().setYPointSwitchId(yPointSwitchId);
+    }
+
+    protected static void setMainPaths(HaFlow haFlow, PathId forwardId, PathId reverseId,
+                                       FlowPath[] firstSubPaths, FlowPath[] secondSubPaths) {
+        firstSubPaths[1].setMeterId(SUB_FLOW_1_METER_ID);
+        secondSubPaths[1].setMeterId(SUB_FLOW_2_METER_ID);
+
+        HaFlowPath forwardHaPath = HaFlowPath.builder()
+                .cookie(FORWARD_COOKIE)
+                .sharedPointMeterId(SHARED_POINT_METER_ID)
+                .yPointMeterId(null)
+                .yPointGroupId(GROUP_ID)
+                .haPathId(forwardId)
+                .sharedSwitch(haFlow.getSharedSwitch())
+                .build();
+        forwardHaPath.setHaSubFlows(haFlow.getHaSubFlows());
+        forwardHaPath.setSubPaths(Lists.newArrayList(firstSubPaths[0], secondSubPaths[0]));
+        haFlow.setForwardPath(forwardHaPath);
+
+        HaFlowPath reverseHaPath = HaFlowPath.builder()
+                .cookie(REVERSE_COOKIE)
+                .sharedPointMeterId(null)
+                .yPointMeterId(Y_POINT_METER_ID)
+                .yPointGroupId(null)
+                .haPathId(reverseId)
+                .sharedSwitch(haFlow.getSharedSwitch())
+                .build();
+        reverseHaPath.setHaSubFlows(haFlow.getHaSubFlows());
+        reverseHaPath.setSubPaths(Lists.newArrayList(firstSubPaths[1], secondSubPaths[1]));
+        haFlow.setReversePath(reverseHaPath);
+    }
+
+    protected static FlowPath[] buildSubPathPair(
+            PathId forwardId, PathId reverseId, FlowSegmentCookie forwardCookie, FlowSegmentCookie reverseCookie,
+            HaSubFlow haSubFlow, Switch... switches) {
+        Switch[] reverseSwitches = Arrays.copyOf(switches, switches.length);
+        ArrayUtils.reverse(reverseSwitches);
+        return new FlowPath[]{
+                buildSubPath(forwardId, haSubFlow, forwardCookie, switches),
+                buildSubPath(reverseId, haSubFlow, reverseCookie, reverseSwitches)
+        };
+    }
+
+    protected static HaFlow buildHaFlow(Switch sharedSwitch, Switch endpointSwitch1, Switch endpointSwitch2) {
+        return buildHaFlow(sharedSwitch, endpointSwitch1, endpointSwitch2, 0);
+    }
+
+    private static HaFlow buildHaFlow(Switch sharedSwitch, Switch endpointSwitch1, Switch endpointSwitch2,
+                                      int sharedInnerVlan) {
+        HaFlow haFlow = HaFlow.builder()
+                .haFlowId(HA_FLOW_ID)
+                .sharedInnerVlan(sharedInnerVlan)
+                .sharedPort(PORT_NUMBER_1)
+                .sharedSwitch(sharedSwitch)
+                .build();
+        haFlow.setHaSubFlows(Lists.newArrayList(
+                buildHaSubFlow(endpointSwitch1, SUB_FLOW_1), buildHaSubFlow(endpointSwitch2, SUB_FLOW_2)));
+        return haFlow;
+    }
+
+    private static HaSubFlow buildHaSubFlow(Switch sw, String subFlowId) {
+        return HaSubFlow.builder()
+                .haSubFlowId(subFlowId)
+                .endpointSwitch(sw)
+                .build();
+    }
+
+    private static FlowPath buildSubPath(PathId pathId, HaSubFlow haSubFlow, FlowSegmentCookie cookie,
+                                         Switch... switches) {
+        FlowPath subPath = FlowPath.builder()
+                .cookie(cookie)
+                .pathId(pathId)
+                .srcSwitch(switches[0])
+                .destSwitch(switches[switches.length - 1])
+                .build();
+        List<PathSegment> segments = new ArrayList<>();
+        for (int i = 1; i < switches.length; i++) {
+            segments.add(PathSegment.builder()
+                    .pathId(pathId)
+                    .srcSwitch(switches[i - 1])
+                    .destSwitch(switches[i])
+                    .build());
+        }
+        subPath.setSegments(segments);
+        subPath.setHaSubFlow(haSubFlow);
+        return subPath;
+    }
+
+    protected DataAdapter buildAdapter(HaFlow haFlow) {
+        List<FlowPath> subPaths = haFlow.getPaths().stream().flatMap(path -> path.getSubPaths().stream())
+                .collect(Collectors.toList());
+
+        Set<Switch> switches = Sets.newHashSet(haFlow.getSharedSwitch());
+        haFlow.getHaSubFlows().stream().map(HaSubFlow::getEndpointSwitch).forEach(switches::add);
+        subPaths.stream()
+                .flatMap(path -> path.getSegments().stream())
+                .flatMap(segment -> Stream.of(segment.getSrcSwitch(), segment.getDestSwitch()))
+                .forEach(switches::add);
+
+        Map<PathId, HaFlow> haFlowMap = haFlow.getPaths().stream()
+                .collect(Collectors.toMap(HaFlowPath::getHaPathId, HaFlowPath::getHaFlow));
+        for (FlowPath subPath : subPaths) {
+            haFlowMap.put(subPath.getPathId(), subPath.getHaFlowPath().getHaFlow());
+        }
+
+        Map<PathId, FlowTransitEncapsulation> encapsulationMap = new HashMap<>();
+        for (HaFlowPath haFlowPath : haFlow.getPaths()) {
+            encapsulationMap.put(haFlowPath.getHaPathId(), VLAN_ENCAPSULATION);
+        }
+
+        return InMemoryDataAdapter.builder()
+                .switchProperties(SWITCH_PROPERTIES_MAP)
+                .commonFlowPaths(new HashMap<>())
+                .haFlowSubPaths(subPaths.stream().collect(toMap(FlowPath::getPathId, identity())))
+                .transitEncapsulations(encapsulationMap)
+                .switches(switches.stream().collect(Collectors.toMap(Switch::getSwitchId, identity())))
+                .haFlowMap(haFlowMap)
+                .haFlowPathMap(haFlow.getPaths().stream().collect(toMap(HaFlowPath::getHaPathId, identity())))
+                .build();
+    }
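For orientation, a minimal sketch of how a subclass is expected to combine these helpers: build a topology, run one of the generators introduced by this patch against the shared switch, and check the result with the counting helpers defined just below. The builder fields mirror SharedYServer42IngressForwardHaRuleGeneratorTest further down in this patch; `config` (a mocked RuleManagerConfig), `SWITCH_PROPERTIES` and the test method name are assumed fixtures, so this is illustrative only, not part of the patch.

    @Test
    public void sharedSwitchServer42RulesSketch() {
        HaFlow haFlow = buildYShapedHaFlow();  // Y-point on switch 2, shared endpoint on switch 1

        // Builder fields as used by the real test class below; config and
        // SWITCH_PROPERTIES are assumed to be a mocked RuleManagerConfig and
        // server42-enabled SwitchProperties respectively.
        SharedYServer42IngressForwardHaRuleGenerator generator =
                SharedYServer42IngressForwardHaRuleGenerator.builder()
                        .config(config)
                        .haFlow(haFlow)
                        .flowPath(haFlow.getForwardPath().getSubPaths().iterator().next())
                        .encapsulation(VLAN_ENCAPSULATION)
                        .overlappingIngressAdapters(Collections.emptySet())
                        .switchProperties(SWITCH_PROPERTIES)
                        .build();

        List<SpeakerData> commands = generator.generateCommands(haFlow.getSharedSwitch());

        // This generator emits flow rules only: no meters or groups.
        Assertions.assertEquals(commands.size(), getFlowCount(commands));
        Assertions.assertEquals(0, getMeterCount(commands));
        Assertions.assertEquals(0, getGroupCount(commands));
    }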
+
+    protected void assertFlowTables(Collection<SpeakerData> commands, OfTable... expectedTables) {
+        List<OfTable> actualTables = commands.stream()
+                .filter(FlowSpeakerData.class::isInstance)
+                .map(FlowSpeakerData.class::cast)
+                .map(FlowSpeakerData::getTable)
+                .sorted()
+                .collect(Collectors.toList());
+
+        Arrays.sort(expectedTables);
+        Assertions.assertEquals(Arrays.asList(expectedTables), actualTables);
+    }
+
+    protected int getFlowCount(Collection<SpeakerData> commands) {
+        return getCommandCount(commands, FlowSpeakerData.class);
+    }
+
+    protected int getMeterCount(Collection<SpeakerData> commands) {
+        return getCommandCount(commands, MeterSpeakerData.class);
+    }
+
+    protected int getGroupCount(Collection<SpeakerData> commands) {
+        return getCommandCount(commands, GroupSpeakerData.class);
+    }
+
+    private int getCommandCount(Collection<SpeakerData> commands, Class<?> clazz) {
+        return (int) commands.stream().filter(clazz::isInstance).count();
+    }
+
+    protected Map<SwitchId, List<SpeakerData>> groupBySwitchId(Collection<SpeakerData> commands) {
+        return commands.stream().collect(Collectors.groupingBy(SpeakerData::getSwitchId));
+    }
+}
diff --git a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/HaRuleGeneratorBaseTest.java b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/HaRuleGeneratorBaseTest.java
index 062055300b2..1cdf77e0a56 100644
--- a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/HaRuleGeneratorBaseTest.java
+++ b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/HaRuleGeneratorBaseTest.java
@@ -37,7 +37,7 @@
 import org.openkilda.model.SwitchId;
 import org.openkilda.model.cookie.CookieBase.CookieType;
 import org.openkilda.model.cookie.FlowSegmentCookie;
-import org.openkilda.model.cookie.FlowSegmentCookie.FlowSubType;
+import org.openkilda.model.cookie.FlowSubType;
 import org.openkilda.rulemanager.Constants;
 import org.openkilda.rulemanager.Field;
 import org.openkilda.rulemanager.ProtoConstants.EthType;
diff --git a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/SharedYServer42IngressForwardHaRuleGeneratorTest.java b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/SharedYServer42IngressForwardHaRuleGeneratorTest.java
new file mode 100644
index 00000000000..4a8100e8f11
--- /dev/null
+++ b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/SharedYServer42IngressForwardHaRuleGeneratorTest.java
@@ -0,0 +1,337 @@
+/* Copyright 2023 Telstra Open Source
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.openkilda.rulemanager.factory.generator.flow.haflow; + +import static com.google.common.collect.Lists.newArrayList; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.openkilda.model.cookie.CookieBase.CookieType.SERVER_42_FLOW_RTT_INPUT; +import static org.openkilda.rulemanager.Constants.SERVER_42_FLOW_RTT_FORWARD_UDP_PORT; +import static org.openkilda.rulemanager.Utils.assertEqualsMatch; +import static org.openkilda.rulemanager.factory.generator.flow.haflow.HaRuleGeneratorBaseTest.TRANSIT_VLAN_ID; +import static org.openkilda.rulemanager.utils.Utils.mapMetadata; + +import org.openkilda.model.FlowTransitEncapsulation; +import org.openkilda.model.HaFlow; +import org.openkilda.model.MacAddress; +import org.openkilda.model.Switch; +import org.openkilda.model.SwitchProperties; +import org.openkilda.model.cookie.CookieBase; +import org.openkilda.model.cookie.CookieBase.CookieType; +import org.openkilda.model.cookie.FlowSegmentCookie; +import org.openkilda.model.cookie.FlowSharedSegmentCookie; +import org.openkilda.model.cookie.FlowSharedSegmentCookie.SharedSegmentType; +import org.openkilda.model.cookie.FlowSubType; +import org.openkilda.model.cookie.PortColourCookie; +import org.openkilda.rulemanager.Constants.Priority; +import org.openkilda.rulemanager.Field; +import org.openkilda.rulemanager.FlowSpeakerData; +import org.openkilda.rulemanager.Instructions; +import org.openkilda.rulemanager.OfFlowFlag; +import org.openkilda.rulemanager.OfTable; +import org.openkilda.rulemanager.ProtoConstants.EthType; +import org.openkilda.rulemanager.ProtoConstants.IpProto; +import org.openkilda.rulemanager.ProtoConstants.PortNumber; +import org.openkilda.rulemanager.RuleManagerConfig; +import org.openkilda.rulemanager.SpeakerData; +import org.openkilda.rulemanager.action.Action; +import org.openkilda.rulemanager.action.PopVlanAction; +import org.openkilda.rulemanager.action.PortOutAction; +import org.openkilda.rulemanager.action.PushVlanAction; +import org.openkilda.rulemanager.action.SetFieldAction; +import org.openkilda.rulemanager.match.FieldMatch; +import org.openkilda.rulemanager.utils.RoutingMetadata; +import org.openkilda.rulemanager.utils.RoutingMetadata.HaSubFlowType; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +public class SharedYServer42IngressForwardHaRuleGeneratorTest extends HaFlowRulesBaseTest { + + public static final int SERVER_42_PORT_NUMBER = 42; + public static final MacAddress SERVER_42_MAC_ADDRESS = new MacAddress("42:42:42:42:42:42"); + public static final int OUTER_VLAN_ID_1 = 100; + public static final int INNER_VLAN_ID_1 = 12; + private static final Integer PORT_OFFSET = 5000; + public static final SwitchProperties SWITCH_PROPERTIES = SwitchProperties.builder() + .server42Port(SERVER_42_PORT_NUMBER) + .server42MacAddress(SERVER_42_MAC_ADDRESS) + .server42FlowRtt(true) + .build(); + RuleManagerConfig config; + + @BeforeEach + public void setup() { + config = mock(RuleManagerConfig.class); + when(config.getServer42FlowRttUdpPortOffset()).thenReturn(PORT_OFFSET); + } + + @Test + public void generateCommandsSingleVlan() { + HaFlow haFlow = buildYShapedYEqualsSharedHaFlow(); + 
haFlow.setSharedInnerVlan(SHARED_INNER_VLAN);
+        SharedYServer42IngressForwardHaRuleGenerator generator = buildGenerator(haFlow, VLAN_ENCAPSULATION);
+
+        List<FlowSpeakerData> commands = generator.generateCommands(SWITCH_1).stream()
+                .map(FlowSpeakerData.class::cast)
+                .sorted(Comparator.comparing(command -> command.getCookie().getValue()))
+                .collect(Collectors.toList());
+
+        Assertions.assertEquals(5, commands.size());
+
+        FlowSpeakerData inputCustomerCommand1 = commands.get(0);
+        assertInputCommand(inputCustomerCommand1, FlowSubType.HA_SUB_FLOW_1, HaSubFlowType.HA_SUB_FLOW_1,
+                SWITCH_1, SWITCH_2);
+
+        FlowSpeakerData inputCustomerCommand2 = commands.get(1);
+        assertInputCommand(inputCustomerCommand2, FlowSubType.HA_SUB_FLOW_2, HaSubFlowType.HA_SUB_FLOW_2,
+                SWITCH_1, SWITCH_3);
+
+        FlowSpeakerData preIngressCommand = commands.get(2);
+        assertPreIngressCommand(preIngressCommand, SWITCH_1, SHARED_INNER_VLAN);
+
+        FlowSpeakerData ingressCommand1 = commands.get(3);
+        assertIngressCommand(ingressCommand1,
+                HaSubFlowType.HA_SUB_FLOW_1, SWITCH_1, FORWARD_SUB_COOKIE_1,
+                Priority.SERVER_42_INGRESS_SINGLE_VLAN_FLOW_PRIORITY, SHARED_INNER_VLAN, null);
+        FlowSpeakerData ingressCommand2 = commands.get(4);
+        assertIngressCommand(ingressCommand2,
+                HaSubFlowType.HA_SUB_FLOW_2, SWITCH_1, FORWARD_SUB_COOKIE_2,
+                Priority.SERVER_42_INGRESS_SINGLE_VLAN_FLOW_PRIORITY, SHARED_INNER_VLAN, null);
+    }
+
+    @Test
+    public void generateCommandsOneSubPathIsOneFlowFullPort() {
+        final HaFlow haFlow = buildIShapedOneSwitchHaFlow();
+        SharedYServer42IngressForwardHaRuleGenerator generator = buildGenerator(haFlow, VLAN_ENCAPSULATION);
+
+        List<SpeakerData> commands = generator.generateCommands(SWITCH_1);
+
+        Assertions.assertEquals(2, commands.size());
+
+        FlowSpeakerData ingressCommand = (FlowSpeakerData) commands.get(0);
+        FlowSpeakerData inputCommand = (FlowSpeakerData) commands.get(1);
+
+        assertIngressCommand(ingressCommand, HaSubFlowType.HA_SUB_FLOW_2, SWITCH_1, FORWARD_SUB_COOKIE_2,
+                Priority.SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY, null, null);
+        assertInputCommand(inputCommand, FlowSubType.HA_SUB_FLOW_2, HaSubFlowType.HA_SUB_FLOW_2, SWITCH_1, SWITCH_3);
+    }
+
+    @Test
+    public void generateCommandsDoubleVlan() {
+        HaFlow haFlow = buildYShapedYEqualsSharedHaFlow();
+        haFlow.setSharedInnerVlan(INNER_VLAN_ID_1);
+        haFlow.setSharedOuterVlan(OUTER_VLAN_ID_1);
+        SharedYServer42IngressForwardHaRuleGenerator generator = buildGenerator(haFlow, VLAN_ENCAPSULATION);
+
+        List<FlowSpeakerData> commands = generator.generateCommands(SWITCH_1).stream()
+                .map(FlowSpeakerData.class::cast)
+                .sorted(Comparator.comparing(command -> command.getCookie().getValue()))
+                .collect(Collectors.toList());
+
+        Assertions.assertEquals(5, commands.size());
+
+        FlowSpeakerData inputCustomerCommand1 = commands.get(0);
+        assertInputCommand(inputCustomerCommand1, FlowSubType.HA_SUB_FLOW_1, HaSubFlowType.HA_SUB_FLOW_1,
+                SWITCH_1, SWITCH_2);
+
+        FlowSpeakerData inputCustomerCommand2 = commands.get(1);
+        assertInputCommand(inputCustomerCommand2, FlowSubType.HA_SUB_FLOW_2, HaSubFlowType.HA_SUB_FLOW_2,
+                SWITCH_1, SWITCH_3);
+
+        FlowSpeakerData preIngressCommand = commands.get(2);
+        assertPreIngressCommand(preIngressCommand, SWITCH_1, OUTER_VLAN_ID_1);
+
+        FlowSpeakerData ingressCommand1 = commands.get(3);
+        assertIngressCommand(ingressCommand1,
+                HaSubFlowType.HA_SUB_FLOW_1, SWITCH_1, FORWARD_SUB_COOKIE_1,
+                Priority.SERVER_42_INGRESS_DOUBLE_VLAN_FLOW_PRIORITY, OUTER_VLAN_ID_1, INNER_VLAN_ID_1);
+        FlowSpeakerData ingressCommand2 = commands.get(4);
+        assertIngressCommand(ingressCommand2,
+                HaSubFlowType.HA_SUB_FLOW_2, SWITCH_1, FORWARD_SUB_COOKIE_2,
+                Priority.SERVER_42_INGRESS_DOUBLE_VLAN_FLOW_PRIORITY, OUTER_VLAN_ID_1, INNER_VLAN_ID_1);
+    }
+
+    @Test
+    public void generateCommandsNoServer42() {
+        HaFlow haFlow = buildYShapedYEqualsSharedHaFlowServer42();
+        SharedYServer42IngressForwardHaRuleGenerator generator = SharedYServer42IngressForwardHaRuleGenerator.builder()
+                .config(config)
+                .haFlow(haFlow)
+                .switchProperties(SwitchProperties.builder().build())
+                .build();
+        List<SpeakerData> commands = generator.generateCommands(SWITCH_8_SERVER42);
+        Assertions.assertTrue(commands.isEmpty());
+    }
+
+    @Test
+    public void generateCommandsNoHaFlow() {
+        SharedYServer42IngressForwardHaRuleGenerator generator = SharedYServer42IngressForwardHaRuleGenerator.builder()
+                .config(config)
+                .switchProperties(SWITCH_PROPERTIES)
+                .build();
+        List<SpeakerData> commands = generator.generateCommands(SWITCH_8_SERVER42);
+        Assertions.assertTrue(commands.isEmpty());
+    }
+
+    private void assertIngressCommand(FlowSpeakerData ingressCommand, HaSubFlowType expectedHaSubFlowType,
+                                      Switch srcSwitch, CookieBase cookieBase, int expectedPriority,
+                                      Integer vlanId, Integer vlanId2) {
+        final RoutingMetadata expectedIngressMetadata = RoutingMetadata.builder()
+                .inputPort(PORT_NUMBER_1)
+                .haSubFlowType(expectedHaSubFlowType)
+                .outerVlanId(vlanId)
+                .build(srcSwitch.getFeatures());
+
+        Set<FieldMatch> expectedIngressMatch = Sets.newHashSet(
+                FieldMatch.builder().field(Field.IN_PORT).value(SERVER_42_PORT_NUMBER).build(),
+                FieldMatch.builder().field(Field.METADATA).value(expectedIngressMetadata.getValue())
+                        .mask(expectedIngressMetadata.getMask()).build()
+        );
+
+        List<Action> expectedIngressActions = newArrayList(
+                SetFieldAction.builder().field(Field.ETH_SRC)
+                        .value(srcSwitch.getSwitchId().toMacAddressAsLong()).build());
+
+        if (vlanId2 != null) {
+            expectedIngressMatch.add(FieldMatch.builder().field(Field.VLAN_VID).value(vlanId2).build());
+        } else {
+            expectedIngressActions.add(new PushVlanAction());
+        }
+
+        expectedIngressActions.add(SetFieldAction.builder().field(Field.VLAN_VID).value(TRANSIT_VLAN_ID).build());
+        expectedIngressActions.add(new PortOutAction(new PortNumber(0)));
+
+        final FlowSegmentCookie expectedIngressCookie = new FlowSegmentCookie(cookieBase.getValue())
+                .toBuilder()
+                .type(CookieType.SERVER_42_FLOW_RTT_INGRESS)
+                .build();
+
+        assertIngressCommand(ingressCommand, expectedPriority,
+                expectedIngressMatch, expectedIngressActions, expectedIngressCookie, srcSwitch);
+    }
+
+    private void assertIngressCommand(
+            FlowSpeakerData command, int expectedPriority, Set<FieldMatch> expectedMatch,
+            List<Action> expectedApplyActions, CookieBase expectedCookie, Switch srcSwitch) {
+        Assertions.assertEquals(srcSwitch.getSwitchId(), command.getSwitchId());
+        Assertions.assertEquals(srcSwitch.getOfVersion(), command.getOfVersion().toString());
+
+        Assertions.assertEquals(expectedCookie, command.getCookie());
+        Assertions.assertEquals(OfTable.INGRESS, command.getTable());
+        Assertions.assertEquals(expectedPriority, command.getPriority());
+        assertEqualsMatch(expectedMatch, command.getMatch());
+
+        Instructions expectedInstructions = Instructions.builder()
+                .applyActions(expectedApplyActions)
+                .build();
+        Assertions.assertEquals(expectedInstructions, command.getInstructions());
+        Assertions.assertEquals(Sets.newHashSet(OfFlowFlag.RESET_COUNTERS), command.getFlags());
+    }
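The cookie assertion above captures a detail worth calling out: the server42 ingress rule does not allocate a cookie of its own. It reuses the HA sub-path's segment cookie and only rewrites the type field, so direction, effective flow id and HA sub-flow subtype are preserved. A condensed restatement, using the same calls as the assertion above:

        // Same direction, effective flow id and HA sub-flow subtype as the
        // sub-path cookie; only the cookie type changes.
        FlowSegmentCookie subPathCookie = FORWARD_SUB_COOKIE_1;
        FlowSegmentCookie rttIngressCookie = new FlowSegmentCookie(subPathCookie.getValue())
                .toBuilder()
                .type(CookieType.SERVER_42_FLOW_RTT_INGRESS)
                .build();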
+
+    private void assertPreIngressCommand(FlowSpeakerData command, Switch sharedSwitch, int vlanId) {
+        FlowSharedSegmentCookie cookie = FlowSharedSegmentCookie.builder(SharedSegmentType.SERVER42_QINQ_OUTER_VLAN)
+                .portNumber(SERVER_42_PORT_NUMBER)
+                .vlanId(vlanId).build();
+
+        Assertions.assertEquals(sharedSwitch.getSwitchId(), command.getSwitchId());
+        Assertions.assertEquals(sharedSwitch.getOfVersion(), command.getOfVersion().toString());
+        Assertions.assertEquals(cookie, command.getCookie());
+        Assertions.assertEquals(OfTable.PRE_INGRESS, command.getTable());
+        Assertions.assertEquals(Priority.SERVER_42_PRE_INGRESS_FLOW_PRIORITY, command.getPriority());
+        Assertions.assertTrue(command.getDependsOn().isEmpty());
+
+        Set<FieldMatch> expectedMatch = Sets.newHashSet(
+                FieldMatch.builder().field(Field.IN_PORT).value(SERVER_42_PORT_NUMBER).build(),
+                FieldMatch.builder().field(Field.VLAN_VID).value(vlanId).build());
+        assertEqualsMatch(expectedMatch, command.getMatch());
+
+        Instructions expectedInstructions = Instructions.builder()
+                .writeMetadata(mapMetadata(RoutingMetadata.builder().outerVlanId(vlanId)
+                        .build(sharedSwitch.getFeatures())))
+                .applyActions(Lists.newArrayList(new PopVlanAction()))
+                .goToTable(OfTable.INGRESS)
+                .build();
+        Assertions.assertEquals(expectedInstructions, command.getInstructions());
+        Assertions.assertTrue(command.getFlags().isEmpty());
+    }
+
+    private void assertInputCommand(FlowSpeakerData command, FlowSubType subType, HaSubFlowType haSubFlowType,
+                                    Switch srcSwitch, Switch dstSwitch) {
+        Assertions.assertEquals(srcSwitch.getSwitchId(), command.getSwitchId());
+        Assertions.assertEquals(srcSwitch.getOfVersion(), command.getOfVersion().toString());
+        Assertions.assertEquals(new PortColourCookie(SERVER_42_FLOW_RTT_INPUT, PORT_NUMBER_1, subType),
+                command.getCookie());
+        Assertions.assertEquals(OfTable.INPUT, command.getTable());
+        Assertions.assertEquals(Priority.SERVER_42_FLOW_RTT_INPUT_PRIORITY, command.getPriority());
+        Assertions.assertTrue(command.getDependsOn().isEmpty());
+
+        Set<FieldMatch> expectedMatch = Sets.newHashSet(
+                FieldMatch.builder().field(Field.IN_PORT).value(SERVER_42_PORT_NUMBER).build(),
+                FieldMatch.builder().field(Field.ETH_TYPE).value(EthType.IPv4).build(),
+                FieldMatch.builder().field(Field.IP_PROTO).value(IpProto.UDP).build(),
+                FieldMatch.builder().field(Field.UDP_SRC).value(PORT_NUMBER_1 + PORT_OFFSET).build(),
+                FieldMatch.builder().field(Field.ETH_SRC).value(SERVER_42_MAC_ADDRESS.toLong()).build(),
+                FieldMatch.builder().field(Field.ETH_DST).value(dstSwitch.getSwitchId().toMacAddressAsLong()).build()
+        );
+        assertEqualsMatch(expectedMatch, command.getMatch());
+
+        List<Action> expectedApplyActions = newArrayList(
+                SetFieldAction.builder().field(Field.UDP_SRC).value(SERVER_42_FLOW_RTT_FORWARD_UDP_PORT).build(),
+                SetFieldAction.builder().field(Field.UDP_DST).value(SERVER_42_FLOW_RTT_FORWARD_UDP_PORT).build());
+
+        Instructions expectedInstructions = Instructions.builder()
+                .applyActions(expectedApplyActions)
+                .goToTable(OfTable.PRE_INGRESS)
+                .writeMetadata(mapMetadata(RoutingMetadata.builder()
+                        .haSubFlowType(haSubFlowType).inputPort(PORT_NUMBER_1)
+                        .build(srcSwitch.getFeatures())))
+                .build();
+        Assertions.assertEquals(expectedInstructions, command.getInstructions());
+        Assertions.assertTrue(command.getFlags().isEmpty());
+    }
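The input-table expectations above also encode the port-attribution trick behind server42 flow RTT: the probe arrives from the server42 port with the customer port number folded into its UDP source port (customer port plus a configured offset), and the rule rewrites both UDP ports to the well-known forward RTT port before handing the packet to the pre-ingress table. A toy restatement of the arithmetic, with the offset value this test mocks into RuleManagerConfig (the concrete value of PORT_NUMBER_1 is a shared fixture and immaterial here):

        // The generator matches UDP_SRC = customer port + offset ...
        int udpSrcMatch = PORT_NUMBER_1 + PORT_OFFSET;  // PORT_OFFSET = 5000 in this test
        // ... and rewrites UDP_SRC/UDP_DST to the shared RTT forward port, so
        // downstream the probe looks like any other server42 RTT packet.
        int rewrittenPort = SERVER_42_FLOW_RTT_FORWARD_UDP_PORT;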
+
+    private SharedYServer42IngressForwardHaRuleGenerator buildGenerator(HaFlow haFlow,
+                                                                        FlowTransitEncapsulation encapsulation) {
+        return SharedYServer42IngressForwardHaRuleGenerator.builder()
+                .config(config)
+                .flowPath(haFlow.getForwardPath().getSubPaths().iterator().next())
+                .haFlow(haFlow)
+                .encapsulation(encapsulation)
+                .overlappingIngressAdapters(Collections.emptySet())
+                .switchProperties(SWITCH_PROPERTIES)
+                .build();
+    }
+}
diff --git a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/YPointForwardIngressHaRuleGeneratorTest.java b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/YPointForwardIngressHaRuleGeneratorTest.java
index 11c39e014de..df20a67ee2a 100644
--- a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/YPointForwardIngressHaRuleGeneratorTest.java
+++ b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/factory/generator/flow/haflow/YPointForwardIngressHaRuleGeneratorTest.java
@@ -15,6 +15,7 @@
 
 package org.openkilda.rulemanager.factory.generator.flow.haflow;
 
+
 import static com.google.common.collect.Lists.newArrayList;
 import static com.google.common.collect.Sets.newHashSet;
 import static org.junit.jupiter.api.Assertions.assertEquals;
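The masks asserted in the new RoutingMetadataTest below imply a concrete bit layout for the rule-manager RoutingMetadata. The layout is not spelled out anywhere in this patch, so the following is an inference from the expected values only: a type nibble in bits 28-31, the HA sub-flow discriminator in bit 16, an outer-VLAN presence flag in bit 3 and the 12-bit outer VLAN id in bits 4-15. A self-contained sketch under that assumption:

// Inferred layout only; the authoritative encoding lives in
// org.openkilda.rulemanager.utils.RoutingMetadata.
final class RoutingMetadataLayoutSketch {
    static final long TYPE_MASK = 0x0000_0000_F000_0000L;        // metadata type nibble, value 0x1
    static final long HA_SUB_FLOW_MASK = 0x0000_0000_0001_0000L; // 0 = sub-flow 1, 1 = sub-flow 2
    static final long OUTER_VLAN_MASK = 0x0000_0000_0000_FFF8L;  // presence flag + 12-bit vlan id

    static long outerVlanBits(int vlanId) {
        // bit 3 = "outer vlan present" flag, vlan id shifted into bits 4..15
        return 0x8L | ((long) vlanId << 4);
    }

    public static void main(String[] args) {
        // vlan 1 -> 0x18, matching the 0x0000_0000_1000_0018L value asserted below
        System.out.println(Long.toHexString(outerVlanBits(1)));
    }
}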
diff --git a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/utils/RoutingMetadataTest.java b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/utils/RoutingMetadataTest.java
new file mode 100644
index 00000000000..ba4201b892c
--- /dev/null
+++ b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/utils/RoutingMetadataTest.java
@@ -0,0 +1,71 @@
+/* Copyright 2023 Telstra Open Source
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.openkilda.rulemanager.utils;
+
+import org.openkilda.model.SwitchFeature;
+import org.openkilda.rulemanager.utils.RoutingMetadata.HaSubFlowType;
+
+import com.google.common.collect.Sets;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+import java.util.Set;
+
+public class RoutingMetadataTest {
+
+    static final Set<SwitchFeature> FEATURES = Sets.newHashSet();
+
+    @Test
+    public void buildRoutingMetadata() {
+        RoutingMetadata routingMetadata = RoutingMetadata.builder().build(FEATURES);
+
+        Assertions.assertEquals(0x0000_0000_F000_0000L, routingMetadata.getMask());
+        Assertions.assertEquals(0x0000_0000_1000_0000L, routingMetadata.getValue());
+    }
+
+    @Test
+    public void buildRoutingMetadataHaFlow() {
+        RoutingMetadata routingMetadata = RoutingMetadata.builder()
+                .haSubFlowType(HaSubFlowType.HA_SUB_FLOW_1)
+                .build(FEATURES);
+
+        Assertions.assertEquals(0x0000_0000_F001_0000L, routingMetadata.getMask());
+        Assertions.assertEquals(0x0000_0000_1000_0000L, routingMetadata.getValue());
+
+        routingMetadata = RoutingMetadata.builder()
+                .haSubFlowType(HaSubFlowType.HA_SUB_FLOW_2)
+                .build(FEATURES);
+
+        Assertions.assertEquals(0x0000_0000_F001_0000L, routingMetadata.getMask());
+        Assertions.assertEquals(0x0000_0000_1001_0000L, routingMetadata.getValue());
+    }
+
+    @Test
+    public void buildRoutingMetadataHaFlowWithOuterVlan() {
+        RoutingMetadata routingMetadata = RoutingMetadata.builder().outerVlanId(1).build(FEATURES);
+
+        Assertions.assertEquals(0x0000_0000_F000_FFF8L, routingMetadata.getMask());
+        Assertions.assertEquals(0x0000_0000_1000_0018L, routingMetadata.getValue());
+
+        routingMetadata = RoutingMetadata.builder()
+                .haSubFlowType(HaSubFlowType.HA_SUB_FLOW_1)
+                .outerVlanId(1)
+                .build(FEATURES);
+
+        Assertions.assertEquals(0x0000_0000_F001_FFF8L, routingMetadata.getMask());
+        Assertions.assertEquals(0x0000_0000_1000_0018L, routingMetadata.getValue());
+    }
+}
diff --git a/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/ActivateFlowMonitoringInfoData.java b/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/ActivateFlowMonitoringInfoData.java
index 4834fa0cc0e..d65e7d66c8e 100644
--- a/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/ActivateFlowMonitoringInfoData.java
+++ b/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/ActivateFlowMonitoringInfoData.java
@@ -33,23 +33,28 @@ public class ActivateFlowMonitoringInfoData extends InfoData {
 
     @JsonProperty(Utils.FLOW_ID)
-    private String id;
+    String id;
 
     @NonNull
     @JsonProperty("source")
-    private FlowEndpointPayload source;
+    FlowEndpointPayload source;
 
     @NonNull
     @JsonProperty("destination")
-    private FlowEndpointPayload destination;
+    FlowEndpointPayload destination;
+
+    @JsonProperty("ha-flow-id")
+    String haFlowId;
 
     @Builder
     @JsonCreator
     public ActivateFlowMonitoringInfoData(@JsonProperty(Utils.FLOW_ID) String flowId,
                                           @JsonProperty("source") FlowEndpointPayload source,
-                                          @JsonProperty("destination") FlowEndpointPayload destination) {
+                                          @JsonProperty("destination") FlowEndpointPayload destination,
+                                          @JsonProperty("ha-flow-id") String haFlowId) {
         this.id = flowId;
         this.source = source;
         this.destination = destination;
+        this.haFlowId = haFlowId;
     }
 }
diff --git 
a/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/AddFlow.java b/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/AddFlow.java index e75bc1d2ba6..212f151cc96 100644 --- a/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/AddFlow.java +++ b/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/AddFlow.java @@ -17,17 +17,17 @@ import org.openkilda.server42.messaging.FlowDirection; +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.databind.PropertyNamingStrategy.SnakeCaseStrategy; import com.fasterxml.jackson.databind.annotation.JsonNaming; -import lombok.AllArgsConstructor; import lombok.Builder; import lombok.EqualsAndHashCode; import lombok.Value; @Value @Builder -@AllArgsConstructor @JsonNaming(value = SnakeCaseStrategy.class) +@JsonIgnoreProperties(ignoreUnknown = true) @EqualsAndHashCode(callSuper = false) public class AddFlow extends Message { Headers headers; @@ -36,4 +36,5 @@ public class AddFlow extends Message { Long innerTunnelId; FlowDirection direction; Integer port; + String dstMac; } diff --git a/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/FlowRttControl.java b/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/FlowRttControl.java index ad18ec511ea..a4d1b7ff19e 100644 --- a/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/FlowRttControl.java +++ b/src-java/server42/server42-control-messaging/src/main/java/org/openkilda/server42/control/messaging/flowrtt/FlowRttControl.java @@ -93,6 +93,16 @@ public interface FlowOrBuilder extends * int64 inner_tunnel_id = 10; */ long getInnerTunnelId(); + + /** + * string switch_id = 11; + */ + java.lang.String getSwitchId(); + /** + * string switch_id = 11; + */ + com.google.protobuf.ByteString + getSwitchIdBytes(); } /** * Protobuf type {@code org.openkilda.server42.control.messaging.flowrtt.Flow} @@ -111,6 +121,7 @@ private Flow() { encapsulationType_ = 0; transitEncapsulationType_ = 0; dstMac_ = ""; + switchId_ = ""; } @java.lang.Override @@ -197,6 +208,12 @@ private Flow( innerTunnelId_ = input.readInt64(); break; } + case 90: { + java.lang.String s = input.readStringRequireUtf8(); + + switchId_ = s; + break; + } default: { if (!parseUnknownField( input, unknownFields, extensionRegistry, tag)) { @@ -491,6 +508,40 @@ public long getInnerTunnelId() { return innerTunnelId_; } + public static final int SWITCH_ID_FIELD_NUMBER = 11; + private volatile java.lang.Object switchId_; + /** + * string switch_id = 11; + */ + public java.lang.String getSwitchId() { + java.lang.Object ref = switchId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + switchId_ = s; + return s; + } + } + /** + * string switch_id = 11; + */ + public com.google.protobuf.ByteString + getSwitchIdBytes() { + java.lang.Object ref = switchId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + switchId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + 
} + } + private byte memoizedIsInitialized = -1; @java.lang.Override public final boolean isInitialized() { @@ -535,6 +586,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) if (innerTunnelId_ != 0L) { output.writeInt64(10, innerTunnelId_); } + if (!getSwitchIdBytes().isEmpty()) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 11, switchId_); + } unknownFields.writeTo(output); } @@ -582,6 +636,9 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeInt64Size(10, innerTunnelId_); } + if (!getSwitchIdBytes().isEmpty()) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(11, switchId_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -615,6 +672,8 @@ public boolean equals(final java.lang.Object obj) { != other.getHashCode()) return false; if (getInnerTunnelId() != other.getInnerTunnelId()) return false; + if (!getSwitchId() + .equals(other.getSwitchId())) return false; if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -650,6 +709,8 @@ public int hashCode() { hash = (37 * hash) + INNER_TUNNEL_ID_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashLong( getInnerTunnelId()); + hash = (37 * hash) + SWITCH_ID_FIELD_NUMBER; + hash = (53 * hash) + getSwitchId().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -803,6 +864,8 @@ public Builder clear() { innerTunnelId_ = 0L; + switchId_ = ""; + return this; } @@ -839,6 +902,7 @@ public org.openkilda.server42.control.messaging.flowrtt.FlowRttControl.Flow buil result.udpSrcPort_ = udpSrcPort_; result.hashCode_ = hashCode_; result.innerTunnelId_ = innerTunnelId_; + result.switchId_ = switchId_; onBuilt(); return result; } @@ -919,6 +983,10 @@ public Builder mergeFrom(org.openkilda.server42.control.messaging.flowrtt.FlowRt if (other.getInnerTunnelId() != 0L) { setInnerTunnelId(other.getInnerTunnelId()); } + if (!other.getSwitchId().isEmpty()) { + switchId_ = other.switchId_; + onChanged(); + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; @@ -1351,6 +1419,75 @@ public Builder clearInnerTunnelId() { onChanged(); return this; } + + private java.lang.Object switchId_ = ""; + /** + * string switch_id = 11; + */ + public java.lang.String getSwitchId() { + java.lang.Object ref = switchId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + switchId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * string switch_id = 11; + */ + public com.google.protobuf.ByteString + getSwitchIdBytes() { + java.lang.Object ref = switchId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + switchId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * string switch_id = 11; + */ + public Builder setSwitchId( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + + switchId_ = value; + onChanged(); + return this; + } + /** + * string switch_id = 11; + */ + public Builder clearSwitchId() { + + switchId_ = getDefaultInstance().getSwitchId(); + onChanged(); + return this; + } + /** + * string switch_id = 11; + */ + public Builder setSwitchIdBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new 
NullPointerException(); + } + checkByteStringIsUtf8(value); + + switchId_ = value; + onChanged(); + return this; + } @java.lang.Override public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { @@ -4567,7 +4704,7 @@ public org.openkilda.server42.control.messaging.flowrtt.FlowRttControl.ListFlows java.lang.String[] descriptorData = { "\n\026flow-rtt-control.proto\0220org.openkilda." + "server42.control.messaging.flowrtt\032\031goog" + - "le/protobuf/any.proto\"\261\003\n\004Flow\022\017\n\007flow_i" + + "le/protobuf/any.proto\"\304\003\n\004Flow\022\017\n\007flow_i" + "d\030\001 \001(\t\022h\n\022encapsulation_type\030\002 \001(\0162H.or" + "g.openkilda.server42.control.messaging.f" + "lowrtt.Flow.EncapsulationTypeB\002\030\001\022\021\n\ttun" + @@ -4577,16 +4714,17 @@ public org.openkilda.server42.control.messaging.flowrtt.FlowRttControl.ListFlows "peB\002\030\001\022\031\n\021transit_tunnel_id\030\005 \001(\003\022\021\n\tdir" + "ection\030\006 \001(\010\022\017\n\007dst_mac\030\007 \001(\t\022\024\n\014udp_src" + "_port\030\010 \001(\r\022\021\n\thash_code\030\t \001(\005\022\027\n\017inner_" + - "tunnel_id\030\n \001(\003\"(\n\021EncapsulationType\022\010\n\004" + - "VLAN\020\000\022\t\n\005VXLAN\020\001\"O\n\007AddFlow\022D\n\004flow\030\001 \001" + - "(\01326.org.openkilda.server42.control.mess" + - "aging.flowrtt.Flow\"R\n\nRemoveFlow\022D\n\004flow" + - "\030\001 \001(\01326.org.openkilda.server42.control." + - "messaging.flowrtt.Flow\"#\n\020ClearFlowsFilt" + - "er\022\017\n\007dst_mac\030\001 \001(\t\"Q\n\tListFlows\022D\n\004flow" + - "\030\001 \003(\01326.org.openkilda.server42.control." + - "messaging.flowrtt.Flow\"\"\n\017ListFlowsFilte" + - "r\022\017\n\007dst_mac\030\001 \001(\tb\006proto3" + "tunnel_id\030\n \001(\003\022\021\n\tswitch_id\030\013 \001(\t\"(\n\021En" + + "capsulationType\022\010\n\004VLAN\020\000\022\t\n\005VXLAN\020\001\"O\n\007" + + "AddFlow\022D\n\004flow\030\001 \001(\01326.org.openkilda.se" + + "rver42.control.messaging.flowrtt.Flow\"R\n" + + "\nRemoveFlow\022D\n\004flow\030\001 \001(\01326.org.openkild" + + "a.server42.control.messaging.flowrtt.Flo" + + "w\"#\n\020ClearFlowsFilter\022\017\n\007dst_mac\030\001 \001(\t\"Q" + + "\n\tListFlows\022D\n\004flow\030\001 \003(\01326.org.openkild" + + "a.server42.control.messaging.flowrtt.Flo" + + "w\"\"\n\017ListFlowsFilter\022\017\n\007dst_mac\030\001 \001(\tb\006p" + + "roto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, @@ -4598,7 +4736,7 @@ public org.openkilda.server42.control.messaging.flowrtt.FlowRttControl.ListFlows internal_static_org_openkilda_server42_control_messaging_flowrtt_Flow_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_org_openkilda_server42_control_messaging_flowrtt_Flow_descriptor, - new java.lang.String[] { "FlowId", "EncapsulationType", "TunnelId", "TransitEncapsulationType", "TransitTunnelId", "Direction", "DstMac", "UdpSrcPort", "HashCode", "InnerTunnelId", }); + new java.lang.String[] { "FlowId", "EncapsulationType", "TunnelId", "TransitEncapsulationType", "TransitTunnelId", "Direction", "DstMac", "UdpSrcPort", "HashCode", "InnerTunnelId", "SwitchId", }); internal_static_org_openkilda_server42_control_messaging_flowrtt_AddFlow_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_org_openkilda_server42_control_messaging_flowrtt_AddFlow_fieldAccessorTable = new diff --git 
a/src-java/server42/server42-control-server-stub/src/main/java/org/openkilda/server42/control/serverstub/ControlServer.java b/src-java/server42/server42-control-server-stub/src/main/java/org/openkilda/server42/control/serverstub/ControlServer.java index 51d7452c916..4413eecfc42 100644 --- a/src-java/server42/server42-control-server-stub/src/main/java/org/openkilda/server42/control/serverstub/ControlServer.java +++ b/src-java/server42/server42-control-server-stub/src/main/java/org/openkilda/server42/control/serverstub/ControlServer.java @@ -113,11 +113,11 @@ public void run() { ClearFlowsFilter filter = command.unpack(ClearFlowsFilter.class); List keys = flows.values() .stream() - .filter(flow -> flow.getDstMac().equals(filter.getDstMac())) + .filter(flow -> flow.getSwitchId().equals(filter.getDstMac())) .map(FlowKey::fromFlow) .collect(Collectors.toList()); - flows.keySet().removeAll(keys); + keys.forEach(flows.keySet()::remove); keys.forEach(statsServer::removeFlow); } else { flows.clear(); @@ -130,7 +130,7 @@ public void run() { ListFlowsFilter filter = command.unpack(ListFlowsFilter.class); flows.values() .stream() - .filter(flow -> flow.getDstMac().equals(filter.getDstMac())) + .filter(flow -> flow.getSwitchId().equals(filter.getDstMac())) .forEach(flow -> builder.addResponse(Any.pack(flow))); } else { for (Flow flow : flows.values()) { diff --git a/src-java/server42/server42-control-storm-stub/src/main/java/org/openkilda/server42/control/stormstub/api/AddFlowPayload.java b/src-java/server42/server42-control-storm-stub/src/main/java/org/openkilda/server42/control/stormstub/api/AddFlowPayload.java index e89ecc4b00a..14264f44fa2 100644 --- a/src-java/server42/server42-control-storm-stub/src/main/java/org/openkilda/server42/control/stormstub/api/AddFlowPayload.java +++ b/src-java/server42/server42-control-storm-stub/src/main/java/org/openkilda/server42/control/stormstub/api/AddFlowPayload.java @@ -41,4 +41,7 @@ public class AddFlowPayload { @JsonProperty("port") Integer port; + + @JsonProperty("dst_mac") + String dstMac; } diff --git a/src-java/server42/server42-control-storm-topology/build.gradle b/src-java/server42/server42-control-storm-topology/build.gradle index c429a123ca9..01299673f75 100644 --- a/src-java/server42/server42-control-storm-topology/build.gradle +++ b/src-java/server42/server42-control-storm-topology/build.gradle @@ -31,7 +31,11 @@ dependencies { testAnnotationProcessor 'org.mapstruct:mapstruct-processor' testImplementation 'org.hamcrest:hamcrest-library' + testImplementation 'org.junit.jupiter:junit-jupiter-api' + testImplementation 'org.junit.jupiter:junit-jupiter-engine' testImplementation 'org.mockito:mockito-junit-jupiter' + testRuntimeOnly 'org.apache.curator:curator-test' + testRuntimeOnly 'org.hibernate.validator:hibernate-validator' testRuntimeOnly 'org.glassfish:javax.el' testRuntimeOnly 'org.apache.logging.log4j:log4j-slf4j-impl' @@ -61,3 +65,7 @@ shadowJar { artifacts { archives shadowJar } + +test { + useJUnitPlatform() +} diff --git a/src-java/server42/server42-control-storm-topology/src/checkstyle/checkstyle-suppressions.xml b/src-java/server42/server42-control-storm-topology/src/checkstyle/checkstyle-suppressions.xml index 2a127580be3..e78ec484037 100644 --- a/src-java/server42/server42-control-storm-topology/src/checkstyle/checkstyle-suppressions.xml +++ b/src-java/server42/server42-control-storm-topology/src/checkstyle/checkstyle-suppressions.xml @@ -6,4 +6,5 @@ - \ No newline at end of file + + diff --git 
a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/ControlTopology.java b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/ControlTopology.java index fd3d28d6440..7d995e44674 100644 --- a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/ControlTopology.java +++ b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/ControlTopology.java @@ -27,6 +27,7 @@ import org.openkilda.wfm.share.zk.ZooKeeperSpout; import org.openkilda.wfm.topology.AbstractTopology; +import com.google.common.annotations.VisibleForTesting; import org.apache.storm.generated.StormTopology; import org.apache.storm.kafka.bolt.KafkaBolt; import org.apache.storm.topology.TopologyBuilder; @@ -41,6 +42,12 @@ public ControlTopology(LaunchEnvironment env) { persistenceManager = new PersistenceManager(configurationProvider); } + @VisibleForTesting + ControlTopology(LaunchEnvironment env, PersistenceManager persistenceManager) { + super(env, "control-topology", ControlTopologyConfig.class); + this.persistenceManager = persistenceManager; + } + /** * Topology uploader. */ @@ -139,7 +146,8 @@ private void islHandler(TopologyBuilder topology) { } private void outputSpeaker(TopologyBuilder topology) { - KafkaBolt output = buildKafkaBoltWithRawObject(topologyConfig.getKafkaTopics().getServer42StormCommandsTopic()); + KafkaBolt output = + buildKafkaBoltWithRawObject(topologyConfig.getKafkaTopics().getServer42StormCommandsTopic()); declareBolt(topology, output, ComponentId.OUTPUT_SERVER42_CONTROL.toString()) .shuffleGrouping(FlowHandler.BOLT_ID, FlowHandler.STREAM_CONTROL_COMMANDS_ID) .shuffleGrouping(IslHandler.BOLT_ID, IslHandler.STREAM_CONTROL_COMMANDS_ID); diff --git a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/service/FlowRttService.java b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/service/FlowRttService.java index 69e80752cdd..b685356e532 100644 --- a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/service/FlowRttService.java +++ b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/service/FlowRttService.java @@ -17,10 +17,13 @@ package org.openkilda.server42.control.topology.service; import org.openkilda.model.Flow; +import org.openkilda.model.HaSubFlow; import org.openkilda.model.SwitchId; import org.openkilda.model.SwitchProperties; import org.openkilda.persistence.PersistenceManager; import org.openkilda.persistence.repositories.FlowRepository; +import org.openkilda.persistence.repositories.HaFlowRepository; +import org.openkilda.persistence.repositories.HaSubFlowRepository; import org.openkilda.persistence.repositories.KildaFeatureTogglesRepository; import org.openkilda.persistence.repositories.RepositoryFactory; import org.openkilda.persistence.repositories.SwitchPropertiesRepository; @@ -40,6 +43,8 @@ public class FlowRttService { private final KildaFeatureTogglesRepository featureTogglesRepository; private final SwitchPropertiesRepository switchPropertiesRepository; private final FlowRepository flowRepository; + private final HaSubFlowRepository haSubFlowRepository; + private final HaFlowRepository haFlowRepository; public FlowRttService(IFlowCarrier carrier, PersistenceManager 
persistenceManager) {
         this.carrier = carrier;
@@ -47,6 +52,8 @@ public FlowRttService(IFlowCarrier carrier, PersistenceManage
         switchPropertiesRepository = repositoryFactory.createSwitchPropertiesRepository();
         featureTogglesRepository = repositoryFactory.createFeatureTogglesRepository();
         flowRepository = repositoryFactory.createFlowRepository();
+        haSubFlowRepository = repositoryFactory.createHaSubFlowRepository();
+        haFlowRepository = repositoryFactory.createHaFlowRepository();
     }
 
     /**
@@ -57,13 +64,17 @@ public FlowRttService(IFlowCarrier carrier, PersistenceManage
      * @param vlan switch customer vlan id
      * @param isForward is endpoint forward
      */
-    public void activateFlowMonitoring(String flowId, SwitchId switchId, Integer port, Integer vlan, Integer innerVlan,
-                                       boolean isForward) {
-        if (isFlowRttFeatureToggle() && isFlowRttFeatureEnabledFor(switchId)) {
-            carrier.notifyActivateFlowMonitoring(flowId, switchId, port, vlan, innerVlan, isForward);
-        } else {
-            log.info("skip activation of flow RTT for flow: {} and switch:{}", flowId, switchId);
+    public void activateFlowMonitoring(String flowId, SwitchId switchId, SwitchId dstSwitchId, Integer port,
+                                       Integer vlan, Integer innerVlan, boolean isForward) {
+        if (!isFlowRttFeatureToggle() || !isFlowRttFeatureEnabledFor(switchId)) {
+            log.info("RTT not enabled. Skip activation of RTT for flow/haSubflow:{} and switch:{}", flowId, switchId);
+            return;
         }
+        if (switchId.equals(dstSwitchId)) {
+            log.info("One-switch flow. Skip activation of RTT for flow/haSubflow:{} and switch:{}", flowId, switchId);
+            return;
+        }
+        carrier.notifyActivateFlowMonitoring(flowId, switchId, dstSwitchId, port, vlan, innerVlan, isForward);
     }
 
     /**
@@ -81,13 +92,30 @@ public void activateFlowMonitoringForSwitch(SwitchId switchId) {
 
         flowByDirection.getOrDefault(true, Collections.emptyList())
                 .forEach(flow -> carrier.notifyActivateFlowMonitoring(flow.getFlowId(),
-                        switchId, flow.getSrcPort(), flow.getSrcVlan(), flow.getSrcInnerVlan(),
-                        true));
+                        switchId, flow.getDestSwitchId(), flow.getSrcPort(), flow.getSrcVlan(),
+                        flow.getSrcInnerVlan(), true));
 
         flowByDirection.getOrDefault(false, Collections.emptyList())
                 .forEach(flow -> carrier.notifyActivateFlowMonitoring(flow.getFlowId(),
-                        switchId, flow.getDestPort(), flow.getDestVlan(), flow.getDestInnerVlan(),
-                        false));
+                        switchId, flow.getSrcSwitchId(), flow.getDestPort(), flow.getDestVlan(),
+                        flow.getDestInnerVlan(), false));
+
+        activateHaFlowMonitoringForSwitch(switchId);
+    }
+
+    private void activateHaFlowMonitoringForSwitch(SwitchId switchId) {
+        haFlowRepository.findBySharedEndpointSwitchId(switchId)
+                .forEach(haFlow -> haFlow.getHaSubFlows().stream()
+                        .filter(haSubFlow -> !haSubFlow.isOneSwitchFlow())
+                        .forEach(haSubFlow -> carrier.notifyActivateFlowMonitoring(haSubFlow.getHaSubFlowId(),
+                                switchId, haSubFlow.getEndpointSwitchId(), haFlow.getSharedPort(),
+                                haFlow.getSharedOuterVlan(), haFlow.getSharedInnerVlan(), true)));
+
+        haSubFlowRepository.findByEndpointSwitchId(switchId).stream()
+                .filter(haSubFlow -> !haSubFlow.isOneSwitchFlow())
+                .forEach(haSubFlow -> carrier.notifyActivateFlowMonitoring(haSubFlow.getHaSubFlowId(),
+                        switchId, haSubFlow.getHaFlow().getSharedSwitchId(), haSubFlow.getEndpointPort(),
+                        haSubFlow.getEndpointVlan(), haSubFlow.getEndpointInnerVlan(), false));
     }
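Note the division of labor above: activateHaFlowMonitoringForSwitch fans out one notification per HA sub-flow and direction, while activateFlowMonitoring is the single choke point that drops one-switch sub-flows and switches without RTT support. An illustrative call shape for one forward sub-flow (accessors as used in this class; not part of the patch):

        // The shared endpoint is the monitoring source; the sub-flow endpoint
        // switch becomes dstSwitchId, which downstream becomes the probe's dst MAC.
        flowRttService.activateFlowMonitoring(
                haSubFlow.getHaSubFlowId(),       // flow / HA sub-flow id
                haFlow.getSharedSwitchId(),       // switchId: where the probe is injected
                haSubFlow.getEndpointSwitchId(),  // dstSwitchId: skipped if equal to switchId
                haFlow.getSharedPort(),
                haFlow.getSharedOuterVlan(),
                haFlow.getSharedInnerVlan(),
                true);                            // isForward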
 
     /**
@@ -96,13 +124,22 @@ public void activateFlowMonitoringForSwitch(SwitchId switchId) {
      * @param switchId switch id
      */
     public void sendFlowListOnSwitchCommand(SwitchId switchId) {
-        Set<String> flowOnSwitch =
+        Set<String> flowsOnSwitch =
                 flowRepository.findByEndpointSwitch(switchId).stream()
                         .filter(f -> !f.isOneSwitchFlow())
                         .map(Flow::getFlowId)
                         .collect(Collectors.toSet());
-        carrier.sendListOfFlowBySwitchId(switchId, flowOnSwitch);
+        haSubFlowRepository.findByEndpointSwitchId(switchId).stream()
+                .map(HaSubFlow::getHaSubFlowId)
+                .forEach(flowsOnSwitch::add);
+
+        haFlowRepository.findBySharedEndpointSwitchId(switchId).stream()
+                .flatMap(haFlow -> haFlow.getHaSubFlows().stream())
+                .map(HaSubFlow::getHaSubFlowId)
+                .forEach(flowsOnSwitch::add);
+
+        carrier.sendListOfFlowBySwitchId(switchId, flowsOnSwitch);
     }
 
     private boolean isFlowRttFeatureToggle() {
@@ -115,3 +152,4 @@ private boolean isFlowRttFeatureEnabledFor(SwitchId switchId) {
     }
 
 }
+
diff --git a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/service/IFlowCarrier.java b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/service/IFlowCarrier.java
index 97257adb382..afdf257beba 100644
--- a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/service/IFlowCarrier.java
+++ b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/service/IFlowCarrier.java
@@ -21,8 +21,8 @@
 
 public interface IFlowCarrier {
 
-    void notifyActivateFlowMonitoring(String id, SwitchId switchId, Integer port, Integer vlan, Integer innerVlan,
-                                      boolean isForward);
+    void notifyActivateFlowMonitoring(String id, SwitchId switchId, SwitchId dstSwitchId, Integer port, Integer vlan,
+                                      Integer innerVlan, boolean isForward);
 
     void notifyDeactivateFlowMonitoring(SwitchId switchId, String flowId, boolean isForward);
diff --git a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/flow/FlowHandler.java b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/flow/FlowHandler.java
index cd42923cd1e..eb9eeb6224c 100644
--- a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/flow/FlowHandler.java
+++ b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/flow/FlowHandler.java
@@ -83,8 +83,9 @@ private void handleCommand(Tuple input, String fieldName) throws PipelineExcepti
         command.apply(this);
     }
 
-    public void processActivateFlowMonitoring(String flowId, FlowEndpointPayload flow, boolean isForward) {
-        flowRttService.activateFlowMonitoring(flowId, flow.getDatapath(), flow.getPortNumber(),
+    public void processActivateFlowMonitoring(String flowId, FlowEndpointPayload flow, SwitchId dstSwitchId,
+                                              boolean isForward) {
+        flowRttService.activateFlowMonitoring(flowId, flow.getDatapath(), dstSwitchId, flow.getPortNumber(),
                 flow.getVlanId(), flow.getInnerVlanId(), isForward);
     }
 
@@ -103,8 +104,8 @@ public void processDeactivateFlowMonitoringOnSwitch(SwitchId switchId) {
     }
 
     @Override
-    public void notifyActivateFlowMonitoring(String flowId, SwitchId switchId, Integer port, Integer vlan,
-                                             Integer innerVlan, boolean isForward) {
+    public void notifyActivateFlowMonitoring(String flowId, SwitchId switchId, SwitchId dstSwitchId, Integer port,
+                                             Integer vlan, Integer innerVlan, boolean isForward) {
         AddFlow addFlow = AddFlow.builder()
                 .flowId(flowId)
                 .port(port)
@@ -112,6 +113,7 @@ public void notifyActivateFlowMonitoring(String flowId, SwitchId
switchId, Integ .innerTunnelId(innerVlan.longValue()) .direction(isForward ? FlowDirection.FORWARD : FlowDirection.REVERSE) .headers(buildHeader()) + .dstMac(dstSwitchId.toMacAddress()) .build(); emit(STREAM_CONTROL_COMMANDS_ID, getCurrentTuple(), new Values(switchId.toString(), addFlow)); diff --git a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/flow/command/ActivateFlowMonitoringCommand.java b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/flow/command/ActivateFlowMonitoringCommand.java index a54179d1fc7..2c99d4cf665 100644 --- a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/flow/command/ActivateFlowMonitoringCommand.java +++ b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/flow/command/ActivateFlowMonitoringCommand.java @@ -16,7 +16,6 @@ package org.openkilda.server42.control.topology.storm.bolt.flow.command; -import org.openkilda.messaging.payload.flow.FlowEndpointPayload; import org.openkilda.server42.control.messaging.flowrtt.ActivateFlowMonitoringInfoData; import org.openkilda.server42.control.topology.storm.bolt.flow.FlowHandler; @@ -33,7 +32,12 @@ public ActivateFlowMonitoringCommand(ActivateFlowMonitoringInfoData data, boolea @Override public void apply(FlowHandler handler) { - FlowEndpointPayload flow = isForward ? data.getSource() : data.getDestination(); - handler.processActivateFlowMonitoring(data.getId(), flow, isForward); + if (isForward) { + handler.processActivateFlowMonitoring(data.getId(), data.getSource(), + data.getDestination().getDatapath(), true); + } else { + handler.processActivateFlowMonitoring(data.getId(), data.getDestination(), + data.getSource().getDatapath(), false); + } } } diff --git a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/router/Router.java b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/router/Router.java index 2f774b534ab..e8749f0125a 100644 --- a/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/router/Router.java +++ b/src-java/server42/server42-control-storm-topology/src/main/java/org/openkilda/server42/control/topology/storm/bolt/router/Router.java @@ -42,7 +42,6 @@ import org.openkilda.server42.control.topology.storm.bolt.isl.command.IslCommand; import org.openkilda.server42.control.topology.storm.bolt.isl.command.SendIslListOnSwitchCommand; import org.openkilda.wfm.AbstractBolt; -import org.openkilda.wfm.error.PipelineException; import org.openkilda.wfm.share.zk.ZkStreams; import org.openkilda.wfm.share.zk.ZooKeeperBolt; import org.openkilda.wfm.topology.utils.MessageKafkaTranslator; @@ -99,7 +98,7 @@ private void handleTick(Tuple input) { service.processSync(); } - private void handleMessage(Tuple input, Message message) throws PipelineException { + private void handleMessage(Tuple input, Message message) { if (message instanceof InfoMessage) { handleInfoMessage(input, ((InfoMessage) message).getData()); } else { @@ -107,11 +106,11 @@ private void handleMessage(Tuple input, Message message) throws PipelineExceptio } } - private void handleInfoMessage(Tuple input, InfoData payload) throws PipelineException { + private void handleInfoMessage(Tuple input, InfoData 
payload) { if (payload instanceof ActivateFlowMonitoringInfoData) { ActivateFlowMonitoringInfoData data = (ActivateFlowMonitoringInfoData) payload; - emit(STREAM_FLOW_ID, input, makeTuple(new ActivateFlowMonitoringCommand(data, true))); emit(STREAM_FLOW_ID, input, makeTuple(new ActivateFlowMonitoringCommand(data, false))); + emit(STREAM_FLOW_ID, input, makeTuple(new ActivateFlowMonitoringCommand(data, true))); } else if (payload instanceof DeactivateFlowMonitoringInfoData) { DeactivateFlowMonitoringInfoData data = (DeactivateFlowMonitoringInfoData) payload; for (SwitchId switchId : data.getSwitchIds()) { diff --git a/src-java/server42/server42-control-storm-topology/src/test/java/org/openkilda/server42/control/topology/service/FlowRttServiceTest.java b/src-java/server42/server42-control-storm-topology/src/test/java/org/openkilda/server42/control/topology/service/FlowRttServiceTest.java new file mode 100644 index 00000000000..285c4e69b5a --- /dev/null +++ b/src-java/server42/server42-control-storm-topology/src/test/java/org/openkilda/server42/control/topology/service/FlowRttServiceTest.java @@ -0,0 +1,154 @@ +/* Copyright 2023 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.openkilda.server42.control.topology.service; + +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.openkilda.persistence.ferma.repositories.FermaModelUtils.buildHaSubFlow; + +import org.openkilda.model.Flow; +import org.openkilda.model.FlowEncapsulationType; +import org.openkilda.model.HaFlow; +import org.openkilda.model.HaSubFlow; +import org.openkilda.model.Switch; +import org.openkilda.persistence.inmemory.InMemoryGraphBasedTest; +import org.openkilda.persistence.repositories.FlowRepository; +import org.openkilda.persistence.repositories.HaFlowRepository; +import org.openkilda.persistence.repositories.HaSubFlowRepository; + +import com.google.common.collect.Sets; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +import java.util.HashSet; +import java.util.Set; + +@ExtendWith(MockitoExtension.class) +public class FlowRttServiceTest extends InMemoryGraphBasedTest { + + private static final String SUB_FLOW_ID_A = "test_ha_flow_1-a"; + private static final String SUB_FLOW_ID_B = "test_ha_flow_1-b"; + private static final String FLOW_ID_1 = "test_flow_1"; + + private Switch switch1; + private Switch switch2; + private Switch switch3; + + private static HaFlowRepository haFlowRepository; + private static HaSubFlowRepository haSubFlowRepository; + private static FlowRepository flowRepository; + + @Mock + private IFlowCarrier carrier; + + private FlowRttService flowRttService; + + + @BeforeAll + public static void setupOnce() { + haFlowRepository = 
persistenceManager.getRepositoryFactory().createHaFlowRepository(); + haSubFlowRepository = persistenceManager.getRepositoryFactory().createHaSubFlowRepository(); + flowRepository = persistenceManager.getRepositoryFactory().createFlowRepository(); + } + + @BeforeEach + public void setUp() { + flowRttService = new FlowRttService(carrier, persistenceManager); + switch1 = createTestSwitch(SWITCH_ID_1); + switch2 = createTestSwitch(SWITCH_ID_2); + switch3 = createTestSwitch(SWITCH_ID_3); + createHaFlow(); + createFlow(); + } + + @Test + public void sendFlowListOnSwitchCommandHaFlow() { + final Set&lt;String&gt; expectedFlowsOnSwitch = Sets.newHashSet(SUB_FLOW_ID_A, SUB_FLOW_ID_B, FLOW_ID_1); + flowRttService.sendFlowListOnSwitchCommand(SWITCH_ID_1); + verify(carrier).sendListOfFlowBySwitchId(SWITCH_ID_1, expectedFlowsOnSwitch); + verifyNoMoreInteractions(carrier); + } + + @Test + void activateFlowMonitoringForSwitchForward() { + flowRttService.activateFlowMonitoringForSwitch(SWITCH_ID_1); + verify(carrier).notifyActivateFlowMonitoring(SUB_FLOW_ID_A, SWITCH_ID_1, SWITCH_ID_2, + PORT_1, VLAN_1, INNER_VLAN_1, true); + verify(carrier).notifyActivateFlowMonitoring(SUB_FLOW_ID_B, SWITCH_ID_1, SWITCH_ID_3, + PORT_1, VLAN_1, INNER_VLAN_1, true); + verify(carrier).notifyActivateFlowMonitoring(FLOW_ID_1, SWITCH_ID_1, SWITCH_ID_2, + PORT_1, VLAN_1, INNER_VLAN_1, true); + verifyNoMoreInteractions(carrier); + } + + @Test + void activateFlowMonitoringForSwitchReverse() { + flowRttService.activateFlowMonitoringForSwitch(SWITCH_ID_2); + verify(carrier).notifyActivateFlowMonitoring(SUB_FLOW_ID_A, SWITCH_ID_2, SWITCH_ID_1, + PORT_1, VLAN_2, INNER_VLAN_2, false); + verify(carrier).notifyActivateFlowMonitoring(FLOW_ID_1, SWITCH_ID_2, SWITCH_ID_1, + PORT_2, VLAN_2, INNER_VLAN_2, false); + verifyNoMoreInteractions(carrier); + } + + @Test + void activateFlowMonitoringNoRtt() { + flowRttService.activateFlowMonitoring(SUB_FLOW_ID_A, SWITCH_ID_3, SWITCH_ID_2, + PORT_1, 0, 0, true); + verifyNoMoreInteractions(carrier); + } + + private void createHaFlow() { + final HaFlow haFlow = HaFlow.builder() + .haFlowId(HA_FLOW_ID_1) + .sharedSwitch(switch1) + .sharedPort(PORT_1) + .sharedOuterVlan(VLAN_1) + .sharedInnerVlan(INNER_VLAN_1) + .encapsulationType(FlowEncapsulationType.TRANSIT_VLAN) + .build(); + + final Set&lt;HaSubFlow&gt; haSubFlows = new HashSet&lt;&gt;(); + + HaSubFlow subFlowA = buildHaSubFlow(SUB_FLOW_ID_A, switch2, PORT_1, VLAN_2, INNER_VLAN_2, DESCRIPTION_1); + haSubFlows.add(subFlowA); + haSubFlowRepository.add(subFlowA); + HaSubFlow subFlowB = buildHaSubFlow(SUB_FLOW_ID_B, switch3, PORT_2, VLAN_3, INNER_VLAN_3, DESCRIPTION_2); + haSubFlows.add(subFlowB); + haSubFlowRepository.add(subFlowB); + + haFlow.setHaSubFlows(haSubFlows); + haFlowRepository.add(haFlow); + } + + private void createFlow() { + flowRepository.add(Flow.builder() + .flowId(FLOW_ID_1) + .srcSwitch(switch1) + .srcPort(PORT_1) + .srcVlan(VLAN_1) + .srcInnerVlan(INNER_VLAN_1) + .destSwitch(switch2) + .destPort(PORT_2) + .destVlan(VLAN_2) + .destInnerVlan(INNER_VLAN_2) + .build()); + } +} diff --git a/src-java/server42/server42-control/build.gradle b/src-java/server42/server42-control/build.gradle index c8f9d39bb7e..eb9af859e49 100644 --- a/src-java/server42/server42-control/build.gradle +++ b/src-java/server42/server42-control/build.gradle @@ -69,3 +69,7 @@ bootJar { } bootJar.dependsOn generateVersionTxt + +test { + useJUnitPlatform() +}
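Note on the Gate.java change that follows (illustration only, not part of the patch): the Kafka message key keeps selecting the switch that originates the server42 probe, while the probe's destination MAC now comes from the AddFlow payload instead of being derived from that key. A minimal sketch of the new contract, reusing the AddFlow builder and SwitchId.toMacAddress() shown elsewhere in this patch; the literal ids are made up:

    // key = switch that originates the probe; dstMac = MAC of the flow's far end
    SwitchId srcSwitchId = new SwitchId("00:00:00:00:00:00:00:01"); // hypothetical
    SwitchId dstSwitchId = new SwitchId("00:00:00:00:00:00:00:02"); // hypothetical
    AddFlow addFlow = AddFlow.builder()
            .flowId("ha-sub-flow-a")            // hypothetical id
            .port(42)
            .tunnelId(1001L)
            .direction(FlowDirection.FORWARD)
            .dstMac(dstSwitchId.toMacAddress()) // no longer recomputed from the key
            .build();
    // the record is then keyed by srcSwitchId.toString(), exactly as before

For an HA flow, both sub-flows can share the key (the shared endpoint's switch) while carrying different dstMac values, one per sub-flow endpoint.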
diff --git a/src-java/server42/server42-control/src/main/java/org/openkilda/server42/control/kafka/Gate.java b/src-java/server42/server42-control/src/main/java/org/openkilda/server42/control/kafka/Gate.java index a833def89ed..d15977d5689 100644 --- a/src-java/server42/server42-control/src/main/java/org/openkilda/server42/control/kafka/Gate.java +++ b/src-java/server42/server42-control/src/main/java/org/openkilda/server42/control/kafka/Gate.java @@ -109,12 +109,12 @@ public void onPartitionsAssigned(Map&lt;TopicPartition, Long&gt; assignments, Consumer @KafkaHandler void listen(@Payload AddFlow data, @Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) String switchIdKey) { SwitchId switchId = new SwitchId(switchIdKey); - Builder builder = CommandPacket.newBuilder(); Flow flow = Flow.newBuilder() .setFlowId(data.getFlowId()) + .setSwitchId(switchId.toMacAddress()) .setEncapsulationType(Flow.EncapsulationType.VLAN) .setTunnelId(data.getTunnelId()) .setTransitEncapsulationType(Flow.EncapsulationType.VLAN) @@ -122,14 +122,15 @@ void listen(@Payload AddFlow data, .setTransitTunnelId(switchToVlanMap.get(switchIdKey)) .setDirection(FlowDirection.toBoolean(data.getDirection())) .setUdpSrcPort(flowRttUdpSrcPortOffset + data.getPort()) - .setDstMac(switchId.toMacAddress()) + .setDstMac(data.getDstMac()) .setHashCode(data.hashCode()) .build(); FlowRttControl.AddFlow addFlow = FlowRttControl.AddFlow.newBuilder().setFlow(flow).build(); - builder.setType(Type.ADD_FLOW); - builder.addCommand(Any.pack(addFlow)); - CommandPacket packet = builder.build(); + CommandPacket packet = CommandPacket.newBuilder() + .setType(Type.ADD_FLOW) + .addCommand(Any.pack(addFlow)) + .build(); try { zeroMqClient.send(packet); } catch (InvalidProtocolBufferException e) { diff --git a/src-java/server42/server42-control/src/test/java/org/openkilda/server42/control/kafka/GateTest.java b/src-java/server42/server42-control/src/test/java/org/openkilda/server42/control/kafka/GateTest.java index 2e0732a7bed..b7459bd5eda 100644 --- a/src-java/server42/server42-control/src/test/java/org/openkilda/server42/control/kafka/GateTest.java +++ b/src-java/server42/server42-control/src/test/java/org/openkilda/server42/control/kafka/GateTest.java @@ -105,6 +105,7 @@ public void addFlow() throws Exception { .innerTunnelId(1002L) .direction(FlowDirection.REVERSE) .port(42) + .dstMac("1b:45:18:d6:71:ff") .build(); String switchId = "00:00:1b:45:18:d6:71:5a"; @@ -125,6 +126,7 @@ public void addFlow() throws Exception { assertThat(flow.getFlowId()).isEqualTo(addFlow.getFlowId()); assertThat(flow.getTunnelId()).isEqualTo(addFlow.getTunnelId()); assertThat(flow.getInnerTunnelId()).isEqualTo(addFlow.getInnerTunnelId()); + assertThat(flow.getDstMac()).isEqualTo(addFlow.getDstMac()); assertThat(flow.getDirection()).isEqualTo(FlowDirection.toBoolean(addFlow.getDirection())); assertThat(flow.getUdpSrcPort()).isEqualTo(flowRttUdpSrcPortOffset + addFlow.getPort()); @@ -135,8 +137,6 @@ public void addFlow() throws Exception { assertThat(flow.getTransitTunnelId()).isEqualTo(vlan); } }); - - assertThat(flow.getDstMac()).isSubstringOf(switchId).isNotEqualTo(switchId); } @Test diff --git a/src-java/stats-topology/stats-storm-topology/src/main/java/org/openkilda/wfm/topology/stats/service/KildaEntryCacheService.java b/src-java/stats-topology/stats-storm-topology/src/main/java/org/openkilda/wfm/topology/stats/service/KildaEntryCacheService.java index 70a86f64f9a..3704e4c0f1f 100644 --- a/src-java/stats-topology/stats-storm-topology/src/main/java/org/openkilda/wfm/topology/stats/service/KildaEntryCacheService.java +++ 
b/src-java/stats-topology/stats-storm-topology/src/main/java/org/openkilda/wfm/topology/stats/service/KildaEntryCacheService.java @@ -47,6 +47,7 @@ import org.openkilda.model.SwitchId; import org.openkilda.model.YFlow; import org.openkilda.model.cookie.FlowSegmentCookie; +import org.openkilda.model.cookie.FlowSubType; import org.openkilda.persistence.PersistenceManager; import org.openkilda.persistence.repositories.FlowRepository; import org.openkilda.persistence.repositories.HaFlowRepository; @@ -458,7 +459,7 @@ private void processHaCookies( SwitchId sw = path.get(i).getSwitchId(); FlowSegmentCookie modifiedCookie = isShared - ? cookie.toBuilder().subType(FlowSegmentCookie.FlowSubType.SHARED).build() : cookie; + ? cookie.toBuilder().subType(FlowSubType.SHARED).build() : cookie; //ingress if (i == 0) { cacheHandler.handle(newHaFlowDescriptor(sw, INGRESS, haFlowId, modifiedCookie, diff --git a/src-java/stats-topology/stats-storm-topology/src/test/java/org/openkilda/wfm/topology/stats/StatsTopologyBaseTest.java b/src-java/stats-topology/stats-storm-topology/src/test/java/org/openkilda/wfm/topology/stats/StatsTopologyBaseTest.java index 7c8458dc193..75a23a38f71 100644 --- a/src-java/stats-topology/stats-storm-topology/src/test/java/org/openkilda/wfm/topology/stats/StatsTopologyBaseTest.java +++ b/src-java/stats-topology/stats-storm-topology/src/test/java/org/openkilda/wfm/topology/stats/StatsTopologyBaseTest.java @@ -35,6 +35,7 @@ import org.openkilda.model.SwitchId; import org.openkilda.model.cookie.CookieBase.CookieType; import org.openkilda.model.cookie.FlowSegmentCookie; +import org.openkilda.model.cookie.FlowSubType; import org.openkilda.persistence.inmemory.InMemoryGraphPersistenceManager; import org.openkilda.persistence.repositories.FlowPathRepository; import org.openkilda.persistence.repositories.FlowRepository; @@ -137,16 +138,16 @@ public class StatsTopologyBaseTest extends AbstractStormTest { protected static final FlowSegmentCookie COOKIE_FORWARD = FlowSegmentCookie.builder().flowEffectiveId(1) .direction(FlowPathDirection.FORWARD).build(); protected static final FlowSegmentCookie COOKIE_FORWARD_SUBFLOW_1 = COOKIE_FORWARD.toBuilder() - .subType(FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1).build(); + .subType(FlowSubType.HA_SUB_FLOW_1).build(); protected static final FlowSegmentCookie COOKIE_FORWARD_SUBFLOW_2 = COOKIE_FORWARD.toBuilder() - .subType(FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2).build(); + .subType(FlowSubType.HA_SUB_FLOW_2).build(); protected static final FlowSegmentCookie COOKIE_REVERSE = FlowSegmentCookie.builder().flowEffectiveId(2) .direction(FlowPathDirection.REVERSE).build(); protected static final FlowSegmentCookie COOKIE_REVERSE_SUBFLOW_1 = COOKIE_REVERSE.toBuilder() - .subType(FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1).build(); + .subType(FlowSubType.HA_SUB_FLOW_1).build(); protected static final FlowSegmentCookie COOKIE_REVERSE_SUBFLOW_2 = COOKIE_REVERSE.toBuilder() - .subType(FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2).build(); + .subType(FlowSubType.HA_SUB_FLOW_2).build(); protected static final long MAIN_COOKIE = 15; protected static final long PROTECTED_COOKIE = 17; protected static final FlowSegmentCookie MAIN_FORWARD_COOKIE = new FlowSegmentCookie(FORWARD, MAIN_COOKIE); diff --git a/src-java/stats-topology/stats-storm-topology/src/test/java/org/openkilda/wfm/topology/stats/StatsTopologyHaFlowTest.java b/src-java/stats-topology/stats-storm-topology/src/test/java/org/openkilda/wfm/topology/stats/StatsTopologyHaFlowTest.java index 
2a1b9e52ad8..8284185d45f 100644 --- a/src-java/stats-topology/stats-storm-topology/src/test/java/org/openkilda/wfm/topology/stats/StatsTopologyHaFlowTest.java +++ b/src-java/stats-topology/stats-storm-topology/src/test/java/org/openkilda/wfm/topology/stats/StatsTopologyHaFlowTest.java @@ -50,6 +50,7 @@ import org.openkilda.model.Switch; import org.openkilda.model.SwitchId; import org.openkilda.model.cookie.FlowSegmentCookie; +import org.openkilda.model.cookie.FlowSubType; import org.openkilda.wfm.share.mappers.FlowPathMapper; import com.google.common.collect.Lists; @@ -202,7 +203,7 @@ public void haFlowStatsUpdateTest() { false, false, true, SUB_FLOW_ID_SHARED); FlowSegmentCookie cookieForwardSubflowUpdated1 = haFlowUpdated.getForwardPath() - .getCookie().toBuilder().subType(FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1).build(); + .getCookie().toBuilder().subType(FlowSubType.HA_SUB_FLOW_1).build(); FlowStatsEntry forwardTransitPoint2Updated = new FlowStatsEntry( 1, cookieForwardSubflowUpdated1.getValue(), 130L, 270L, 10, 10); sendStatsMessage(new FlowStatsData(SWITCH_ID_4, Collections.singletonList(forwardTransitPoint2Updated))); @@ -215,7 +216,7 @@ public void haFlowStatsUpdateTest() { validateHaFlowStats(HA_FLOW_ID_1, forwardEgressPoint1Updated, cookieForwardSubflowUpdated1, SWITCH_ID_5, false, true, false, SUB_FLOW_ID_1); FlowSegmentCookie cookieForwardSubflowUpdated2 = haFlowUpdated.getForwardPath() - .getCookie().toBuilder().subType(FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2).build(); + .getCookie().toBuilder().subType(FlowSubType.HA_SUB_FLOW_2).build(); FlowStatsEntry forwardEgressPoint2Updated = new FlowStatsEntry( 1, cookieForwardSubflowUpdated2.getValue(), 110L, 250L, 10, 10); sendStatsMessage(new FlowStatsData(SWITCH_ID_6, Collections.singletonList(forwardEgressPoint2Updated))); @@ -1032,17 +1033,17 @@ private HaFlow createHaFlowYShape() { haFlowPathRepository.add(haPath2); FlowPath flowPathForward1 = createPathWithSegments(SUB_PATH_ID_1, haPath1, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, null, switch1, switch2, switch3, switch4, switch5); + FlowSubType.HA_SUB_FLOW_1, null, switch1, switch2, switch3, switch4, switch5); FlowPath flowPathForward2 = createPathWithSegments(SUB_PATH_ID_2, haPath1, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, null, switch1, switch2, switch3, switch6); + FlowSubType.HA_SUB_FLOW_2, null, switch1, switch2, switch3, switch6); haPath1.setSubPaths(Lists.newArrayList(flowPathForward1, flowPathForward2)); haPath1.setHaSubFlows(Lists.newArrayList(subFlow1, subFlow2)); FlowPath flowPathReverse1 = createPathWithSegments(SUB_PATH_ID_3, haPath2, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, METER_ID_REVERSE_SUB_PATH_1, + FlowSubType.HA_SUB_FLOW_1, METER_ID_REVERSE_SUB_PATH_1, switch5, switch4, switch3, switch2, switch1); FlowPath flowPathReverse2 = createPathWithSegments(SUB_PATH_ID_4, haPath2, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, + FlowSubType.HA_SUB_FLOW_2, METER_ID_REVERSE_SUB_PATH_2, switch6, switch3, switch2, switch1); haPath2.setSubPaths(Lists.newArrayList(flowPathReverse1, flowPathReverse2)); haPath2.setHaSubFlows(Lists.newArrayList(subFlow2, subFlow1)); @@ -1093,16 +1094,16 @@ private HaFlow createHaFlowVShape() { haFlowPathRepository.add(haPath2); FlowPath flowPathForward1 = create2SwitchPath(SUB_PATH_ID_1, haPath1, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, switch2, switch3, null); + FlowSubType.HA_SUB_FLOW_1, switch2, switch3, null); FlowPath flowPathForward2 = 
create2SwitchPath(SUB_PATH_ID_2, haPath1, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, switch2, switch4, null); + FlowSubType.HA_SUB_FLOW_2, switch2, switch4, null); haPath1.setSubPaths(Lists.newArrayList(flowPathForward1, flowPathForward2)); haPath1.setHaSubFlows(Lists.newArrayList(subFlow1, subFlow2)); FlowPath flowPathReverse1 = create2SwitchPath(SUB_PATH_ID_3, haPath2, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, switch3, switch2, METER_ID_REVERSE_SUB_PATH_1); + FlowSubType.HA_SUB_FLOW_1, switch3, switch2, METER_ID_REVERSE_SUB_PATH_1); FlowPath flowPathReverse2 = create2SwitchPath(SUB_PATH_ID_4, haPath2, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, switch4, switch2, METER_ID_REVERSE_SUB_PATH_2); + FlowSubType.HA_SUB_FLOW_2, switch4, switch2, METER_ID_REVERSE_SUB_PATH_2); haPath2.setSubPaths(Lists.newArrayList(flowPathReverse1, flowPathReverse2)); haPath2.setHaSubFlows(Lists.newArrayList(subFlow2, subFlow1)); @@ -1152,16 +1153,16 @@ private HaFlow createHaFlowIShape3Switches() { haFlowPathRepository.add(haPath2); FlowPath flowPathForward1 = createPathWithSegments(SUB_PATH_ID_1, haPath1, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, null, switch1, switch2, switch3); + FlowSubType.HA_SUB_FLOW_1, null, switch1, switch2, switch3); FlowPath flowPathForward2 = create2SwitchPath(SUB_PATH_ID_2, haPath1, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, switch1, switch2, null); + FlowSubType.HA_SUB_FLOW_2, switch1, switch2, null); haPath1.setSubPaths(Lists.newArrayList(flowPathForward1, flowPathForward2)); haPath1.setHaSubFlows(Lists.newArrayList(subFlow1, subFlow2)); FlowPath flowPathReverse1 = createPathWithSegments(SUB_PATH_ID_3, haPath2, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, METER_ID_REVERSE_SUB_PATH_1, switch3, switch2, switch1); + FlowSubType.HA_SUB_FLOW_1, METER_ID_REVERSE_SUB_PATH_1, switch3, switch2, switch1); FlowPath flowPathReverse2 = create2SwitchPath(SUB_PATH_ID_4, haPath2, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, switch2, switch1, METER_ID_REVERSE_SUB_PATH_2); + FlowSubType.HA_SUB_FLOW_2, switch2, switch1, METER_ID_REVERSE_SUB_PATH_2); haPath2.setSubPaths(Lists.newArrayList(flowPathReverse1, flowPathReverse2)); haPath2.setHaSubFlows(Lists.newArrayList(subFlow2, subFlow1)); @@ -1208,16 +1209,16 @@ private HaFlow createHaFlowIShape2Switches() { haFlowPathRepository.add(haPath2); FlowPath flowPathForward1 = create2SwitchPath(SUB_PATH_ID_1, haPath1, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, switch1, switch2, null); + FlowSubType.HA_SUB_FLOW_1, switch1, switch2, null); FlowPath flowPathForward2 = create2SwitchPath(SUB_PATH_ID_2, haPath1, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, switch1, switch2, null); + FlowSubType.HA_SUB_FLOW_2, switch1, switch2, null); haPath1.setSubPaths(Lists.newArrayList(flowPathForward1, flowPathForward2)); haPath1.setHaSubFlows(Lists.newArrayList(subFlow1, subFlow2)); FlowPath flowPathReverse1 = create2SwitchPath(SUB_PATH_ID_3, haPath2, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, switch2, switch1, METER_ID_REVERSE_SUB_PATH_1); + FlowSubType.HA_SUB_FLOW_1, switch2, switch1, METER_ID_REVERSE_SUB_PATH_1); FlowPath flowPathReverse2 = create2SwitchPath(SUB_PATH_ID_4, haPath2, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, switch2, switch1, METER_ID_REVERSE_SUB_PATH_2); + FlowSubType.HA_SUB_FLOW_2, switch2, switch1, METER_ID_REVERSE_SUB_PATH_2); haPath2.setSubPaths(Lists.newArrayList(flowPathReverse1, flowPathReverse2)); 
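// Illustration (not part of the patch): these call sites only swap the nested
// FlowSegmentCookie.FlowSubType reference for the new top-level FlowSubType enum;
// the cookie layout itself is untouched. A per-sub-flow stats cookie is still
// derived from the HA path cookie, e.g. (haFlowPath is a hypothetical variable):
//     FlowSegmentCookie subFlowCookie = haFlowPath.getCookie().toBuilder()
//             .subType(FlowSubType.HA_SUB_FLOW_1)
//             .build();
// which is exactly what createPathWithSegments()/create2SwitchPath() receive below.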
haPath2.setHaSubFlows(Lists.newArrayList(subFlow2, subFlow1)); @@ -1264,16 +1265,16 @@ private HaFlow createHaFlowIShapeShared() { haFlowPathRepository.add(haPath2); FlowPath flowPathForward1 = create2SwitchPath(SUB_PATH_ID_1, haPath1, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, switch1, switch2, null); + FlowSubType.HA_SUB_FLOW_1, switch1, switch2, null); FlowPath flowPathForward2 = createPathWithSegments(SUB_PATH_ID_2, haPath1, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, null, switch1); + FlowSubType.HA_SUB_FLOW_2, null, switch1); haPath1.setSubPaths(Lists.newArrayList(flowPathForward1, flowPathForward2)); haPath1.setHaSubFlows(Lists.newArrayList(subFlow1, subFlow2)); FlowPath flowPathReverse1 = create2SwitchPath(SUB_PATH_ID_3, haPath2, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, switch2, switch1, METER_ID_REVERSE_SUB_PATH_1); + FlowSubType.HA_SUB_FLOW_1, switch2, switch1, METER_ID_REVERSE_SUB_PATH_1); FlowPath flowPathReverse2 = createPathWithSegments(SUB_PATH_ID_4, haPath2, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, METER_ID_REVERSE_SUB_PATH_2, switch1); + FlowSubType.HA_SUB_FLOW_2, METER_ID_REVERSE_SUB_PATH_2, switch1); haPath2.setSubPaths(Lists.newArrayList(flowPathReverse1, flowPathReverse2)); haPath2.setHaSubFlows(Lists.newArrayList(subFlow2, subFlow1)); @@ -1284,7 +1285,7 @@ private HaFlow createHaFlowIShapeShared() { } private FlowPath createPathWithSegments( - PathId pathId, HaFlowPath haFlowPath, HaSubFlow haSubFlow, FlowSegmentCookie.FlowSubType cookieSubType, + PathId pathId, HaFlowPath haFlowPath, HaSubFlow haSubFlow, FlowSubType cookieSubType, MeterId meterId, Switch... switches) { FlowPath path = buildPath(pathId, haFlowPath, switches[0], switches[switches.length - 1]); path.setMeterId(meterId); @@ -1296,7 +1297,7 @@ private FlowPath createPathWithSegments( } private FlowPath buildPathWithSegments( - PathId pathId, HaFlowPath haFlowPath, HaSubFlow haSubFlow, FlowSegmentCookie.FlowSubType cookieSubType, + PathId pathId, HaFlowPath haFlowPath, HaSubFlow haSubFlow, FlowSubType cookieSubType, MeterId meterId, Switch... 
switches) { FlowPath path = buildPath(pathId, haFlowPath, switches[0], switches[switches.length - 1]); path.setMeterId(meterId); @@ -1307,7 +1308,7 @@ private FlowPath buildPathWithSegments( } private FlowPath create2SwitchPath( - PathId pathId, HaFlowPath haFlowPath, HaSubFlow haSubFlow, FlowSegmentCookie.FlowSubType cookieSubType, + PathId pathId, HaFlowPath haFlowPath, HaSubFlow haSubFlow, FlowSubType cookieSubType, Switch switch1, Switch switch2, MeterId meterId) { FlowPath path = buildPath(pathId, haFlowPath, switch1, switch2); path.setMeterId(meterId); @@ -1421,17 +1422,17 @@ private HaFlow buildHaFlowWithoutPersistence(Switch switch1, Switch switch2, Swi FlowPath flowPathForward1 = buildPathWithSegments(SUB_PATH_ID_1, haPath1, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, null, switch1, switch2, switch3, switch4, switch5); + FlowSubType.HA_SUB_FLOW_1, null, switch1, switch2, switch3, switch4, switch5); FlowPath flowPathForward2 = buildPathWithSegments(SUB_PATH_ID_2, haPath1, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, null, switch1, switch2, switch3, switch6); + FlowSubType.HA_SUB_FLOW_2, null, switch1, switch2, switch3, switch6); haPath1.setSubPaths(Lists.newArrayList(flowPathForward1, flowPathForward2)); haPath1.setHaSubFlows(Lists.newArrayList(subFlow1, subFlow2)); FlowPath flowPathReverse1 = buildPathWithSegments(SUB_PATH_ID_3, haPath2, subFlow1, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_1, METER_ID_REVERSE_SUB_PATH_1, + FlowSubType.HA_SUB_FLOW_1, METER_ID_REVERSE_SUB_PATH_1, switch5, switch4, switch3, switch2, switch1); FlowPath flowPathReverse2 = buildPathWithSegments(SUB_PATH_ID_4, haPath2, subFlow2, - FlowSegmentCookie.FlowSubType.HA_SUB_FLOW_2, + FlowSubType.HA_SUB_FLOW_2, METER_ID_REVERSE_SUB_PATH_2, switch6, switch3, switch2, switch1); haPath2.setSubPaths(Lists.newArrayList(flowPathReverse1, flowPathReverse2)); haPath2.setHaSubFlows(Lists.newArrayList(subFlow2, subFlow1)); diff --git a/src-java/swmanager-topology/swmanager-storm-topology/src/main/java/org/openkilda/wfm/topology/switchmanager/SwitchManagerTopologyConfig.java b/src-java/swmanager-topology/swmanager-storm-topology/src/main/java/org/openkilda/wfm/topology/switchmanager/SwitchManagerTopologyConfig.java index a3683fc2ee8..25d4a573bc8 100644 --- a/src-java/swmanager-topology/swmanager-storm-topology/src/main/java/org/openkilda/wfm/topology/switchmanager/SwitchManagerTopologyConfig.java +++ b/src-java/swmanager-topology/swmanager-storm-topology/src/main/java/org/openkilda/wfm/topology/switchmanager/SwitchManagerTopologyConfig.java @@ -78,7 +78,7 @@ default String getGrpcResponseTopic() { int getLagPortOffset(); @Key("lag.port.max.number") - @Default("2999") + @Default("2047") int getLagPortMaxNumber(); @Key("lag.port.pool.chunks.count") From 6856a79d3896413f4b24d8f5345004b472c47383 Mon Sep 17 00:00:00 2001 From: Pablo Murillo Date: Fri, 26 Jan 2024 13:27:55 +0100 Subject: [PATCH 2/5] use one bit from RoutingMetadata TYPE_FIELD for HA_SUB_FLOW_TYPE_FIELD --- .../utils/metadata/MetadataBase.java | 2 +- .../utils/metadata/RoutingMetadata.java | 4 ++-- .../utils/metadata/RoutingMetadataTest.java | 4 ++-- .../rulemanager/utils/RoutingMetadata.java | 9 +++++---- .../utils/RoutingMetadataTest.java | 20 +++++++++---------- 5 files changed, 20 insertions(+), 19 deletions(-) diff --git a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/MetadataBase.java 
b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/MetadataBase.java index e828efadca4..a1dfdb812a9 100644 --- a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/MetadataBase.java +++ b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/MetadataBase.java @@ -29,7 +29,7 @@ @EqualsAndHashCode(of = {"value", "mask"}) public abstract class MetadataBase { // update ALL_FIELDS if modify fields list - static final BitField TYPE_FIELD = new BitField(0x0000_0000_F000_0000L); + static final BitField TYPE_FIELD = new BitField(0x0000_0000_E000_0000L); // used by unit tests to check fields intersections static final BitField[] ALL_FIELDS = new BitField[]{TYPE_FIELD}; diff --git a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/RoutingMetadata.java b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/RoutingMetadata.java index 8c195d4aa6c..fbf4e584a26 100644 --- a/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/RoutingMetadata.java +++ b/src-java/floodlight-service/floodlight-modules/src/main/java/org/openkilda/floodlight/utils/metadata/RoutingMetadata.java @@ -28,7 +28,7 @@ public class RoutingMetadata extends MetadataBase { // update ALL_FIELDS if modify fields list - // used by parent -> 0x0000_0000_F000_0000L + // used by parent -> 0x0000_0000_E000_0000L private static final BitField LLDP_MARKER_FLAG = new BitField(0x0000_0000_0000_0001L); private static final BitField ONE_SWITCH_FLOW_FLAG = new BitField(0x0000_0000_0000_0002L); private static final BitField ARP_MARKER_FLAG = new BitField(0x0000_0000_0000_0004L); @@ -37,7 +37,7 @@ public class RoutingMetadata extends MetadataBase { // NOTE: port count was increased from 128 to 4096. At this moment only 1000 ports can be used // on Noviflow switches. But according to open flow specs port count could be up to 65536. // So we increased port count to maximum possible value. 
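// Illustration (not part of the patch): the INPUT_PORT_FIELD change just below widens
// the port field from 11 bits at offset 17 (2048 values) to 12 bits at offset 16
// (4096 values). Decoding is unchanged, only the constants move:
//     long port = (metadata & INPUT_PORT_FIELD.getMask()) >> INPUT_PORT_FIELD.getOffset();
//     // with mask 0x0000_0000_0FFF_0000L: offset 16, MAX_INPUT_PORT = 0x0FFF = 4095
// which is why the test below switches its offset from 17 to 16 and its loop bound
// from 2047 to 4095.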
- private static final BitField INPUT_PORT_FIELD = new BitField(0x0000_0000_0FFE_0000L); + private static final BitField INPUT_PORT_FIELD = new BitField(0x0000_0000_0FFF_0000L); static final long MAX_INPUT_PORT = INPUT_PORT_FIELD.getMask() >> INPUT_PORT_FIELD.getOffset(); diff --git a/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/utils/metadata/RoutingMetadataTest.java b/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/utils/metadata/RoutingMetadataTest.java index 7cd470c510c..003edb0382c 100644 --- a/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/utils/metadata/RoutingMetadataTest.java +++ b/src-java/floodlight-service/floodlight-modules/src/test/java/org/openkilda/floodlight/utils/metadata/RoutingMetadataTest.java @@ -31,8 +31,8 @@ public void testFieldsIntersection() { @Test public void testInputPortMetadata() { - int offset = 17; - for (int port = 0; port <= 2047; port++) { + int offset = 16; + for (int port = 0; port <= 4095; port++) { RoutingMetadata metadata = RoutingMetadata.builder().inputPort(port).build(new HashSet<>()); long withoutType = ~TYPE_FIELD.getMask() & metadata.getValue().getValue(); Assertions.assertEquals(port, withoutType >> offset); diff --git a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/utils/RoutingMetadata.java b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/utils/RoutingMetadata.java index 3515efbaed1..4d450ea8e4e 100644 --- a/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/utils/RoutingMetadata.java +++ b/src-java/rule-manager/rule-manager-implementation/src/main/java/org/openkilda/rulemanager/utils/RoutingMetadata.java @@ -31,17 +31,18 @@ @EqualsAndHashCode(of = {"value", "mask"}) public class RoutingMetadata { // update ALL_FIELDS if modify fields list - private static final BitField TYPE_FIELD = new BitField(0x0000_0000_F000_0000L); + private static final BitField TYPE_FIELD = new BitField(0x0000_0000_E000_0000L); + private static final BitField HA_SUB_FLOW_TYPE_FIELD = new BitField(0x0000_0000_1000_0000L); private static final BitField LLDP_MARKER_FLAG = new BitField(0x0000_0000_0000_0001L); private static final BitField ONE_SWITCH_FLOW_FLAG = new BitField(0x0000_0000_0000_0002L); private static final BitField ARP_MARKER_FLAG = new BitField(0x0000_0000_0000_0004L); private static final BitField OUTER_VLAN_PRESENCE_FLAG = new BitField(0x0000_0000_0000_0008L); - private static final BitField HA_SUB_FLOW_TYPE_FIELD = new BitField(0x0000_0000_0001_0000L); private static final BitField OUTER_VLAN_FIELD = new BitField(0x0000_0000_0000_FFF0L); - // NOTE: port count is 2048. At this moment only 1000 ports can be used + + // NOTE: port count is 4096. At this moment only 1000 ports can be used // on Noviflow switches. But according to open flow specs port count could be up to 65536. // So we increased port count to maximum possible value. 
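// Worked example (not part of the patch), matching the RoutingMetadataTest expectations
// further below: TYPE_FIELD shrinks from four bits (0xF000_0000) to three (0xE000_0000),
// and the freed bit 28 becomes HA_SUB_FLOW_TYPE_FIELD. For metadata type value 1:
//     no HA sub-flow set:   mask 0x0000_0000_E000_0000L, value 0x0000_0000_2000_0000L
//     HA_SUB_FLOW_1:        mask 0x0000_0000_F000_0000L, value 0x0000_0000_2000_0000L
//     HA_SUB_FLOW_2:        mask 0x0000_0000_F000_0000L, value 0x0000_0000_3000_0000L
// i.e. the sub-flow selector now lives next to the type bits instead of in the former
// 0x0001_0000 bit, which keeps the full 12-bit input-port field free.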
- private static final BitField INPUT_PORT_FIELD = new BitField(0x0000_0000_0FFE_0000L); + private static final BitField INPUT_PORT_FIELD = new BitField(0x0000_0000_0FFF_0000L); public static final int FULL_MASK = -1; static final long MAX_INPUT_PORT = INPUT_PORT_FIELD.getMask() >> INPUT_PORT_FIELD.getOffset(); diff --git a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/utils/RoutingMetadataTest.java b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/utils/RoutingMetadataTest.java index ba4201b892c..9dfc56b26e8 100644 --- a/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/utils/RoutingMetadataTest.java +++ b/src-java/rule-manager/rule-manager-implementation/src/test/java/org/openkilda/rulemanager/utils/RoutingMetadataTest.java @@ -32,8 +32,8 @@ public class RoutingMetadataTest { public void buildRoutingMetadata() { RoutingMetadata routingMetadata = RoutingMetadata.builder().build(FEATURES); - Assertions.assertEquals(0x0000_0000_F000_0000L, routingMetadata.getMask()); - Assertions.assertEquals(0x0000_0000_1000_0000L, routingMetadata.getValue()); + Assertions.assertEquals(0x0000_0000_E000_0000L, routingMetadata.getMask()); + Assertions.assertEquals(0x0000_0000_2000_0000L, routingMetadata.getValue()); } @Test @@ -42,30 +42,30 @@ public void buildRoutingMetadataHaFlow() { .haSubFlowType(HaSubFlowType.HA_SUB_FLOW_1) .build(FEATURES); - Assertions.assertEquals(0x0000_0000_F001_0000L, routingMetadata.getMask()); - Assertions.assertEquals(0x0000_0000_1000_0000L, routingMetadata.getValue()); + Assertions.assertEquals(0x0000_0000_F000_0000L, routingMetadata.getMask()); + Assertions.assertEquals(0x0000_0000_2000_0000L, routingMetadata.getValue()); routingMetadata = RoutingMetadata.builder() .haSubFlowType(HaSubFlowType.HA_SUB_FLOW_2) .build(FEATURES); - Assertions.assertEquals(0x0000_0000_F001_0000L, routingMetadata.getMask()); - Assertions.assertEquals(0x0000_0000_1001_0000L, routingMetadata.getValue()); + Assertions.assertEquals(0x0000_0000_F000_0000L, routingMetadata.getMask()); + Assertions.assertEquals(0x0000_0000_3000_0000L, routingMetadata.getValue()); } @Test public void buildRoutingMetadataHaFlowWithOuterVlan() { RoutingMetadata routingMetadata = RoutingMetadata.builder().outerVlanId(1).build(FEATURES); - Assertions.assertEquals(0x0000_0000_F000_FFF8L, routingMetadata.getMask()); - Assertions.assertEquals(0x0000_0000_1000_0018L, routingMetadata.getValue()); + Assertions.assertEquals(0x0000_0000_E000_FFF8L, routingMetadata.getMask()); + Assertions.assertEquals(0x0000_0000_2000_0018L, routingMetadata.getValue()); routingMetadata = RoutingMetadata.builder() .haSubFlowType(HaSubFlowType.HA_SUB_FLOW_1) .outerVlanId(1) .build(FEATURES); - Assertions.assertEquals(0x0000_0000_F001_FFF8L, routingMetadata.getMask()); - Assertions.assertEquals(0x0000_0000_1000_0018L, routingMetadata.getValue()); + Assertions.assertEquals(0x0000_0000_F000_FFF8L, routingMetadata.getMask()); + Assertions.assertEquals(0x0000_0000_2000_0018L, routingMetadata.getValue()); } } From 3d92193a168dec7ce0e2b3be1d3d517c7d8cf64e Mon Sep 17 00:00:00 2001 From: Yuliia Miroshnychenko Date: Fri, 17 Nov 2023 10:46:48 +0100 Subject: [PATCH 3/5] [TEST]: 5208: Server42 RTT: HaFlow --- confd/vars/main.yaml | 6 + .../helpers/SwitchHelper.groovy | 68 +++++ .../helpers/TopologyHelper.groovy | 18 ++ .../helpers/model/SwitchRules.groovy | 21 +- .../spec/server42/Server42FlowRttSpec.groovy | 238 ++++++++---------- 
.../server42/Server42HaFlowRttSpec.groovy | 197 +++++++++++++++ .../src/test/resources/topology.yaml | 5 + 7 files changed, 419 insertions(+), 134 deletions(-) create mode 100644 src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42HaFlowRttSpec.groovy diff --git a/confd/vars/main.yaml b/confd/vars/main.yaml index 871838444e1..affcd2428b4 100644 --- a/confd/vars/main.yaml +++ b/confd/vars/main.yaml @@ -291,16 +291,22 @@ kilda_server42_control_switch_to_vlan_1000: "1000=00:00:d7:61:46:7b:46:69,00:00: kilda_server42_control_switch_to_vlan_2000: "2000=00:00:55:dd:06:49:d9:61,00:00:84:1e:39:d5:dd:40" kilda_server42_control_switch_to_vlan_1002: "1002=00:00:00:00:00:00:00:02" kilda_server42_control_switch_to_vlan_1003: "1003=00:00:00:00:00:00:00:03" +kilda_server42_control_switch_to_vlan_1007: "1007=00:00:00:00:00:00:00:07" kilda_server42_control_switch_to_vlan_1102: "1102=00:00:00:00:00:01:00:02" kilda_server42_control_switch_to_vlan_1103: "1103=00:00:00:00:00:01:00:03" +kilda_server42_control_switch_to_vlan_1107: "1107=00:00:00:00:00:01:00:07" kilda_server42_control_switch_to_vlan_1202: "1202=00:00:00:00:00:02:00:02" kilda_server42_control_switch_to_vlan_1203: "1203=00:00:00:00:00:02:00:03" +kilda_server42_control_switch_to_vlan_1207: "1207=00:00:00:00:00:02:00:07" kilda_server42_control_switch_to_vlan_1302: "1302=00:00:00:00:00:03:00:02" kilda_server42_control_switch_to_vlan_1303: "1303=00:00:00:00:00:03:00:03" +kilda_server42_control_switch_to_vlan_1307: "1307=00:00:00:00:00:03:00:07" kilda_server42_control_switch_to_vlan_1402: "1402=00:00:00:00:00:04:00:02" kilda_server42_control_switch_to_vlan_1403: "1403=00:00:00:00:00:04:00:03" +kilda_server42_control_switch_to_vlan_1407: "1407=00:00:00:00:00:04:00:07" kilda_server42_control_switch_to_vlan_1502: "1502=00:00:00:00:00:05:00:02" kilda_server42_control_switch_to_vlan_1503: "1503=00:00:00:00:00:05:00:03" +kilda_server42_control_switch_to_vlan_1507: "1507=00:00:00:00:00:05:00:07" kilda_server42_control_kafka_group_id: "server42-control" kilda_server42_control_zeromq_connection_host: "tcp://server42-server.pendev:5555" diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy index a39e6398f81..0db4744a502 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy @@ -42,6 +42,7 @@ import static org.springframework.beans.factory.config.ConfigurableBeanFactory.S import org.openkilda.messaging.info.event.IslChangeType import org.openkilda.messaging.info.event.SwitchChangeType +import org.openkilda.messaging.info.rule.FlowEntry import org.openkilda.model.FlowEncapsulationType import org.openkilda.model.MeterId import org.openkilda.model.SwitchFeature @@ -752,4 +753,71 @@ class SwitchHelper { static SwitchPropertiesDto getCachedSwProps(SwitchId switchId) { return getCachedAllSwProps().find { it.switchId == switchId } } + + static def setServer42FlowRttForSwitch(Switch sw, boolean isServer42FlowRttEnabled, boolean isS42ToggleOn = true) { + def originalProps = northbound.get().getSwitchProperties(sw.dpId) + if (originalProps.server42FlowRtt != isServer42FlowRttEnabled) { + def s42Config = sw.prop + northbound.get().updateSwitchProperties(sw.dpId, 
originalProps.jacksonCopy().tap { + server42FlowRtt = isServer42FlowRttEnabled + server42MacAddress = s42Config ? s42Config.server42MacAddress : null + server42Port = s42Config ? s42Config.server42Port : null + server42Vlan = s42Config ? s42Config.server42Vlan : null + }) + } + int expectedNumberOfS42Rules = (isS42ToggleOn && isServer42FlowRttEnabled) ? getExpectedS42SwitchRulesBasedOnVxlanSupport(sw.dpId) : 0 + Wrappers.wait(RULES_INSTALLATION_TIME) { + assert getS42SwitchRules(sw.dpId).size() == expectedNumberOfS42Rules + } + return originalProps.server42FlowRtt + } + + static List&lt;FlowEntry&gt; getS42SwitchRules(SwitchId swId) { + northbound.get().getSwitchRules(swId).flowEntries + .findAll { it.cookie in [SERVER_42_FLOW_RTT_OUTPUT_VLAN_COOKIE, SERVER_42_FLOW_RTT_OUTPUT_VXLAN_COOKIE] } + } + + static int getExpectedS42SwitchRulesBasedOnVxlanSupport(SwitchId swId) { + // one rule per vlan/vxlan + isVxlanEnabled(swId) ? 2 : 1 + } + + static void waitForS42SwRulesSetup(boolean isS42ToggleOn = true) { + List switchDetails = northboundV2.get().getAllSwitchProperties().switchProperties + + withPool { + Wrappers.wait(RULES_INSTALLATION_TIME) { + switchDetails.eachParallel { sw -> + def expectedRulesNumber = (isS42ToggleOn && sw.server42FlowRtt) ? getExpectedS42SwitchRulesBasedOnVxlanSupport(sw.switchId) : 0 + assert getS42SwitchRules(sw.switchId).size() == expectedRulesNumber + } + } + } + } + + static void verifyAbsenceOfServer42FlowRttRules(Set&lt;Switch&gt; switches) { + // make sure that s42 rules are deleted + withPool { + Wrappers.wait(RULES_INSTALLATION_TIME) { + switches.eachParallel { sw -> + assert northbound.get().getSwitchRules(sw.dpId).flowEntries.findAll { + new Cookie(it.cookie).getType() in [CookieType.SERVER_42_FLOW_RTT_INPUT, + CookieType.SERVER_42_FLOW_RTT_INGRESS] + }.empty + } + } + } + } + + static def revertToOriginSwitchSetup(def initialSwitchRttProps, boolean isS42ToggleOn = true) { + initialSwitchRttProps.each { sw, state -> setServer42FlowRttForSwitch(sw, state, isS42ToggleOn) } + initialSwitchRttProps.keySet().each { Switch sw -> + Wrappers.wait(RULES_INSTALLATION_TIME) { + def actualCookies = northbound.get().getSwitchRules(sw.dpId).flowEntries*.cookie + actualCookies.removeAll(actualCookies.intersect(sw.defaultCookies)) + assert actualCookies.isEmpty(), "Switch: ${sw.dpId}." + + "\nDefault rules: \n${sw.defaultCookies} \nNon-default rules: \n${actualCookies}" + } + } + } } \ No newline at end of file
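Side note on the helpers above (a sketch, not part of the patch): the expected number of SERVER_42_FLOW_RTT_OUTPUT rules is one per supported encapsulation, and it drops to zero when either the global toggle or the per-switch property is off. The same decision expressed in plain Java, with all names hypothetical:

    static int expectedServer42OutputRuleCount(boolean s42ToggleOn, boolean switchFlowRttOn, boolean vxlanSupported) {
        if (!s42ToggleOn || !switchFlowRttOn) {
            return 0;                  // no toggle -> no output rules at all
        }
        return vxlanSupported ? 2 : 1; // OUTPUT_VLAN always, OUTPUT_VXLAN only with VXLAN support
    }

This mirrors getExpectedS42SwitchRulesBasedOnVxlanSupport() plus the toggle guard used in setServer42FlowRttForSwitch() and waitForS42SwRulesSetup().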
diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/TopologyHelper.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/TopologyHelper.groovy index a8a60896a7f..dfba5432503 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/TopologyHelper.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/TopologyHelper.groovy @@ -123,6 +123,24 @@ class TopologyHelper { } } + + SwitchTriplet findSwitchTripletWithSharedEpInTheMiddleOfTheChainServer42Support() { + def server42switches = topology.getActiveServer42Switches() + return switchTriplets.findAll(SwitchTriplet.ALL_ENDPOINTS_DIFFERENT).find { + it.shared.dpId in server42switches.dpId + && it.ep1.dpId in server42switches.dpId + && it.ep2.dpId in server42switches.dpId + && (it.pathsEp1[0].size() == 2 && it.pathsEp2[0].size() == 2) } + } + + SwitchTriplet findSwitchTripletWithSharedEpThatIsNotNeighbourToEp1AndEp2Server42Support() { + def server42switches = topology.getActiveServer42Switches() + return switchTriplets.findAll(SwitchTriplet.ALL_ENDPOINTS_DIFFERENT).find { + it.shared.dpId in server42switches.dpId + && it.ep1.dpId in server42switches.dpId + && it.ep2.dpId in server42switches.dpId + && (it.pathsEp1[0].size() > 2 && it.pathsEp2[0].size() > 2) } + } + SwitchTriplet findSwitchTripletForHaFlowWithProtectedPaths() { return switchTriplets.find { if (!SwitchTriplet.ALL_ENDPOINTS_DIFFERENT(it)) { diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy index 817fa800a3b..f7201a434ea 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy @@ -5,12 +5,12 @@ import org.openkilda.model.FlowEncapsulationType import org.openkilda.model.FlowMeter import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie +import org.openkilda.model.cookie.CookieBase.CookieType import org.openkilda.northbound.dto.v1.flows.PathDiscrepancyDto import org.openkilda.northbound.dto.v2.haflows.HaFlow import org.openkilda.testing.service.database.Database import org.openkilda.testing.service.northbound.NorthboundService - class SwitchRules { NorthboundService northboundService Database database @@ -41,6 +41,10 @@ class SwitchRules { northboundService.deleteSwitchRules(switchId, flowEntry.getCookie()) } + void delete(long cookie) { + northboundService.deleteSwitchRules(switchId, cookie) + } + static Set&lt;Long&gt; missingRuleCookieIds(Collection&lt;PathDiscrepancyDto&gt; missingRules) { return missingRules.collect {new Long((it.getRule() =~ COOKIE_ID_IN_RULE_DISCREPANCY_STRING_REGEX)[0])} } @@ -60,4 +64,19 @@ class SwitchRules { throw new IllegalArgumentException("Unknown encapsulation " + encapsulationType) } } + + List&lt;FlowEntry&gt; getRulesByCookieType(CookieType cookieType) { + northboundService.getSwitchRules(switchId).flowEntries + .findAll { new Cookie(it.cookie).getType() == cookieType } + } + + List&lt;FlowEntry&gt; getServer42FlowRules() { + northboundService.getSwitchRules(switchId).flowEntries + .findAll { new Cookie(it.cookie).getType() in [CookieType.SERVER_42_FLOW_RTT_INPUT, + CookieType.SERVER_42_FLOW_RTT_INGRESS] } + } + + List&lt;FlowEntry&gt; getRules() { + northboundService.getSwitchRules(switchId).flowEntries + } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42FlowRttSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42FlowRttSpec.groovy index 0372e1ba9d8..7856bbb8e55 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42FlowRttSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42FlowRttSpec.groovy @@ -1,8 +1,6 @@ package org.openkilda.functionaltests.spec.server42 -import static groovyx.gpars.GParsPool.withPool import static java.util.concurrent.TimeUnit.SECONDS -import static org.assertj.core.api.Assertions.assertThat import static org.junit.jupiter.api.Assumptions.assumeTrue import static org.openkilda.functionaltests.ResourceLockConstants.S42_TOGGLE import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE @@ -15,7 +13,6 @@ import static org.openkilda.functionaltests.model.stats.Origin.FLOW_MONITORING import static org.openkilda.functionaltests.model.stats.Origin.SERVER_42 import static org.openkilda.functionaltests.model.switches.Manufacturer.WB5164 import static org.openkilda.model.FlowEncapsulationType.VXLAN -import static org.openkilda.model.SwitchFeature.KILDA_OVS_PUSH_POP_MATCH_VXLAN import static org.openkilda.model.cookie.Cookie.SERVER_42_FLOW_RTT_OUTPUT_VLAN_COOKIE import static org.openkilda.model.cookie.Cookie.SERVER_42_FLOW_RTT_OUTPUT_VXLAN_COOKIE import static org.openkilda.testing.Constants.RULES_DELETION_TIME @@ -30,10 +27,10 @@ import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers import org.openkilda.functionaltests.helpers.model.SwitchPair import org.openkilda.functionaltests.helpers.model.SwitchPairs +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory import org.openkilda.functionaltests.model.stats.FlowStats import org.openkilda.messaging.model.system.FeatureTogglesDto import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.SwitchFeature import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType import org.openkilda.northbound.dto.v2.flows.FlowPatchEndpoint @@ -41,7 +38,6 @@ import org.openkilda.northbound.dto.v2.flows.FlowPatchV2 import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.northbound.dto.v2.flows.SwapFlowPayload import org.openkilda.northbound.dto.v2.switches.LagPortRequest -import org.openkilda.testing.model.topology.TopologyDefinition.Switch import groovy.time.TimeCategory import org.springframework.beans.factory.annotation.Autowired @@ -71,6 +67,10 @@ class Server42FlowRttSpec extends HealthCheckSpecification { @Value('${flow.sla.check.interval.seconds}') Integer flowSlaCheckIntervalSeconds + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory + @Tags(TOPOLOGY_DEPENDENT) @IterationTag(tags = [HARDWARE], iterationNameRegex = /(NS|WB)/) def "Create a #flowDescription flow with server42 Rtt feature and check datapoints in tsdb"() { @@ -78,12 +78,13 @@ def "Create a #flowDescription flow with server42 Rtt feature and check datapoints in tsdb"() { def switchPair = switchPairFilter(switchPairs.all().withBothSwitchesConnectedToServer42()).random() when: "Set server42FlowRtt toggle to true" - def
flowRttFeatureStartState = changeFlowRttToggle(true) + def flowRttToggleInitialState = northbound.featureToggles.server42FlowRtt + !flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build()) + switchHelper.waitForS42SwRulesSetup() and: "server42FlowRtt is enabled on src and dst switches" def server42Switch = switchPair.src - def initialSwitchRtt = [server42Switch, switchPair.dst] - .collectEntries { [it, changeFlowRttSwitch(it, true)] } + def initialSwitchProps = [server42Switch, switchPair.dst].collectEntries { [it, switchHelper.setServer42FlowRttForSwitch(it, true)] } and: "Create a flow" def flow = flowHelperV2.randomFlow(switchPair) @@ -96,7 +97,10 @@ class Server42FlowRttSpec extends HealthCheckSpecification { } cleanup: "Revert system to original state" - revertToOrigin([flow], flowRttFeatureStartState, initialSwitchRtt) + flow && flowHelperV2.deleteFlow(flow.flowId) && switchHelper.verifyAbsenceOfServer42FlowRttRules(initialSwitchProps.keySet()) + flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build()) + initialSwitchProps && switchHelper.revertToOriginSwitchSetup(initialSwitchProps, flowRttToggleInitialState) + where: flowDescription | switchPairFilter | flowTap @@ -130,14 +134,15 @@ class Server42FlowRttSpec extends HealthCheckSpecification { SwitchPair switchPair = switchPairs.all().withBothSwitchesConnectedToServer42().random() and: "server42FlowRtt feature toggle is set to true" - def flowRttFeatureStartState = changeFlowRttToggle(true) + def flowRttToggleInitialState = northbound.featureToggles.server42FlowRtt + !flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build()) + switchHelper.waitForS42SwRulesSetup() and: "server42FlowRtt is enabled on src and dst switches" def server42Switch = switchPair.src - def initialSwitchRtt = [server42Switch, switchPair.dst].collectEntries { [it, changeFlowRttSwitch(it, true)] } + def initialSwitchProps = [server42Switch, switchPair.dst].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] } when: "Create a flow for forward metric" - def flowCreateTime = new Date() def flow = flowHelperV2.randomFlow(switchPair) flowHelperV2.addFlow(flow) @@ -151,17 +156,14 @@ class Server42FlowRttSpec extends HealthCheckSpecification { then: "Server42 input/ingress rules are installed" Wrappers.wait(RULES_INSTALLATION_TIME) { - [switchPair.src, switchPair.dst].each { + [switchPair.src, switchPair.dst].each { sw -> /** - one rule of each type for one flow; * - no SERVER_42_FLOW_RTT_INGRESS cookie in singleTable; * - SERVER_42_FLOW_RTT_INGRESS is installed for each different flow port * (if there are 10 flows on port number 5, then there will be installed one INPUT rule); * - SERVER_42_FLOW_RTT_INGRESS is installed for each flow. 
*/ - assert northbound.getSwitchRules(it.dpId).flowEntries.findAll { - new Cookie(it.cookie).getType() in [CookieType.SERVER_42_FLOW_RTT_INPUT, - CookieType.SERVER_42_FLOW_RTT_INGRESS] - }.size() == 4 + assert switchRulesFactory.get(sw.dpId).getServer42FlowRules().cookie.size() == 4 } } @@ -171,11 +173,14 @@ class Server42FlowRttSpec extends HealthCheckSpecification { and: "Check if stats for forward and reverse flows are available" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert flowStats.of(reversedFlow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() } cleanup: "Revert system to original state" - revertToOrigin([flow, reversedFlow], flowRttFeatureStartState, initialSwitchRtt) + [flow, reversedFlow].findAll().each { flowHelperV2.deleteFlow(it.flowId) } + switchHelper.verifyAbsenceOfServer42FlowRttRules(initialSwitchProps.keySet()) + flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build()) + initialSwitchProps && switchHelper.revertToOriginSwitchSetup(initialSwitchProps, flowRttToggleInitialState) } def "Flow rtt stats are available only if both global and switch toggles are 'on' on both endpoints"() { @@ -184,10 +189,12 @@ class Server42FlowRttSpec extends HealthCheckSpecification { def statsWaitSeconds = 4 and: "server42FlowRtt toggle is turned off" - def flowRttFeatureStartState = changeFlowRttToggle(false) + def flowRttToggleInitialState = northbound.featureToggles.server42FlowRtt + flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(false).build()) + switchHelper.waitForS42SwRulesSetup(false) and: "server42FlowRtt is turned off on src and dst" - def initialSwitchRtt = [switchPair.src, switchPair.dst].collectEntries { [it, changeFlowRttSwitch(it, false)] } + def initialSwitchProps = [switchPair.src, switchPair.dst].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, false, false)] } and: "Flow for forward metric is created" def flow = flowHelperV2.randomFlow(switchPair) @@ -212,7 +219,8 @@ class Server42FlowRttSpec extends HealthCheckSpecification { flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() when: "Enable global rtt toggle" - changeFlowRttToggle(true) + northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build()) + switchHelper.waitForS42SwRulesSetup() and: "Wait for several seconds" def checkpointTime = new Date().getTime() @@ -225,23 +233,23 @@ class Server42FlowRttSpec extends HealthCheckSpecification { flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() when: "Enable switch rtt toggle on src and dst" - changeFlowRttSwitch(switchPair.src, true) - changeFlowRttSwitch(switchPair.dst, true) + switchHelper.setServer42FlowRttForSwitch(switchPair.src, true) + switchHelper.setServer42FlowRttForSwitch(switchPair.dst, true) checkpointTime = new Date().getTime() then: "Stats for forward and reverse flow are available" - Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + SERVER42_STATS_LAG, 1) { assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) //https://github.com/telstra/open-kilda/issues/4678 //assert 
flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) } when: "Disable switch rtt toggle on dst (still enabled on src)" - changeFlowRttSwitch(switchPair.dst, false) + switchHelper.setServer42FlowRttForSwitch(switchPair.dst, false) checkpointTime = new Date().getTime() then: "Stats for forward and reverse flow are available" - Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + WAIT_OFFSET, 1) { + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + SERVER42_STATS_LAG, 1) { def stats = flowStats.of(flow.getFlowId()) assert stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) //https://github.com/telstra/open-kilda/issues/4678 @@ -249,11 +257,11 @@ class Server42FlowRttSpec extends HealthCheckSpecification { } when: "Disable global toggle" - changeFlowRttToggle(false) + northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(false).build()) and: "Wait for several seconds" - checkpointTime = new Date().getTime() SECONDS.sleep(statsWaitSeconds) + checkpointTime = new Date().getTime() then: "Expect no flow rtt stats for forward flow" !flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) @@ -262,7 +270,10 @@ class Server42FlowRttSpec extends HealthCheckSpecification { !flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) cleanup: "Revert system to original state" - revertToOrigin([flow, reversedFlow], flowRttFeatureStartState, initialSwitchRtt) + [flow, reversedFlow].findAll().each { flowHelperV2.deleteFlow(it.flowId) } + switchHelper.verifyAbsenceOfServer42FlowRttRules(initialSwitchProps.keySet()) + flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build()) + initialSwitchProps && switchHelper.revertToOriginSwitchSetup(initialSwitchProps, flowRttToggleInitialState) } @Tags([TOPOLOGY_DEPENDENT]) @@ -271,8 +282,11 @@ class Server42FlowRttSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().withBothSwitchesConnectedToSameServer42Instance().random() and: "server42FlowRtt feature enabled globally and on src/dst switch" - def flowRttFeatureStartState = changeFlowRttToggle(true) - def initialSwitchRtt = [switchPair.src, switchPair.dst].collectEntries { [it, changeFlowRttSwitch(it, true)] } + def flowRttToggleInitialState = northbound.featureToggles.server42FlowRtt + !flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build()) + switchHelper.waitForS42SwRulesSetup() + + def initialSwitchProps = [switchPair.src, switchPair.dst].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] } when: "Create a flow" def checkpointTime = new Date() @@ -291,7 +305,7 @@ class Server42FlowRttSpec extends HealthCheckSpecification { } when: "Disable flow rtt on dst switch" - changeFlowRttSwitch(switchPair.dst, false) + switchHelper.setServer42FlowRttForSwitch(switchPair.dst, false) Wrappers.wait(RULES_INSTALLATION_TIME, 3) { assert !switchHelper.validateAndCollectFoundDiscrepancies(switchPair.dst.dpId).isPresent() } @@ -307,7 +321,9 @@ class Server42FlowRttSpec extends HealthCheckSpecification { !flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) cleanup: "Revert system to original state" - revertToOrigin([flow], flowRttFeatureStartState, initialSwitchRtt) + flow && flowHelperV2.deleteFlow(flow.flowId) && 
switchHelper.verifyAbsenceOfServer42FlowRttRules(initialSwitchProps.keySet()) + flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build()) + initialSwitchProps && switchHelper.revertToOriginSwitchSetup(initialSwitchProps, flowRttToggleInitialState) } @Tags(HARDWARE) //not supported on a local env (the 'stub' service doesn't send real traffic through a switch) @@ -315,12 +331,14 @@ class Server42FlowRttSpec extends HealthCheckSpecification { given: "A switch pair connected to server42" def switchPair = switchPairs.all().withBothSwitchesConnectedToServer42().random() //enable server42 in featureToggle and on the switches - def flowRttFeatureStartState = changeFlowRttToggle(true) + def flowRttToggleInitialState = northbound.featureToggles.server42FlowRtt + !flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build()) + switchHelper.waitForS42SwRulesSetup() + def server42Switch = switchPair.src - def initialSwitchRtt = [server42Switch, switchPair.dst].collectEntries { [it, changeFlowRttSwitch(it, true)] } + def initialSwitchProps = [server42Switch, switchPair.dst].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] } and: "A flow on the given switch pair" - def flowCreateTime = new Date() def flow = flowHelperV2.randomFlow(switchPair) flowHelperV2.addFlow(flow) @@ -330,10 +348,9 @@ class Server42FlowRttSpec extends HealthCheckSpecification { } when: "Delete ingress server42 rule related to the flow on the src switch" - def cookieToDelete = northbound.getSwitchRules(switchPair.src.dpId).flowEntries.find { - new Cookie(it.cookie).getType() == CookieType.SERVER_42_FLOW_RTT_INGRESS - }.cookie - northbound.deleteSwitchRules(switchPair.src.dpId, cookieToDelete) + def switchRules = switchRulesFactory.get(switchPair.src.dpId) + def cookieToDelete = switchRules.getRulesByCookieType(CookieType.SERVER_42_FLOW_RTT_INGRESS).first().cookie + switchRules.delete(cookieToDelete) then: "System detects missing rule on the src switch" Wrappers.wait(RULES_DELETION_TIME) { @@ -369,9 +386,7 @@ class Server42FlowRttSpec extends HealthCheckSpecification { then: "Missing ingress server42 rule is reinstalled on the src switch" Wrappers.wait(RULES_INSTALLATION_TIME) { assert !switchHelper.validateAndCollectFoundDiscrepancies(switchPair.src.dpId).isPresent() - assert northbound.getSwitchRules(switchPair.src.dpId).flowEntries.findAll { - new Cookie(it.cookie).getType() == CookieType.SERVER_42_FLOW_RTT_INGRESS - }*.cookie.size() == 1 + assert switchRules.getRulesByCookieType(CookieType.SERVER_42_FLOW_RTT_INGRESS).cookie.size() == 1 } def timeWhenMissingRuleIsReinstalled = new Date().getTime() @@ -381,7 +396,9 @@ class Server42FlowRttSpec extends HealthCheckSpecification { } cleanup: "Revert system to original state" - revertToOrigin([flow], flowRttFeatureStartState, initialSwitchRtt) + flow && flowHelperV2.deleteFlow(flow.flowId) && switchHelper.verifyAbsenceOfServer42FlowRttRules(initialSwitchProps.keySet()) + flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build()) + initialSwitchProps && switchHelper.revertToOriginSwitchSetup(initialSwitchProps, flowRttToggleInitialState) } @Tags(LOW_PRIORITY) @@ -396,11 +413,13 @@ class Server42FlowRttSpec extends HealthCheckSpecification { .getReversed() and: "server42 is enabled on the src sw of the first switch pair" - def 
flowRttFeatureStartState = changeFlowRttToggle(true) - changeFlowRttSwitch(fl1SwPair.src, true) + def flowRttToggleInitialState = northbound.featureToggles.server42FlowRtt + !flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build()) + switchHelper.waitForS42SwRulesSetup() + + switchHelper.setServer42FlowRttForSwitch(fl1SwPair.src, true) and: "Two flows on the given switch pairs" - def flowCreateTime = new Date() def flow1 = flowHelperV2.randomFlow(fl1SwPair) def flow2 = flowHelperV2.randomFlow(fl2SwPair) flowHelperV2.addFlow(flow1) @@ -459,8 +478,9 @@ class Server42FlowRttSpec extends HealthCheckSpecification { .hasNonZeroValuesAfter(timeWhenEndpointWereSwapped + 1000) cleanup: - flowRttFeatureStartState && changeFlowRttToggle(flowRttFeatureStartState) - fl1SwPair && changeFlowRttSwitch(fl1SwPair.src, true) + [flow1, flow2].findAll().each { flowHelperV2.deleteFlow(it.flowId) } + flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build()) + fl1SwPair && switchHelper.setServer42FlowRttForSwitch(fl1SwPair.src, true, flowRttToggleInitialState) } def "Rtt statistic is available for a flow in case switch is not connected to server42"() { @@ -468,11 +488,13 @@ class Server42FlowRttSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().withOnlySourceSwitchConnectedToServer42().random() when: "Set server42FlowRtt toggle to true" - def flowRttFeatureStartState = changeFlowRttToggle(true) + def flowRttToggleInitialState = northbound.featureToggles.server42FlowRtt + !flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build()) + switchHelper.waitForS42SwRulesSetup() and: "server42FlowRtt is enabled on src switch" def initialSrcSwS42Props = switchHelper.getCachedSwProps(switchPair.src.dpId).server42FlowRtt - changeFlowRttSwitch(switchPair.src, true) + switchHelper.setServer42FlowRttForSwitch(switchPair.src, true) and: "Create a flow" def flow = flowHelperV2.randomFlow(switchPair) @@ -491,7 +513,7 @@ class Server42FlowRttSpec extends HealthCheckSpecification { } when: "Disable server42FlowRtt on the src switch" - changeFlowRttSwitch(switchPair.src, false) + switchHelper.setServer42FlowRttForSwitch(switchPair.src, false) then: "Stats from flow monitoring feature for forward direction are available" Wrappers.wait(flowSlaCheckIntervalSeconds + WAIT_OFFSET * 2, 1) { @@ -499,8 +521,9 @@ class Server42FlowRttSpec extends HealthCheckSpecification { } cleanup: "Revert system to original state" - flowRttFeatureStartState && changeFlowRttToggle(flowRttFeatureStartState) - initialSrcSwS42Props && changeFlowRttSwitch(switchPair.src, initialSrcSwS42Props) + flow && flowHelperV2.deleteFlow(flow.flowId) + flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build()) + initialSrcSwS42Props && switchHelper.setServer42FlowRttForSwitch(switchPair.src, initialSrcSwS42Props, flowRttToggleInitialState) } @Tags(HARDWARE) //not supported on a local env (the 'stub' service doesn't send real traffic through a switch) @@ -510,11 +533,13 @@ class Server42FlowRttSpec extends HealthCheckSpecification { assumeTrue(switchPair != null, "Was not able to find a switchPair with a server42 connection") and: "server42FlowRtt toggle is set to true" - def flowRttFeatureStartState = changeFlowRttToggle(true) + def flowRttToggleInitialState = 
northbound.featureToggles.server42FlowRtt
+        !flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build())
+        switchHelper.waitForS42SwRulesSetup()
 
         and: "server42FlowRtt is enabled on src and dst switches"
         def server42Switch = switchPair.src
-        def initialSwitchRtt = [server42Switch, switchPair.dst].collectEntries { [it, changeFlowRttSwitch(it, true)] }
+        def initialSwitchProps = [server42Switch, switchPair.dst].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] }
 
         and: "A flow"
         def flow = flowHelperV2.randomFlow(switchPair)
@@ -550,7 +575,9 @@ class Server42FlowRttSpec extends HealthCheckSpecification {
         switchHelper.synchronizeAndCollectFixedDiscrepancies(switchPair.toList()*.getDpId()).isEmpty()
 
         cleanup: "Revert system to original state"
-        revertToOrigin([flow], flowRttFeatureStartState, initialSwitchRtt)
+        flow && flowHelperV2.deleteFlow(flow.flowId) && switchHelper.verifyAbsenceOfServer42FlowRttRules(initialSwitchProps.keySet())
+        flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build())
+        initialSwitchProps && switchHelper.revertToOriginSwitchSetup(initialSwitchProps, flowRttToggleInitialState)
 
         where:
         data << [
@@ -582,8 +609,11 @@ class Server42FlowRttSpec extends HealthCheckSpecification {
         given: "Two active switches, src has server42 connected with incorrect config in swProps"
         def switchPair = switchPairs.all().withOnlySourceSwitchConnectedToServer42().random()
 
-        def flowRttFeatureStartState = changeFlowRttToggle(true)
-        def initialFlowRttSw = changeFlowRttSwitch(switchPair.src, true)
+        def flowRttToggleInitialState = northbound.featureToggles.server42FlowRtt
+        !flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build())
+        switchHelper.waitForS42SwRulesSetup()
+
+        def initialSwitchProps = [switchPair.src].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(switchPair.src, true)] }
 
         when: "Update the server42 in switch properties on the src switch (incorrect port)"
         def newS42Port = topology.getAllowedPortsForSwitch(topology.activeSwitches.find {
@@ -596,10 +626,10 @@ class Server42FlowRttSpec extends HealthCheckSpecification {
         def swPropIsWrong = true
 
         then: "server42 rules on the switch are updated"
-        def amountOfS42Rules = (switchPair.src.features.contains(SwitchFeature.NOVIFLOW_PUSH_POP_VXLAN)
-                || switchPair.src.features.contains(KILDA_OVS_PUSH_POP_MATCH_VXLAN)) ?
2 : 1 + def amountOfS42Rules = switchHelper.getExpectedS42SwitchRulesBasedOnVxlanSupport(switchPair.src.dpId) + def switchRules = switchRulesFactory.get(switchPair.src.dpId) Wrappers.wait(RULES_INSTALLATION_TIME) { - def s42Rules = northbound.getSwitchRules(switchPair.src.dpId).flowEntries.findAll { + def s42Rules = switchRules.getRules().findAll { it.cookie in [SERVER_42_FLOW_RTT_OUTPUT_VLAN_COOKIE, SERVER_42_FLOW_RTT_OUTPUT_VXLAN_COOKIE] } assert s42Rules.size() == amountOfS42Rules @@ -610,7 +640,6 @@ class Server42FlowRttSpec extends HealthCheckSpecification { !switchHelper.synchronizeAndCollectFixedDiscrepancies(switchPair.src.dpId).isPresent() when: "Create a flow on the given switch pair" - def flowCreateTime = new Date() def flow = flowHelperV2.randomFlow(switchPair) flowHelperV2.addFlow(flow) @@ -626,9 +655,9 @@ class Server42FlowRttSpec extends HealthCheckSpecification { then: "server42 related rules are updated according to the new config" Wrappers.wait(RULES_INSTALLATION_TIME) { - def swRules = northbound.getSwitchRules(switchPair.src.dpId).flowEntries + def swRules = switchRules.getRules() def flowS42Rules = swRules.findAll { - new Cookie(it.cookie).getType() in [CookieType.SERVER_42_INPUT, CookieType.SERVER_42_INGRESS] + new Cookie(it.cookie).getType() in [CookieType.SERVER_42_FLOW_RTT_INPUT, CookieType.SERVER_42_FLOW_RTT_INGRESS] } def swS42Rules = swRules.findAll { it.cookie in [SERVER_42_FLOW_RTT_OUTPUT_VLAN_COOKIE, SERVER_42_FLOW_RTT_OUTPUT_VXLAN_COOKIE] @@ -649,8 +678,9 @@ class Server42FlowRttSpec extends HealthCheckSpecification { } cleanup: - flowRttFeatureStartState && changeFlowRttToggle(flowRttFeatureStartState) - switchPair && changeFlowRttSwitch(switchPair.src, initialFlowRttSw) + flow && flowHelperV2.deleteFlow(flow.flowId) && switchHelper.verifyAbsenceOfServer42FlowRttRules(initialSwitchProps.keySet()) + flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build()) + initialSwitchProps && switchHelper.revertToOriginSwitchSetup(initialSwitchProps, flowRttToggleInitialState) swPropIsWrong && northbound.updateSwitchProperties(switchPair.src.dpId, originalSrcSwPros) } @@ -660,13 +690,12 @@ class Server42FlowRttSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().withBothSwitchesConnectedToServer42().random() and: "server42FlowRtt toggle is set to true" - def flowRttFeatureStartState = changeFlowRttToggle(true) + def flowRttToggleInitialState = northbound.featureToggles.server42FlowRtt + !flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build()) + switchHelper.waitForS42SwRulesSetup() and: "server42FlowRtt is enabled on src/dst switches" - def initialSrcSwS42Props = switchHelper.getCachedSwProps(switchPair.src.dpId).server42FlowRtt - def initialDstSwS42Props = switchHelper.getCachedSwProps(switchPair.dst.dpId).server42FlowRtt - changeFlowRttSwitch(switchPair.src, true) - changeFlowRttSwitch(switchPair.dst, true) + def initialSwitchProps = [switchPair.src, switchPair.dst].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] } when: "Create a LAG port on the src switch" def portsForLag = topology.getAllowedPortsForSwitch(switchPair.src)[-2, -1] @@ -674,7 +703,6 @@ class Server42FlowRttSpec extends HealthCheckSpecification { def lagPort = northboundV2.createLagLogicalPort(switchPair.src.dpId, payload).logicalPortNumber and: "Create a flow" - def flowCreateTime = new Date() def flow = 
flowHelperV2.randomFlow(switchPair).tap { it.source.portNumber = lagPort } @@ -687,65 +715,9 @@ class Server42FlowRttSpec extends HealthCheckSpecification { } cleanup: "Revert system to original state" + flow && flowHelperV2.deleteFlow(flow.flowId) && switchHelper.verifyAbsenceOfServer42FlowRttRules(initialSwitchProps.keySet()) lagPort && northboundV2.deleteLagLogicalPort(switchPair.src.dpId, lagPort) - flowRttFeatureStartState && changeFlowRttToggle(flowRttFeatureStartState) - initialSrcSwS42Props && changeFlowRttSwitch(switchPair.src, initialSrcSwS42Props) - initialDstSwS42Props && changeFlowRttSwitch(switchPair.dst, initialSrcSwS42Props) - } - - def changeFlowRttSwitch(Switch sw, boolean requiredState) { - def originalProps = northbound.getSwitchProperties(sw.dpId) - if (originalProps.server42FlowRtt != requiredState) { - def s42Config = sw.prop - northbound.updateSwitchProperties(sw.dpId, originalProps.jacksonCopy().tap { - server42FlowRtt = requiredState - server42MacAddress = s42Config ? s42Config.server42MacAddress : null - server42Port = s42Config ? s42Config.server42Port : null - server42Vlan = s42Config ? s42Config.server42Vlan : null - }) - } - Wrappers.wait(RULES_INSTALLATION_TIME) { - def amountOfS42Rules = (sw.features.contains(SwitchFeature.NOVIFLOW_PUSH_POP_VXLAN) - || sw.features.contains(KILDA_OVS_PUSH_POP_MATCH_VXLAN)) ? 2 : 1 - def s42Rules = northbound.getSwitchRules(sw.dpId).flowEntries.findAll { - it.cookie in [SERVER_42_FLOW_RTT_OUTPUT_VLAN_COOKIE, - SERVER_42_FLOW_RTT_OUTPUT_VXLAN_COOKIE] - } - assert requiredState ? (s42Rules.size() == amountOfS42Rules) : s42Rules.empty - } - return originalProps.server42FlowRtt - } - - def changeFlowRttToggle(boolean requiredState) { - def originalState = northbound.featureToggles.server42FlowRtt - if (originalState != requiredState) { - northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(requiredState).build()) - } - //not going to check rules on every switch in the system. 
sleep does the trick fine - sleep(3000) - return originalState - } - - def revertToOrigin(flows, flowRttFeatureStartState, initialSwitchRtt) { - flows.each { flowHelperV2.deleteFlow(it.flowId) } - //make sure that s42 rules are deleted - withPool { - Wrappers.wait(RULES_INSTALLATION_TIME) { - initialSwitchRtt.keySet().eachParallel { sw -> - assert northbound.getSwitchRules(sw.dpId).flowEntries.findAll { - new Cookie(it.cookie).getType() in [CookieType.SERVER_42_FLOW_RTT_INPUT, - CookieType.SERVER_42_FLOW_RTT_INGRESS] - }.empty - } - } - } - flowRttFeatureStartState != null && changeFlowRttToggle(flowRttFeatureStartState) - initialSwitchRtt.each { sw, state -> changeFlowRttSwitch(sw, state) } - initialSwitchRtt.keySet().each { Switch sw -> - Wrappers.wait(RULES_INSTALLATION_TIME) { - assertThat(northbound.getSwitchRules(sw.dpId).flowEntries*.cookie.toArray()).as(sw.dpId.toString()) - .containsExactlyInAnyOrder(*sw.defaultCookies).as(sw.dpId.toString()) - } - } + flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build()) + initialSwitchProps && switchHelper.revertToOriginSwitchSetup(initialSwitchProps, flowRttToggleInitialState) } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42HaFlowRttSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42HaFlowRttSpec.groovy new file mode 100644 index 00000000000..fad01c4a9b4 --- /dev/null +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42HaFlowRttSpec.groovy @@ -0,0 +1,197 @@ +package org.openkilda.functionaltests.spec.server42 + +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE +import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT +import static org.openkilda.functionaltests.model.stats.Direction.FORWARD +import static org.openkilda.functionaltests.model.stats.Direction.REVERSE +import static org.openkilda.functionaltests.model.stats.FlowStatsMetric.FLOW_RTT +import static org.openkilda.messaging.payload.flow.FlowEncapsulationType.VXLAN +import static org.openkilda.testing.Constants.RULES_DELETION_TIME +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.STATS_FROM_SERVER42_LOGGING_TIMEOUT +import static org.openkilda.testing.Constants.WAIT_OFFSET + +import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.HaFlowFactory +import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.model.HaFlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory +import org.openkilda.functionaltests.helpers.model.SwitchTriplet +import org.openkilda.functionaltests.model.stats.FlowStats +import org.openkilda.functionaltests.model.stats.Origin +import org.openkilda.messaging.model.system.FeatureTogglesDto +import org.openkilda.messaging.payload.flow.FlowState +import org.openkilda.model.cookie.CookieBase.CookieType + +import org.springframework.beans.factory.annotation.Autowired +import spock.lang.Shared + +class Server42HaFlowRttSpec extends HealthCheckSpecification { + + @Shared + @Autowired + HaFlowFactory haFlowFactory + + @Shared + @Autowired + FlowStats 
flowStats + + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory + + @Tags(TOPOLOGY_DEPENDENT) + def "Create an Ha-Flow (#description) with server42 Rtt feature and check datapoints in tsdb"() { + given: "Three active switches with server42 connected" + assumeTrue((topology.getActiveServer42Switches().size() >= 3), "Unable to find active server42") + + def swT = topologyHelper.findSwitchTripletWithSharedEpInTheMiddleOfTheChainServer42Support() + assert swT, "There is no switch triplet for the further ha-flow creation" + + when: "Set server42FlowRtt toggle to true" + def flowRttToggleInitialState = northbound.featureToggles.server42FlowRtt + !flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build()) + switchHelper.waitForS42SwRulesSetup() + + and: "server42FlowRtt is enabled on all switches" + def initialSwitchesProps = [swT.shared, swT.ep1, swT.ep2].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] } + + and: "Create Ha-Flow" + HaFlowExtended haFlow = haFlowBuilder(swT).build().waitForBeingInState(FlowState.UP) + + then: "Check if stats for FORWARD and REVERSE directions are available for the first sub-Flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + def subFlow1Stats = flowStats.of(haFlow.subFlows.first().flowId) + assert subFlow1Stats.get(FLOW_RTT, FORWARD, Origin.SERVER_42).hasNonZeroValues() + assert subFlow1Stats.get(FLOW_RTT, REVERSE, Origin.SERVER_42).hasNonZeroValues() + } + + and: "Check if stats for FORWARD and REVERSE directions are available for the second sub-Flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + def subFlow2Stats = flowStats.of(haFlow.subFlows.last().flowId) + assert subFlow2Stats.get(FLOW_RTT, FORWARD, Origin.SERVER_42).hasNonZeroValues() + assert subFlow2Stats.get(FLOW_RTT, REVERSE, Origin.SERVER_42).hasNonZeroValues() + } + + cleanup: "Revert system to original state" + haFlow && haFlow.delete() && switchHelper.verifyAbsenceOfServer42FlowRttRules(initialSwitchesProps.keySet()) + flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build()) + initialSwitchesProps && switchHelper.revertToOriginSwitchSetup(initialSwitchesProps, flowRttToggleInitialState) + + where: + description | haFlowBuilder + "default flow encapsulation TRANSIT_VLAN" | { SwitchTriplet switchTriplet -> haFlowFactory.getBuilder(switchTriplet).withSharedEndpointFullPort().withEp1FullPort().withEp2FullPort() } + "default flow encapsulation VXLAN" | { SwitchTriplet switchTriplet -> haFlowFactory.getBuilder(switchTriplet).withSharedEndpointFullPort().withEp1FullPort().withEp2FullPort().withEncapsulationType(VXLAN) } + "shared ep is the full port and encapsulation VXLAN" | { SwitchTriplet switchTriplet -> haFlowFactory.getBuilder(switchTriplet).withSharedEndpointFullPort().withEncapsulationType(VXLAN) } + "shared ep qnq and encapsulation TRANSIT_VLAN" | { SwitchTriplet switchTriplet -> haFlowFactory.getBuilder(switchTriplet).withSharedEndpointQnQ() } + "tagged flow encapsulation VXLAN" | { SwitchTriplet switchTriplet -> haFlowFactory.getBuilder(switchTriplet).withEncapsulationType(VXLAN) } + "ep1 and ep2 are on the same switch and port and encapsulation TRANSIT_VLAN" | { SwitchTriplet switchTriplet -> haFlowFactory.getBuilder(switchTriplet).withEp1AndEp2SameSwitchAndPort() } + "ep1 is the full port and encapsulation TRANSIT_VLAN" | { SwitchTriplet switchTriplet -> 
haFlowFactory.getBuilder(switchTriplet).withEp1FullPort() } + "all endpoints qnq and encapsulation TRANSIT_VLAN" | { SwitchTriplet switchTriplet -> haFlowFactory.getBuilder(switchTriplet).withEp1QnQ().withEp2QnQ().withSharedEndpointQnQ() } + "protected path" | { SwitchTriplet switchTriplet -> haFlowFactory.getBuilder(switchTriplet).withProtectedPath(true) } + "ep1+ep2 qnq and encapsulation TRANSIT_VLAN" | { SwitchTriplet switchTriplet -> haFlowFactory.getBuilder(switchTriplet).withEp2QnQ().withEp1QnQ() } +// known issue(qnq and VXLAN(ovs)) https://github.com/telstra/open-kilda/issues/4572 +// "ep2 qnq and encapsulation VXLAN" | { SwitchTriplet switchTriplet -> haFlowFactory.getBuilder(switchTriplet).withEp2QnQ().withEncapsulationType(VXLAN) } + + } + + @Tags(HARDWARE) //not supported on a local env (the 'stub' service doesn't send real traffic through a switch) + def "Able to synchronize an Ha-Flow(shared path: #isHaFlowWithSharedPath) with the following installation of missing server42 rules"() { + given: "Three active switches with server42 connected" + assert swT, "There is no switch triplet for the ha-flow creation" + + and: "Set server42FlowRtt toggle to true" + def flowRttToggleInitialState = northbound.featureToggles.server42FlowRtt + !flowRttToggleInitialState && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(true).build()) + switchHelper.waitForS42SwRulesSetup() + + and: "server42FlowRtt is enabled on all switches" + def initialSwitchesProps = [swT.shared, swT.ep1, swT.ep2].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] } + + and: "Create Ha-Flow" + HaFlowExtended haFlow = haFlowFactory.getRandom(swT) + assert isHaFlowWithSharedPath ? northboundV2.getHaFlowPaths(haFlow.haFlowId).sharedPath.forward : !northboundV2.getHaFlowPaths(haFlow.haFlowId).sharedPath.forward + + and: "Verify server42 rtt stats are available for both sub-Flows in forward and reverse direction" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + def subFlow1Stats = flowStats.of(haFlow.subFlows.first().getFlowId()) + assert subFlow1Stats.get(FLOW_RTT, FORWARD, Origin.SERVER_42).hasNonZeroValues() + assert subFlow1Stats.get(FLOW_RTT, REVERSE, Origin.SERVER_42).hasNonZeroValues() + + def subFlow2Stats = flowStats.of(haFlow.subFlows.last().getFlowId()) + assert subFlow2Stats.get(FLOW_RTT, FORWARD, Origin.SERVER_42).hasNonZeroValues() + assert subFlow2Stats.get(FLOW_RTT, REVERSE, Origin.SERVER_42).hasNonZeroValues() + } + + when: "Delete ingress server42 rule(s) related to the flow on the shared switch" + //if ha-flow doesn't have shared path, shared endpoint is y-point and has server42 flow rtt ingress rule per sub-Flows + def switchRules = switchRulesFactory.get(swT.shared.dpId) + def cookiesToDelete = switchRules.getRulesByCookieType(CookieType.SERVER_42_FLOW_RTT_INGRESS).cookie + cookiesToDelete.each { switchRules.delete(it) } + + then: "System detects missing rule on the shared switch" + Wrappers.wait(RULES_DELETION_TIME) { + assert northbound.validateSwitch(swT.shared.dpId).rules.missing.sort() == cookiesToDelete.sort() + } + + and: "Ha-Flow is valid and UP" + haFlow.validate().subFlowValidationResults.each { validationInfo -> + if (validationInfo.direction == "forward") { + assert !validationInfo.asExpected + } else { + assert validationInfo.asExpected + } + } + haFlow.retrieveDetails().status == FlowState.UP + + def timeWhenMissingRuleIsDetected = new Date().getTime() + + and: "The server42 stats for both sub-Flows in forward direction are not 
increased" + !flowStats.of(haFlow.subFlows.first().flowId).get(FLOW_RTT, FORWARD, Origin.SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + !flowStats.of(haFlow.subFlows.last().flowId).get(FLOW_RTT, FORWARD, Origin.SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + + and: "The server42 stats for both sub-Flows in reverse direction are increased" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + WAIT_OFFSET) { + assert flowStats.of(haFlow.subFlows.first().flowId).get(FLOW_RTT, REVERSE, Origin.SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + assert flowStats.of(haFlow.subFlows.last().flowId).get(FLOW_RTT, REVERSE, Origin.SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + } + + when: "Synchronize the Ha-Flow" + haFlow.sync() + + then: "Missing ingress server42 rules are reinstalled on the shared switch" + int expectedCookiesNumber = isHaFlowWithSharedPath ? 1 : 2 + Wrappers.wait(RULES_INSTALLATION_TIME) { + assert northbound.validateSwitch(swT.shared.dpId).rules.missing.empty + assert switchRules.getRulesByCookieType(CookieType.SERVER_42_FLOW_RTT_INGRESS).cookie.size() == expectedCookiesNumber + } + def timeWhenMissingRuleIsReinstalled = new Date().getTime() + + then: "The server42 stats for both FORWARD and REVERSE directions are available for the first sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + WAIT_OFFSET, 1) { + def stats = flowStats.of(haFlow.subFlows.first().flowId) + assert stats.get(FLOW_RTT, FORWARD, Origin.SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsReinstalled) + assert stats.get(FLOW_RTT, REVERSE, Origin.SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsReinstalled) + } + + and: "The server42 stats for both FORWARD and REVERSE directions are available for the second sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + WAIT_OFFSET) { + def stats = flowStats.of(haFlow.subFlows.last().flowId) + assert stats.get(FLOW_RTT, FORWARD, Origin.SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + assert stats.get(FLOW_RTT, REVERSE, Origin.SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + } + + cleanup: "Revert system to original state" + haFlow && haFlow.delete() && switchHelper.verifyAbsenceOfServer42FlowRttRules(initialSwitchesProps.keySet()) + flowRttToggleInitialState != null && northbound.toggleFeature(FeatureTogglesDto.builder().server42FlowRtt(flowRttToggleInitialState).build()) + initialSwitchesProps && switchHelper.revertToOriginSwitchSetup(initialSwitchesProps, flowRttToggleInitialState) + + where: + isHaFlowWithSharedPath | swT +// This case is disabled due to changes in hardware env (switch replacement is required). 
+// true | topologyHelper.findSwitchTripletWithSharedEpThatIsNotNeighbourToEp1AndEp2Server42Support() + false | topologyHelper.findSwitchTripletWithSharedEpInTheMiddleOfTheChainServer42Support() + } +} diff --git a/src-java/testing/functional-tests/src/test/resources/topology.yaml b/src-java/testing/functional-tests/src/test/resources/topology.yaml index 03235aa4b2c..ba59cdfea3a 100644 --- a/src-java/testing/functional-tests/src/test/resources/topology.yaml +++ b/src-java/testing/functional-tests/src/test/resources/topology.yaml @@ -58,6 +58,11 @@ switches: out_ports: - port: 10 vlan_range: 101..150 + prop: + server42_flow_rtt: true + server42_port: 13 + server42_mac_address: 02:70:63:61:70:00 + server42_vlan: 1007 - name: ofsw8 dp_id: 00:00:00:00:00:00:00:08 From c5b74538db7ff312db70dd8c42eb5f323cf0a5fc Mon Sep 17 00:00:00 2001 From: Pablo Murillo Date: Tue, 19 Dec 2023 17:55:45 +0000 Subject: [PATCH 4/5] remove opentsdb and hbase --- .env.example | 1 - .../topology.properties.tmpl | 5 - .../docker-compose/docker-compose.tmpl | 53 +----- confd/templates/makefile/makefile.tmpl | 4 +- .../opentsdb-topology/opentsdb-topology.tmpl | 10 -- confd/vars/main.yaml | 2 - docker/hbase/Dockerfile | 35 ---- docker/hbase/hbase-conf/hbase-env.sh | 137 -------------- docker/hbase/hbase-conf/hbase-site.xml | 72 -------- docker/hbase/hbase-conf/start-hbase | 6 - docker/opentsdb/Dockerfile | 33 ---- docker/opentsdb/app/create_tables.py | 57 ------ docker/opentsdb/app/start-opentsdb | 6 - docker/opentsdb/app/wait-for-it.sh | 169 ------------------ docker/opentsdb/conf/opentsdb.conf | 71 -------- src-java/opentsdb-topology/README.md | 9 +- 16 files changed, 7 insertions(+), 663 deletions(-) delete mode 100644 docker/hbase/Dockerfile delete mode 100644 docker/hbase/hbase-conf/hbase-env.sh delete mode 100644 docker/hbase/hbase-conf/hbase-site.xml delete mode 100755 docker/hbase/hbase-conf/start-hbase delete mode 100644 docker/opentsdb/Dockerfile delete mode 100644 docker/opentsdb/app/create_tables.py delete mode 100644 docker/opentsdb/app/start-opentsdb delete mode 100644 docker/opentsdb/app/wait-for-it.sh delete mode 100644 docker/opentsdb/conf/opentsdb.conf diff --git a/.env.example b/.env.example index ec0a5dcaf02..4f53d825a0e 100644 --- a/.env.example +++ b/.env.example @@ -7,7 +7,6 @@ LAB_API_MEM_LIMIT=512m NEO4J_MEM_LIMIT=2g ZOOKEEPER_MEM_LIMIT=2g KAFKA_MEM_LIMIT=2g -HBASE_MEM_LIMIT=2g FL_1_MEM_LIMIT=2g FL_2_MEM_LIMIT=2g FL_STATS_MEM_LIMIT=2g diff --git a/confd/templates/base-storm-topology/topology.properties.tmpl b/confd/templates/base-storm-topology/topology.properties.tmpl index 4a6fc2972cc..a0386aadacc 100644 --- a/confd/templates/base-storm-topology/topology.properties.tmpl +++ b/confd/templates/base-storm-topology/topology.properties.tmpl @@ -54,11 +54,6 @@ zookeeper.reconnect_delay={{ getv "/kilda_zookeeper_reconnect_delay_ms"}} persistence.implementation.default = {{ getv "/kilda_persistence_default_implementation" }} persistence.implementation.area.history = {{ getv "/kilda_persistence_history_implementation" }} -{{if getv "/kilda_opentsdb_hosts"}} -opentsdb.target.opentsdb = http://{{ getv "/kilda_opentsdb_hosts" }}:{{ getv "/kilda_opentsdb_port" }} -{{else}} -opentsdb.target.opentsdb = -{{end}} {{if getv "/kilda_victoriametrics_host"}} opentsdb.target.victoriametrics = http://{{ getv "/kilda_victoriametrics_host" }}:{{ getv "/kilda_victoriametrics_write_port" }}{{ getv "/kilda_victoriametrics_path" }} {{else}} diff --git a/confd/templates/docker-compose/docker-compose.tmpl 
b/confd/templates/docker-compose/docker-compose.tmpl index fae1ded4d98..909595e1d8c 100644 --- a/confd/templates/docker-compose/docker-compose.tmpl +++ b/confd/templates/docker-compose/docker-compose.tmpl @@ -271,27 +271,6 @@ services: environment: - WFM_TOPOLOGIES_MODE - hbase: - container_name: hbase - hostname: hbase.pendev - image: kilda/hbase:latest - command: /opt/hbase/bin/start-hbase - volumes: - - hbase_data:/data/hbase - depends_on: - zookeeper: - condition: service_healthy - healthcheck: - test: ["CMD-SHELL", "jps | grep --silent HMaster"] - interval: 30s - timeout: 10s - retries: 3 - networks: - default: - aliases: - - hbase.pendev - mem_limit: ${HBASE_MEM_LIMIT:-2g} - storm-nimbus: container_name: storm-nimbus hostname: nimbus.pendev @@ -300,10 +279,6 @@ services: depends_on: zookeeper: condition: service_healthy -{{if not (exists "/no_opentsdb")}} - opentsdb: - condition: service_started -{{end}} {{if not (exists "/no_victoriametrics")}} victoriametrics: condition: service_started @@ -358,10 +333,6 @@ services: logstash: condition: service_healthy {{end}} -{{if not (exists "/no_opentsdb")}} - opentsdb: - condition: service_started -{{end}} {{if not (exists "/no_victoriametrics")}} victoriametrics: condition: service_started @@ -558,25 +529,6 @@ services: - grpc-speaker.pendev mem_limit: ${GRPC_MEM_LIMIT:-2g} {{end}} -{{if not (exists "/no_opentsdb")}} - opentsdb: - container_name: opentsdb - hostname: opentsdb.pendev - image: kilda/opentsdb:latest - command: /app/wait-for-it.sh -t 120 -h hbase.pendev -p 9090 -- /app/start-opentsdb - depends_on: - zookeeper: - condition: service_healthy - hbase: - condition: service_healthy - ports: - - "4242:4242" - networks: - default: - aliases: - - opentsdb.pendev - mem_limit: ${OTSDB_MEM_LIMIT:-2g} -{{end}} {{if not (exists "/no_victoriametrics")}} victoriametrics: container_name: victoriametrics @@ -606,9 +558,7 @@ services: depends_on: northbound: condition: service_started -{{if not (exists "/no_opentsdb")}} opentsdb: - condition: service_started -{{end}}{{if not (exists "/no_victoriametrics")}} victoriametrics: +{{if not (exists "/no_victoriametrics")}} victoriametrics: condition: service_started {{end}} networks: default: @@ -809,7 +759,6 @@ volumes: zookeeper_data: kafka_data: app_server_data: - hbase_data: odb1_data: sql_data: {{if not (exists "/single_orientdb")}} odb2_data: diff --git a/confd/templates/makefile/makefile.tmpl b/confd/templates/makefile/makefile.tmpl index 7d7024d4e72..580ff2e51cd 100644 --- a/confd/templates/makefile/makefile.tmpl +++ b/confd/templates/makefile/makefile.tmpl @@ -8,10 +8,8 @@ build-base: {{if not (exists "/no_grpc_stub")}}build-grpc-stub {{end}}build-lock docker build -t kilda/base-ubuntu:latest docker/base/kilda-base-ubuntu/ docker build -t kilda/zookeeper:latest docker/zookeeper docker build -t kilda/kafka:latest docker/kafka - docker build -t kilda/hbase:latest docker/hbase docker build -t kilda/storm:latest docker/storm -{{if not (exists "/no_opentsdb")}} docker build -t kilda/opentsdb:latest docker/opentsdb -{{end}}{{if not (exists "/no_logstash")}} docker build -t kilda/logstash:latest docker/logstash +{{if not (exists "/no_logstash")}} docker build -t kilda/logstash:latest docker/logstash {{end}}{{if not (exists "/no_elasticsearch")}} docker build -t kilda/elasticsearch:latest docker/elasticsearch {{end}} $(MAKE) -C src-python/lab-service find-python-requirements docker build -t kilda/base-lab-service:latest docker/base/kilda-base-lab-service/ {{if (exists "/ovs_vxlan")}} 
--build-arg=OVS_VERSION=kilda.v2.15.1.3{{end}} diff --git a/confd/templates/opentsdb-topology/opentsdb-topology.tmpl b/confd/templates/opentsdb-topology/opentsdb-topology.tmpl index 576dafbcdfa..3ff635a8aac 100644 --- a/confd/templates/opentsdb-topology/opentsdb-topology.tmpl +++ b/confd/templates/opentsdb-topology/opentsdb-topology.tmpl @@ -9,8 +9,6 @@ config: # spout definitions spouts: - - id: "input.opentsdb" - parallelism: {{ getv "/kilda_opentsdb_num_spouts" }} - id: "input.victoriametrics" parallelism: {{ getv "/kilda_opentsdb_num_spouts" }} - id: "zookeeper.spout" @@ -18,19 +16,11 @@ spouts: # bolt definitions bolts: - - id: "DatapointParseBolt.opentsdb" - parallelism: {{ getv "/kilda_opentsdb_num_datapointparserbolt" }} - numTasks: {{ getv "/kilda_opentsdb_workers_datapointparserbolt" }} - id: "DatapointParseBolt.victoriametrics" parallelism: {{ getv "/kilda_opentsdb_num_datapointparserbolt" }} numTasks: {{ getv "/kilda_opentsdb_workers_datapointparserbolt" }} - - id: "OpenTsdbFilterBolt.opentsdb" - parallelism: {{ getv "/kilda_opentsdb_num_opentsdbfilterbolt" }} - id: "OpenTsdbFilterBolt.victoriametrics" parallelism: {{ getv "/kilda_opentsdb_num_opentsdbfilterbolt" }} - - id: "output.opentsdb" - parallelism: {{ getv "/kilda_opentsdb_num_output_bolt" }} - numTasks: {{ getv "/kilda_opentsdb_tasks_output_bolt" }} - id: "output.victoriametrics" parallelism: {{ getv "/kilda_opentsdb_num_output_bolt" }} numTasks: {{ getv "/kilda_opentsdb_tasks_output_bolt" }} diff --git a/confd/vars/main.yaml b/confd/vars/main.yaml index 871838444e1..af367d6db6b 100644 --- a/confd/vars/main.yaml +++ b/confd/vars/main.yaml @@ -18,8 +18,6 @@ kilda_zookeeper_state_root: "kilda" kilda_zookeeper_reconnect_delay_ms: 100 kilda_zookeeper_reconnect_delay_ms_server42: 10000 kilda_zookeeper_reconnect_delay_ms_speaker: 1000 -kilda_opentsdb_hosts: "opentsdb.pendev" -kilda_opentsdb_port: "4242" kilda_victoriametrics_host: "victoriametrics.pendev" kilda_victoriametrics_write_port: "4242" kilda_victoriametrics_read_port: "8428" diff --git a/docker/hbase/Dockerfile b/docker/hbase/Dockerfile deleted file mode 100644 index 1bd77de317c..00000000000 --- a/docker/hbase/Dockerfile +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright 2017 Telstra Open Source -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -ARG base_image=kilda/base-ubuntu -FROM ${base_image} - -ENV PACKAGE hbase-1.2.4 - -WORKDIR /tmp/ - -RUN wget -q https://archive.apache.org/dist/hbase/1.2.4/${PACKAGE}-bin.tar.gz \ - && wget -q https://archive.apache.org/dist/hbase/1.2.4/${PACKAGE}-bin.tar.gz.md5 \ - && sed 's/\ //g' ${PACKAGE}-bin.tar.gz.md5 > $PACKAGE.tmp.md5 \ - && awk -F ":" '{print $2 " " $1}' $PACKAGE.tmp.md5 > ${PACKAGE}-bin.tar.gz.md5 \ - && md5sum -c ${PACKAGE}-bin.tar.gz.md5 \ - && tar -xzf ${PACKAGE}-bin.tar.gz --directory /opt/ \ - && ln -s /opt/$PACKAGE /opt/hbase \ - && rm -rfv /tmp/* - -WORKDIR /opt/hbase/ - -COPY hbase-conf/hbase-env.sh hbase-conf/hbase-site.xml /opt/hbase/conf/ -COPY hbase-conf/start-hbase /opt/hbase/bin/start-hbase diff --git a/docker/hbase/hbase-conf/hbase-env.sh b/docker/hbase/hbase-conf/hbase-env.sh deleted file mode 100644 index 171a5ecbb9c..00000000000 --- a/docker/hbase/hbase-conf/hbase-env.sh +++ /dev/null @@ -1,137 +0,0 @@ -# -#/** -# * Licensed to the Apache Software Foundation (ASF) under one -# * or more contributor license agreements. See the NOTICE file -# * distributed with this work for additional information -# * regarding copyright ownership. The ASF licenses this file -# * to you under the Apache License, Version 2.0 (the -# * "License"); you may not use this file except in compliance -# * with the License. You may obtain a copy of the License at -# * -# * http://www.apache.org/licenses/LICENSE-2.0 -# * -# * Unless required by applicable law or agreed to in writing, software -# * distributed under the License is distributed on an "AS IS" BASIS, -# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# * See the License for the specific language governing permissions and -# * limitations under the License. -# */ - -# Set environment variables here. - -# This script sets variables multiple times over the course of starting an hbase process, -# so try to keep things idempotent unless you want to take an even deeper look -# into the startup scripts (bin/hbase, etc.) - -# The java implementation to use. Java 1.7+ required. -export JAVA_HOME=/usr/ - -# Extra Java CLASSPATH elements. Optional. -# export HBASE_CLASSPATH= - -# The maximum amount of heap to use. Default is left to JVM default. -# export HBASE_HEAPSIZE=1G - -# Uncomment below if you intend to use off heap cache. For example, to allocate 8G of -# offheap, set the value to "8G". -# export HBASE_OFFHEAPSIZE=1G - -# Extra Java runtime options. -# Below are what we set by default. May only work with SUN JVM. -# For more on why as well as other possible settings, -# see http://wiki.apache.org/hadoop/PerformanceTuning -export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:+PrintFlagsFinal -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap" - -# Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+ -#export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m" -#export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m" - -# Uncomment one of the below three options to enable java garbage collection logging for the server-side processes. - -# This enables basic gc logging to the .out file. -# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps" - -# This enables basic gc logging to its own file. -# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR . 
-# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:" - -# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+. -# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR . -# export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc: -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M" - -# Uncomment one of the below three options to enable java garbage collection logging for the client processes. - -# This enables basic gc logging to the .out file. -# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps" - -# This enables basic gc logging to its own file. -# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR . -# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:" - -# This enables basic GC logging to its own file with automatic log rolling. Only applies to jdk 1.6.0_34+ and 1.7.0_2+. -# If FILE-PATH is not replaced, the log file(.gc) would still be generated in the HBASE_LOG_DIR . -# export CLIENT_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc: -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=512M" - -# See the package documentation for org.apache.hadoop.hbase.io.hfile for other configurations -# needed setting up off-heap block caching. - -# Uncomment and adjust to enable JMX exporting -# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access. -# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html -# NOTE: HBase provides an alternative JMX implementation to fix the random ports issue, please see JMX -# section in HBase Reference Guide for instructions. - -# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" -# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10101" -# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10102" -# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103" -# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104" -# export HBASE_REST_OPTS="$HBASE_REST_OPTS $HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10105" - -# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default. -# export HBASE_REGIONSERVERS=${HBASE_HOME}/conf/regionservers - -# Uncomment and adjust to keep all the Region Server pages mapped to be memory resident -#HBASE_REGIONSERVER_MLOCK=true -#HBASE_REGIONSERVER_UID="hbase" - -# File naming hosts on which backup HMaster will run. $HBASE_HOME/conf/backup-masters by default. -# export HBASE_BACKUP_MASTERS=${HBASE_HOME}/conf/backup-masters - -# Extra ssh options. Empty by default. -# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR" - -# Where log files are stored. $HBASE_HOME/logs by default. -# export HBASE_LOG_DIR=${HBASE_HOME}/logs - -# Enable remote JDWP debugging of major HBase processes. 
Meant for Core Developers
-# export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8070"
-# export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8071"
-# export HBASE_THRIFT_OPTS="$HBASE_THRIFT_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8072"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_ZOOKEEPER_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8073"
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-# export HBASE_PID_DIR=/var/hadoop/pids
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage it's own instance of Zookeeper or not.
-export HBASE_MANAGES_ZK=false
-
-# The default log rolling policy is RFA, where the log file is rolled as per the size defined for the
-# RFA appender. Please refer to the log4j.properties file to see more details on this appender.
-# In case one needs to do log rolling on a date change, one should set the environment property
-# HBASE_ROOT_LOGGER to ",DRFA".
-# For example:
-# HBASE_ROOT_LOGGER=INFO,DRFA
-# The reason for changing default to RFA is to avoid the boundary case of filling out disk space as
-# DRFA doesn't put any cap on the log size. Please refer to HBase-5655 for more context.
diff --git a/docker/hbase/hbase-conf/hbase-site.xml b/docker/hbase/hbase-conf/hbase-site.xml
deleted file mode 100644
index c04ab64f877..00000000000
--- a/docker/hbase/hbase-conf/hbase-site.xml
+++ /dev/null
@@ -1,72 +0,0 @@
-<?xml version="1.0"?>
-
-<configuration>
-
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hbase.rootdir</name>
-    <value>file:///data/hbase</value>
-  </property>
-
-  <property>
-    <name>hbase.master.port</name>
-    <value>60000</value>
-  </property>
-
-  <property>
-    <name>hbase.master.info.port</name>
-    <value>60010</value>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.port</name>
-    <value>60020</value>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value>60030</value>
-  </property>
-
-  <property>
-    <name>hbase.regionserver.ipc.address</name>
-    <value>0.0.0.0</value>
-  </property>
-
-  <property>
-    <name>hbase.rest.port</name>
-    <value>8070</value>
-  </property>
-
-  <property>
-    <name>hbase.rest.info.port</name>
-    <value>8090</value>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>zookeeper.pendev:2181</value>
-  </property>
-
-  <property>
-    <name>hbase.thrift.port</name>
-    <value>9070</value>
-  </property>
-
-  <property>
-    <name>hbase.thrift.info.port</name>
-    <value>9080</value>
-  </property>
-
-</configuration>
diff --git a/docker/hbase/hbase-conf/start-hbase b/docker/hbase/hbase-conf/start-hbase
deleted file mode 100755
index 7035875be1b..00000000000
--- a/docker/hbase/hbase-conf/start-hbase
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-/opt/hbase/bin/hbase-daemon.sh start rest
-/opt/hbase/bin/hbase-daemon.sh start thrift
-/opt/hbase/bin/hbase regionserver start > logregion.log 2>&1 &
-/opt/hbase/bin/hbase master start --localRegionServers=0
diff --git a/docker/opentsdb/Dockerfile b/docker/opentsdb/Dockerfile
deleted file mode 100644
index d0817c82585..00000000000
--- a/docker/opentsdb/Dockerfile
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2017 Telstra Open Source
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. -# - -ARG base_image=kilda/hbase -FROM ${base_image} - -WORKDIR /tmp/ - -COPY app /app - -RUN wget -q https://github.com/OpenTSDB/opentsdb/releases/download/v2.3.0/opentsdb-2.3.0_all.deb \ - && apt-get update -q \ - && apt-get install -yq --no-install-recommends \ - gnuplot \ - gcc \ - && python3 -m pip install happybase==1.2.0 \ - && dpkg -i opentsdb-2.3.0_all.deb \ - && chmod 777 /app/* \ - && rm -rfv /var/lib/apt/lists/* /tmp/* /var/tmp/* - -COPY conf/opentsdb.conf /etc/opentsdb/opentsdb.conf diff --git a/docker/opentsdb/app/create_tables.py b/docker/opentsdb/app/create_tables.py deleted file mode 100644 index d83db8b931e..00000000000 --- a/docker/opentsdb/app/create_tables.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/python -# Copyright 2017 Telstra Open Source -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - - -import happybase -import sys - -host = 'hbase.pendev' -port = 9090 - -new_tables = ['tsdb', 'tsdb-uid', 'tsdb-tree', 'tsdb-meta'] - -connection = happybase.Connection(host=host, port=port) -existing_tables = connection.tables() - - -def create_table(table): - if (table == 'tsdb') or (table == 'tsdb-tree'): - families = {'t': dict(max_versions=1, compression='none', - bloom_filter_type='ROW')} - elif table == 'tsdb-uid': - families = {'id': dict(compression='none', bloom_filter_type='ROW'), - 'name': dict(compression='none', bloom_filter_type='ROW')} - elif table == 'tsdb-meta': - families = {'name': dict(compression='none', bloom_filter_type='ROW')} - else: - sys.exit("Unknown table {} was requested.".format(table)) - - print("Creating %s" % table) - connection.create_table(table, families) - if bytes(table, 'utf-8') not in connection.tables(): - sys.exit("Could not create {}".format(table)) - - -for table in new_tables[:]: - if table in existing_tables: - print("%s exist" % table) - new_tables.remove(table) - -if len(new_tables) > 0: - for table in new_tables: - create_table(table) -else: - print("All OpenTSDB tables already created") diff --git a/docker/opentsdb/app/start-opentsdb b/docker/opentsdb/app/start-opentsdb deleted file mode 100644 index 0dcca9bc857..00000000000 --- a/docker/opentsdb/app/start-opentsdb +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -echo "checking tables in hbase" -python3 /app/create_tables.py - -echo "starting opentsdb" -/usr/share/opentsdb/bin/tsdb tsd --port=4242 --staticroot=/usr/share/opentsdb/static --cachedir=/tmp --auto-metric diff --git a/docker/opentsdb/app/wait-for-it.sh b/docker/opentsdb/app/wait-for-it.sh deleted file mode 100644 index 3251e3ea017..00000000000 --- a/docker/opentsdb/app/wait-for-it.sh +++ /dev/null @@ -1,169 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2017 Telstra Open Source -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# Use this script to test if a given TCP host/port are available - -cmdname=$(basename $0) - -echoerr() { if [[ $QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } - -usage() -{ - cat << USAGE >&2 -Usage: - $cmdname host:port [-s] [-t timeout] [-- command args] - -h HOST | --host=HOST Host or IP under test - -p PORT | --port=PORT TCP port under test - Alternatively, you specify the host and port as host:port - -s | --strict Only execute subcommand if the test succeeds - -q | --quiet Don't output any status messages - -t TIMEOUT | --timeout=TIMEOUT - Timeout in seconds, zero for no timeout - -- COMMAND ARGS Execute command with args after the test finishes -USAGE - exit 1 -} -wait_for() -{ - if [[ $TIMEOUT -gt 0 ]]; then - echoerr "$cmdname: waiting $TIMEOUT seconds for $HOST:$PORT" - else - echoerr "$cmdname: waiting for $HOST:$PORT without a timeout" - fi - start_ts=$(date +%s) - while : - do - (echo > /dev/tcp/$HOST/$PORT) >/dev/null 2>&1 - result=$? - if [[ $result -eq 0 ]]; then - end_ts=$(date +%s) - echoerr "$cmdname: $HOST:$PORT is available after $((end_ts - start_ts)) seconds" - break - fi - sleep 1 - done - return $result -} -wait_for_wrapper() -{ - # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 - if [[ $QUIET -eq 1 ]]; then - timeout $TIMEOUT $0 --quiet --child --host=$HOST --port=$PORT --timeout=$TIMEOUT & - else - timeout $TIMEOUT $0 --child --host=$HOST --port=$PORT --timeout=$TIMEOUT & - fi - PID=$! - trap "kill -INT -$PID" INT - wait $PID - RESULT=$? - if [[ $RESULT -ne 0 ]]; then - echoerr "$cmdname: timeout occurred after waiting $TIMEOUT seconds for $HOST:$PORT" - fi - return $RESULT -} -# process arguments -while [[ $# -gt 0 ]] -do - case "$1" in - *:* ) - hostport=(${1//:/ }) - HOST=${hostport[0]} - PORT=${hostport[1]} - shift 1 - ;; - --child) - CHILD=1 - shift 1 - ;; - -q | --quiet) - QUIET=1 - shift 1 - ;; - -s | --strict) - STRICT=1 - shift 1 - ;; - -h) - HOST="$2" - if [[ $HOST == "" ]]; then break; fi - shift 2 - ;; - --host=*) - HOST="${1#*=}" - shift 1 - ;; - -p) - PORT="$2" - if [[ $PORT == "" ]]; then break; fi - shift 2 - ;; - --port=*) - PORT="${1#*=}" - shift 1 - ;; - -t) - TIMEOUT="$2" - if [[ $TIMEOUT == "" ]]; then break; fi - shift 2 - ;; - --timeout=*) - TIMEOUT="${1#*=}" - shift 1 - ;; - --) - shift - CLI="$@" - break - ;; - --help) - usage - ;; - *) - echoerr "Unknown argument: $1" - usage - ;; - esac -done -if [[ "$HOST" == "" || "$PORT" == "" ]]; then - echoerr "Error: you need to provide a host and port to test." - usage -fi -TIMEOUT=${TIMEOUT:-15} -STRICT=${STRICT:-0} -CHILD=${CHILD:-0} -QUIET=${QUIET:-0} -if [[ $CHILD -gt 0 ]]; then - wait_for - RESULT=$? - exit $RESULT -else - if [[ $TIMEOUT -gt 0 ]]; then - wait_for_wrapper - RESULT=$? - else - wait_for - RESULT=$? 
-    fi
-fi
-if [[ $CLI != "" ]]; then
-    if [[ $RESULT -ne 0 && $STRICT -eq 1 ]]; then
-        echoerr "$cmdname: strict mode, refusing to execute subprocess"
-        exit $RESULT
-    fi
-    exec $CLI
-else
-    exit $RESULT
-fi
diff --git a/docker/opentsdb/conf/opentsdb.conf b/docker/opentsdb/conf/opentsdb.conf
deleted file mode 100644
index 15fd1041add..00000000000
--- a/docker/opentsdb/conf/opentsdb.conf
+++ /dev/null
@@ -1,71 +0,0 @@
-# --------- NETWORK ----------
-# The TCP port TSD should use for communications
-# *** REQUIRED ***
-tsd.network.port = 4242
-
-# The IPv4 network address to bind to, defaults to all addresses
-# tsd.network.bind = 0.0.0.0
-
-# Disable Nagel's algorithm. Default is True
-#tsd.network.tcp_no_delay = true
-
-# Determines whether or not to send keepalive packets to peers, default
-# is True
-#tsd.network.keep_alive = true
-
-# Determines if the same socket should be used for new connections, default
-# is True
-#tsd.network.reuse_address = true
-
-# Number of worker threads dedicated to Netty, defaults to # of CPUs * 2
-#tsd.network.worker_threads = 8
-
-# Whether or not to use NIO or tradditional blocking IO, defaults to True
-#tsd.network.async_io = true
-
-# ----------- HTTP -----------
-# The location of static files for the HTTP GUI interface.
-# *** REQUIRED ***
-tsd.http.staticroot = /usr/share/opentsdb/static/
-
-# Where TSD should write it's cache files to
-# *** REQUIRED ***
-tsd.http.cachedir = /tmp/opentsdb
-
-# --------- CORE ----------
-# Whether or not to automatically create UIDs for new metric types, default
-# is False
-tsd.core.auto_create_metrics = true
-tsd.core.auto_create_tagks = true
-tsd.core.auto_create_tagvs = true
-
-# Full path to a directory containing plugins for OpenTSDB
-tsd.core.plugin_path = /usr/share/opentsdb/plugins
-
-# --------- STORAGE ----------
-# Whether or not to enable data compaction in HBase, default is True
-#tsd.storage.enable_compaction = true
-
-tsd.storage.fix_duplicates=true
-
-# How often, in milliseconds, to flush the data point queue to storage,
-# default is 1,000
-# tsd.storage.flush_interval = 1000
-
-# Name of the HBase table where data points are stored, default is "tsdb"
-#tsd.storage.hbase.data_table = tsdb
-
-# Name of the HBase table where UID information is stored, default is "tsdb-uid"
-#tsd.storage.hbase.uid_table = tsdb-uid
-
-# Path under which the znode for the -ROOT- region is located, default is "/hbase"
-#tsd.storage.hbase.zk_basedir = /hbase_unsecure
-
-# A comma separated list of Zookeeper hosts to connect to, with or without
-# port specifiers, default is "localhost"
-tsd.storage.hbase.zk_quorum = zookeeper.pendev:2181
-
-# Whether or not to enable incoming chunk support for the HTTP RPC
-tsd.http.request.enable_chunked = true
-tsd.http.request.max_chunk = 16384
-
-tsd.storage.max_tags = 12
diff --git a/src-java/opentsdb-topology/README.md b/src-java/opentsdb-topology/README.md
index a6ab50e8d26..fe78355aa51 100644
--- a/src-java/opentsdb-topology/README.md
+++ b/src-java/opentsdb-topology/README.md
@@ -1,9 +1,10 @@
 # WFM (WorkFlow Manager) - OpenTSDB Topology
 
-This sub-project holds the Storm topology "OpenTSDB" that is used to submit
-the statistics from OpenKilda to OpenTSDB.
+This sub-project holds the Storm topology "OpenTSDB" that is used to submit
+the statistics from OpenKilda to OpenTSDB or another TSDB with OpenTSDB write
+capabilities, such as Victoria Metrics.
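A minimal sketch of the write target this README now refers to: rendered from the
`confd/templates/base-storm-topology/topology.properties.tmpl` change above, and assuming the
default `confd/vars/main.yaml` values (`victoriametrics.pendev`, write port `4242`) with an
empty `kilda_victoriametrics_path`, the resulting topology property would look like:

```
opentsdb.target.victoriametrics = http://victoriametrics.pendev:4242
```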
-## Deployment, Configuration, Testing and Debugging tips +## Deployment, Configuration, Testing and Debugging tips The guidelines, recommendations and commands from `base-topology/README.md` -are applicable to this topology. \ No newline at end of file +are applicable to this topology. \ No newline at end of file From f65fc5dfb09c718783cbe8b796fe8ee097e85771 Mon Sep 17 00:00:00 2001 From: Pablo Murillo Date: Mon, 22 Apr 2024 10:49:42 +0200 Subject: [PATCH 5/5] update CHANGELOG.md --- CHANGELOG.md | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 44893a12b68..7499aa19d2b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,21 @@ # Changelog -## v1.157.0 (15/04/2024) + +## v1.158.0 (23/04/2024) + +### Features: +- [#5353](https://github.com/telstra/open-kilda/pull/5353) 5208 add ha flow support into server42 + + +### Improvements: +- [#5481](https://github.com/telstra/open-kilda/pull/5481) [TEST]: 5208: Server42 RTT: HaFlow [**tests**] +- [#5522](https://github.com/telstra/open-kilda/pull/5522) remove opentsdb and hbase (Issue: [#4925](https://github.com/telstra/open-kilda/issues/4925)) + + +For the complete list of changes, check out [the commit log](https://github.com/telstra/open-kilda/compare/v1.157.0...v1.158.0). + +--- + +## v1.157.0 (17/04/2024) ### Bug Fixes: - [#5632](https://github.com/telstra/open-kilda/pull/5632) GUI hotfix. [**gui**]