Merge branch 'release-1.102.0'

nikitamarchenko committed Aug 18, 2021
2 parents c65310a + b948c1a commit ee16400
Showing 272 changed files with 10,711 additions and 11,830 deletions.
29 changes: 29 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,34 @@
# Changelog

## v1.102.0 (13/08/2021)

### Features:
- [#4416](https://github.com/telstra/open-kilda/pull/4416) LAG: preparation [**storm-topologies**]
- [#4397](https://github.com/telstra/open-kilda/pull/4397) Added LAG DB objects
- [#4371](https://github.com/telstra/open-kilda/pull/4371) Add design for port LAGs [**docs**]

### Bug Fixes:
- [#4432](https://github.com/telstra/open-kilda/pull/4432) Fix index pattern
- [#4402](https://github.com/telstra/open-kilda/pull/4402) Fix missing ISL RTT rules after InstallDefaults operation (Issue: [#4389](https://github.com/telstra/open-kilda/issues/4389)) [**floodlight**]
- [#4415](https://github.com/telstra/open-kilda/pull/4415) Added missed Logical port type in GRPC stub

### Improvements:
- [#4419](https://github.com/telstra/open-kilda/pull/4419) Changed log level of "drop async response" message [**storm-topologies**]
- [#4420](https://github.com/telstra/open-kilda/pull/4420) add test for #4411 [**tests**]
- [#4390](https://github.com/telstra/open-kilda/pull/4390) Introduce Stats Messaging (+ notifications on flow events) [**storm-topologies**]
- [#4428](https://github.com/telstra/open-kilda/pull/4428) Make logstash index configurable
- [#4410](https://github.com/telstra/open-kilda/pull/4410) ignore test: qinq + swapEndpoint [**tests**]
- [#4413](https://github.com/telstra/open-kilda/pull/4413) fix test mirror+connectedDevice in singleTable [**tests**]

### Other changes:
- [#4365](https://github.com/telstra/open-kilda/pull/4365) Parallel topologies, parallel tests [**tests**]
- [#4406](https://github.com/telstra/open-kilda/pull/4406) Update some tests to iterate over all switch models [**tests**]

For the complete list of changes, check out [the commit log](https://github.com/telstra/open-kilda/compare/v1.101.0...v1.102.0).

### Affected Components:
orientdb, grpc, flow, flow-hs, stats, network, fl

## v1.101.0 (09/08/2021)

### Features:
1 change: 1 addition & 0 deletions confd/templates/atdd-staging/kilda.properties.tmpl
@@ -84,6 +84,7 @@ latency.discovery.interval.multiplier = {{ getv "/kilda_latency_discovery_interv

flow.ping.interval=5
docker.host=localhost
parallel.topologies = 3

latency.update.interval = {{ getv "/kilda_latency_update_interval" }}
flow.sla.check.interval.seconds = {{ getv "/kilda_flow_sla_check_interval_seconds" }}
2 changes: 2 additions & 0 deletions confd/templates/base-storm-topology/topology.properties.tmpl
@@ -41,7 +41,9 @@ port.up.down.throttling.delay.seconds.warm.up = {{ getv "/kilda_port_up_down_thr
port.up.down.throttling.delay.seconds.cool.down = {{ getv "/kilda_port_up_down_throttling_delay_seconds_cool_down" }}
port.antiflap.stats.dumping.interval.seconds = 60

lag.port.offset = {{ getv "/kilda_lag_port_offset" }}
bfd.port.offset = {{ getv "/kilda_bfd_port_offset" }}
bfd.port.max.number = {{ getv "/kilda_bfd_port_max_number" }}

zookeeper.connect_string = {{ getv "/kilda_zookeeper_hosts"}}/{{ getv "/kilda_zookeeper_state_root" }}
zookeeper.reconnect_delay={{ getv "/kilda_zookeeper_reconnect_delay_ms"}}
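
With the defaults added to confd/vars/main.yaml in this commit (shown further below), the new lines render to:

~~~
lag.port.offset = 2000
bfd.port.offset = 1000
bfd.port.max.number = 1999
~~~

In this layout, BFD logical port numbers occupy the range 1000-1999, and LAG logical port numbers are derived from the 2000 offset (see the LAG design doc below).
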
2 changes: 1 addition & 1 deletion confd/templates/logstash/logstash.conf.tmpl
@@ -77,6 +77,6 @@ filter {
output {
  elasticsearch {
    hosts => "{{ getv "/kilda_logging_elasticsearch_hosts" }}"
    index => "kilda-%{+YYYY.MM.dd}"
    index => "{{ getv "/kilda_logging_elasticsearch_logstash_index_pattern" }}"
  }
}
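
With the defaults from confd/vars/main.yaml (`kilda_logging_elasticsearch_hosts: "elasticsearch.pendev:9200"` and the new `kilda_logging_elasticsearch_logstash_index_pattern: "kilda-%{+YYYY.MM.dd}"`), the template renders to the same output as before this change, so default behavior is preserved:

~~~
output {
  elasticsearch {
    hosts => "elasticsearch.pendev:9200"
    index => "kilda-%{+YYYY.MM.dd}"
  }
}
~~~
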
15 changes: 14 additions & 1 deletion confd/vars/main.yaml
@@ -123,7 +123,9 @@ kilda_port_up_down_throttling_delay_seconds_min: 1
kilda_port_up_down_throttling_delay_seconds_warm_up: 3
kilda_port_up_down_throttling_delay_seconds_cool_down: 7

kilda_bfd_port_offset: 200
kilda_lag_port_offset: 2000
kilda_bfd_port_offset: 1000
kilda_bfd_port_max_number: 1999
kilda_bfd_interval_ms: 350
kilda_bfd_multiplier: 3

@@ -141,6 +143,7 @@ kilda_logging_elasticsearch_hosts: "elasticsearch.pendev:9200"
kilda_logging_elasticsearch_user: "kilda"
kilda_logging_elasticsearch_pass: "kilda"
kilda_logging_elasticsearch_index: "kilda-*"
kilda_logging_elasticsearch_logstash_index_pattern: "kilda-%{+YYYY.MM.dd}"
kilda_logging_fl_loglevel: "DEBUG"
kilda_logging_fl_logaccess: True
kilda_logging_port_storm: 5001
@@ -187,6 +190,16 @@ kilda_server42_control_switch_to_vlan_1000: "1000=00:00:d7:61:46:7b:46:69,00:00:
kilda_server42_control_switch_to_vlan_2000: "2000=00:00:55:dd:06:49:d9:61,00:00:84:1e:39:d5:dd:40"
kilda_server42_control_switch_to_vlan_1002: "1002=00:00:00:00:00:00:00:02"
kilda_server42_control_switch_to_vlan_1003: "1003=00:00:00:00:00:00:00:03,00:00:00:00:00:00:00:08"
kilda_server42_control_switch_to_vlan_1102: "1102=00:00:00:00:00:01:00:02"
kilda_server42_control_switch_to_vlan_1103: "1103=00:00:00:00:00:01:00:03,00:00:00:00:00:01:00:08"
kilda_server42_control_switch_to_vlan_1202: "1202=00:00:00:00:00:02:00:02"
kilda_server42_control_switch_to_vlan_1203: "1203=00:00:00:00:00:02:00:03,00:00:00:00:00:02:00:08"
kilda_server42_control_switch_to_vlan_1302: "1302=00:00:00:00:00:03:00:02"
kilda_server42_control_switch_to_vlan_1303: "1303=00:00:00:00:00:03:00:03,00:00:00:00:00:03:00:08"
kilda_server42_control_switch_to_vlan_1402: "1402=00:00:00:00:00:04:00:02"
kilda_server42_control_switch_to_vlan_1403: "1403=00:00:00:00:00:04:00:03,00:00:00:00:00:04:00:08"
kilda_server42_control_switch_to_vlan_1502: "1502=00:00:00:00:00:05:00:02"
kilda_server42_control_switch_to_vlan_1503: "1503=00:00:00:00:00:05:00:03,00:00:00:00:00:05:00:08"
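
Each `kilda_server42_control_switch_to_vlan_*` entry appears to map a VLAN number to a comma-separated list of switch IDs. A minimal, illustrative parser for one entry (not the actual server42 code) could look like:

~~~java
import java.util.Arrays;
import java.util.List;

class VlanMappingEntry {
    final int vlan;
    final List<String> switchIds;

    // Parses an entry such as "1102=00:00:00:00:00:01:00:02"
    // or "1103=00:00:00:00:00:01:00:03,00:00:00:00:00:01:00:08".
    VlanMappingEntry(String raw) {
        String[] parts = raw.split("=", 2);
        this.vlan = Integer.parseInt(parts[0]);
        this.switchIds = Arrays.asList(parts[1].split(","));
    }
}
~~~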

kilda_server42_control_kafka_group_id: "server42-control"
kilda_server42_control_zeromq_connection_host: "tcp://server42-server.pendev:5555"
38 changes: 38 additions & 0 deletions docker/db-migration/migrations/011-add-lag-classes.yaml
@@ -0,0 +1,38 @@
databaseChangeLog:
  - changeSet:
      id: tag
      author: snikitin
      changes:
        - tagDatabase:
            tag: 011-add-lag-classes

  - changeSet:
      id: add_lag_logical_port_class
      author: snikitin
      changes:
        - sql: "CREATE CLASS comprises IF NOT EXISTS EXTENDS E"
        - sql: "CREATE CLASS lag_logical_port IF NOT EXISTS EXTENDS V"
        - sql: "CREATE PROPERTY lag_logical_port.switch_id IF NOT EXISTS STRING"
        - sql: "CREATE PROPERTY lag_logical_port.logical_port_number IF NOT EXISTS INTEGER"
        - sql: "CREATE INDEX lag_logical_port.switch_id NOTUNIQUE_HASH_INDEX"
        - sql: "CREATE INDEX lag_logical_port_unique on lag_logical_port (switch_id, logical_port_number) UNIQUE_HASH_INDEX"
      rollback:
        - sql: "DELETE VERTEX lag_logical_port"
        - sql: "DROP INDEX lag_logical_port.switch_id"
        - sql: "DROP INDEX lag_logical_port_unique"
        - sql: "DROP CLASS lag_logical_port IF EXISTS"
        - sql: "DROP CLASS comprises IF EXISTS"
  - changeSet:
      id: add_physical_port_class
      author: snikitin
      changes:
        - sql: "CREATE CLASS physical_port IF NOT EXISTS EXTENDS V"
        - sql: "CREATE PROPERTY physical_port.switch_id IF NOT EXISTS STRING"
        - sql: "CREATE PROPERTY physical_port.port_number IF NOT EXISTS INTEGER"
        - sql: "CREATE INDEX physical_port.switch_id NOTUNIQUE_HASH_INDEX"
        - sql: "CREATE INDEX physical_port_unique on physical_port (switch_id, port_number) UNIQUE_HASH_INDEX"
      rollback:
        - sql: "DELETE VERTEX physical_port"
        - sql: "DROP INDEX physical_port.switch_id"
        - sql: "DROP INDEX physical_port_unique"
        - sql: "DROP CLASS physical_port IF EXISTS"
3 changes: 3 additions & 0 deletions docker/db-migration/migrations/root.yaml
@@ -42,3 +42,6 @@ databaseChangeLog:
  - include:
      relativeToChangelogFile: true
      file: 010-enforce-definite-types-on-history-objects.yaml
  - include:
      relativeToChangelogFile: true
      file: 011-add-lag-classes.yaml
50 changes: 50 additions & 0 deletions docs/design/LAG-for-ports/README.md
@@ -0,0 +1,50 @@
# LAG for ports

## Overview

Link aggregation provides the ability to combine multiple physical connections into one logical connection to improve resiliency. A link aggregation group (LAG) is a group of ports associated with a logical port on the switch.

## API

* Get existing LAG logical ports on the switch `GET /v2/switches/{switch_id}/lags`. Response example:
~~~
[
{
"logical_port_number": 2001,
"port_numbers": [1, 2, 3]
},
...
]
~~~

* Create LAG logical port on the switch `POST /v2/switches/{switch_id}/lags` with body:
~~~
{
"port_numbers": [1, 2, 3]
}
~~~
Response example:
~~~
{
"logical_port_number": 2001,
"port_numbers": [1, 2, 3]
}
~~~

* Delete LAG logical port on the switch `DELETE /v2/switches/{switch_id}/lags/{logical_port_number}`.
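
For illustration, creating a LAG over ports 1, 2 and 3 could look like the following (the host is hypothetical; the path is the endpoint described above):

~~~
curl -X POST http://northbound.example:8080/v2/switches/00:00:00:00:00:00:00:01/lags \
    -H "Content-Type: application/json" \
    -d '{"port_numbers": [1, 2, 3]}'
~~~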


## Details
All logical-port-related commands are sent to the switches via the gRPC speaker.

Open-kilda calculates the logical port number using the following rule: 2000 + the minimum physical port number in the LAG. A physical port is not allowed to belong to two LAGs, so this rule yields a unique logical port number for any correct port configuration. The LAG logical port configuration should be validated before any create operation to avoid inconsistency.
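
A minimal sketch of this numbering rule (illustrative only; the offset constant mirrors `kilda_lag_port_offset: 2000` from confd/vars/main.yaml, but this is not the actual open-kilda implementation):

~~~java
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;

class LagPortNumbering {
    // Mirrors kilda_lag_port_offset in confd/vars/main.yaml.
    private static final int LAG_PORT_OFFSET = 2000;

    // Logical port number = 2000 + the minimum physical port number in the group.
    static int logicalPortNumber(Collection<Integer> physicalPorts) {
        if (physicalPorts.isEmpty()) {
            throw new IllegalArgumentException("LAG must contain at least one physical port");
        }
        return LAG_PORT_OFFSET + Collections.min(physicalPorts);
    }

    public static void main(String[] args) {
        // Matches the API example above: ports [1, 2, 3] -> logical port 2001.
        System.out.println(logicalPortNumber(Arrays.asList(1, 2, 3)));
    }
}
~~~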

Currently, open-kilda doesn't represent any port-related information in the database. The LAG logical port configuration needs to be saved to the database so that the configuration can be restored on the switch. Information about LAGs is stored as separate models in order to minimize the impact on existing data structures.

![domain-model](./domain-model.png)

Open-kilda uses a switch-port pair to represent a flow endpoint. LAG ports created this way may be used as a flow endpoint on one or both sides of a flow to provide flow resiliency.

## Additional changes

During switch/flow validate and sync operations, the LAG port configuration should be checked and installed if required.
Binary file added docs/design/LAG-for-ports/domain-model.png
23 changes: 23 additions & 0 deletions docs/design/LAG-for-ports/domain-model.puml
@@ -0,0 +1,23 @@
@startuml

title LAGs Domain Model

class LagLogicalPort {
    switch_id
    logical_port_number
    ==
    physicalPorts() : PhysicalPort[]
    __
    unique constraint on switch_id+logical_port_number
}

class PhysicalPort {
    switch_id
    port_number
    --
    unique constraint on switch_id+port_number
}

LagLogicalPort o--> PhysicalPort

@enduml
…/org/openkilda/wfm/AbstractBolt.java
@@ -16,6 +16,7 @@
package org.openkilda.wfm;

import static org.openkilda.wfm.share.zk.ZooKeeperSpout.FIELD_ID_LIFECYCLE_EVENT;
import static org.openkilda.wfm.topology.utils.KafkaRecordTranslator.FIELD_ID_PAYLOAD;

import org.openkilda.bluegreen.LifecycleEvent;
import org.openkilda.bluegreen.Signal;
@@ -26,7 +27,6 @@
import org.openkilda.wfm.share.metrics.MeterRegistryHolder;
import org.openkilda.wfm.share.metrics.PushToStreamMeterRegistry;
import org.openkilda.wfm.share.zk.ZkStreams;
import org.openkilda.wfm.topology.AbstractTopology;

import lombok.AccessLevel;
import lombok.Getter;
@@ -36,6 +36,7 @@
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import org.apache.storm.tuple.Values;
import org.slf4j.Logger;
@@ -49,6 +50,7 @@

public abstract class AbstractBolt extends BaseRichBolt {
public static final String FIELD_ID_CONTEXT = "context";
public static final Fields METER_STREAM_FIELDS = new Fields(FIELD_ID_PAYLOAD);

protected transient Logger log = makeLog();
protected boolean active = false;
@@ -343,7 +345,7 @@ protected static String formatTuplePayload(Tuple input) {
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
if (meterOutputStream != null) {
declarer.declareStream(meterOutputStream, AbstractTopology.fieldMessage);
declarer.declareStream(meterOutputStream, METER_STREAM_FIELDS);
}
}
}
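
For context, the Storm pattern this refactoring consolidates: a bolt declares a named output stream with a `Fields` schema, and a downstream bolt reads values back by field name. A self-contained sketch (the stream id and the field value are illustrative; only the pattern mirrors the code above):

~~~java
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;

class MeterStreamSketch {
    static final String METER_STREAM_ID = "meters";   // hypothetical stream id
    static final String FIELD_ID_PAYLOAD = "payload"; // stands in for KafkaRecordTranslator.FIELD_ID_PAYLOAD

    // Declared once by the emitting bolt: the stream carries a single payload field.
    void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declareStream(METER_STREAM_ID, new Fields(FIELD_ID_PAYLOAD));
    }

    // A consuming bolt reads the value back by field name.
    Object extractPayload(Tuple input) {
        return input.getValueByField(FIELD_ID_PAYLOAD);
    }
}
~~~
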
…/CoordinatorBolt.java
@@ -45,6 +45,7 @@
public final class CoordinatorBolt extends AbstractBolt {
public static final String ID = "coordinator.bolt";
public static final String INCOME_STREAM = "coordinator.command";
public static final Fields FIELDS_KEY = new Fields(MessageKafkaTranslator.FIELD_ID_KEY);

private Map<String, Callback> callbacks = new HashMap<>();
private SortedMap<Long, Set<String>> timeouts = new TreeMap<>();
@@ -60,7 +61,7 @@ protected void handleInput(Tuple input) {
}

private void handleCommand(Tuple input) {
String key = input.getStringByField(MessageKafkaTranslator.KEY_FIELD);
String key = input.getStringByField(MessageKafkaTranslator.FIELD_ID_KEY);
CoordinatorCommand command = (CoordinatorCommand) input.getValueByField(COMMAND_FIELD);
switch (command) {
case REQUEST_CALLBACK:
@@ -116,7 +117,7 @@ void tick(Long currentTime) {

@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
Fields fields = new Fields(MessageKafkaTranslator.KEY_FIELD, FIELD_ID_CONTEXT);
Fields fields = new Fields(MessageKafkaTranslator.FIELD_ID_KEY, FIELD_ID_CONTEXT);
declarer.declare(true, fields);
}

@@ -114,7 +114,7 @@ protected final void unhandledInput(Tuple input) {
}

protected void unhandledInput(String key, Tuple input) {
log.error("{} drop worker async response. because {} key is not listed in pending response list [{}]",
log.info("{} drop worker async response. because {} key is not listed in pending response list [{}]",
getComponentId(), key, formatTuplePayload(input));
}

…/org/openkilda/wfm/topology/AbstractTopology.java
@@ -72,7 +72,6 @@
import org.apache.storm.topology.IRichSpout;
import org.apache.storm.topology.SpoutDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.tuple.Fields;
import org.kohsuke.args4j.CmdLineException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -97,11 +96,6 @@ public abstract class AbstractTopology<T extends AbstractTopologyConfig> impleme

public static final String BOLT_ID_CTRL_ROUTE = "ctrl.route";

public static final String KEY_FIELD = "key";
public static final String MESSAGE_FIELD = "message";
public static final Fields fieldMessage = new Fields(MESSAGE_FIELD);
public static final Fields FIELDS_KEY = new Fields(KEY_FIELD);

protected final String topologyName;

protected final KafkaNamingStrategy kafkaNamingStrategy;
…/GenericKafkaRecordTranslator.java
@@ -24,9 +24,6 @@
import org.apache.storm.tuple.Values;

abstract class GenericKafkaRecordTranslator<D> extends KafkaRecordTranslator<String, D, D> {
// use FIELD_ID_KEY instead
@Deprecated
public static final String KEY_FIELD = FIELD_ID_KEY;
public static final Fields STREAM_FIELDS = new Fields(FIELD_ID_KEY, FIELD_ID_PAYLOAD, FIELD_ID_CONTEXT);

@Override

This file was deleted.

@@ -20,7 +20,6 @@
import static org.openkilda.messaging.Utils.TOPOLOGY_NAME;
import static org.openkilda.wfm.protocol.BoltToBoltMessage.FIELD_ID_CORRELATION_ID;
import static org.openkilda.wfm.protocol.JsonMessage.FIELD_ID_JSON;
import static org.openkilda.wfm.topology.AbstractTopology.MESSAGE_FIELD;

import org.openkilda.messaging.Message;
import org.openkilda.wfm.AbstractBolt;
@@ -129,8 +128,8 @@ public static Optional<CommandContext> extract(Tuple input) {
}
}

if (fields.contains(MESSAGE_FIELD)) {
Object messageField = input.getValueByField(MESSAGE_FIELD);
if (fields.contains(KafkaRecordTranslator.FIELD_ID_PAYLOAD)) {
Object messageField = input.getValueByField(KafkaRecordTranslator.FIELD_ID_PAYLOAD);
if (messageField instanceof Message) {
Optional<CommandContext> context = Optional.ofNullable(((Message) messageField).getCorrelationId())
.map(String::trim)
