Fixing ci2 (#360)
* Enhanced error messages
* Made `transportFactory` non-autocomputable (see the sketch after this list)
* Cleaned all reactor deprecations
* Updated `reactor` (to `2020.0.6`)
* Fixed CI (@segabriel)
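A hedged illustration of the `transportFactory` change: after this commit the factory is no longer computed automatically, so callers must supply one explicitly (the diff below adds a requireNonNull check for it). The builder call and the TcpTransportFactory class/package used here are assumptions based on the project's transport-netty module, not taken from this diff.

import io.scalecube.cluster.Cluster;
import io.scalecube.cluster.ClusterImpl;
import io.scalecube.transport.netty.tcp.TcpTransportFactory; // assumed package and class name

public class ExplicitTransportFactorySketch {

  public static void main(String[] args) {
    // transportFactory must now be specified explicitly; otherwise startup fails
    // fast with "Invalid cluster config: transportFactory must be specified".
    Cluster cluster =
        new ClusterImpl()
            .transport(opts -> opts.transportFactory(new TcpTransportFactory()))
            .startAwait();

    System.out.println("Started cluster member: " + cluster.member());
    cluster.shutdown();
  }
}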
artem-v authored Apr 14, 2021
1 parent 8b28122 commit c1cea57
Showing 32 changed files with 504 additions and 296 deletions.
4 changes: 3 additions & 1 deletion .github/workflows/branch-ci.yml
@@ -33,4 +33,6 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.ORGANIZATION_TOKEN }}
- name: Maven Verify
run: mvn verify -B
run: |
sudo echo "127.0.0.1 $(eval hostname)" | sudo tee -a /etc/hosts
mvn verify -B
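The same /etc/hosts line is added to all three workflows before `mvn verify`. A plausible reading (an assumption, not stated in the commit) is that the GitHub-hosted runner's hostname does not resolve by default, so tests that look up the local host fail until the hostname is mapped to 127.0.0.1. A minimal Java check of that condition:

import java.net.InetAddress;
import java.net.UnknownHostException;

public class HostnameResolutionCheck {

  public static void main(String[] args) {
    try {
      // Throws UnknownHostException when the machine's hostname has no entry
      // in /etc/hosts (or DNS); this is the situation the workflow step fixes.
      InetAddress local = InetAddress.getLocalHost();
      System.out.println("resolved " + local.getHostName() + " -> " + local.getHostAddress());
    } catch (UnknownHostException e) {
      System.err.println("local hostname does not resolve: " + e.getMessage());
    }
  }
}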
1 change: 1 addition & 0 deletions .github/workflows/pre-release-ci.yml
@@ -25,6 +25,7 @@ jobs:
server-password: GITHUB_TOKEN
- name: Deploy pre-release version to GitHub Packages
run: |
sudo echo "127.0.0.1 $(eval hostname)" | sudo tee -a /etc/hosts
pre_release_version=${{ github.event.release.tag_name }}
echo Pre-release version $pre_release_version
mvn versions:set -DnewVersion=$pre_release_version -DgenerateBackupPoms=false
4 changes: 3 additions & 1 deletion .github/workflows/release-ci.yml
@@ -31,7 +31,9 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.ORGANIZATION_TOKEN }}
- name: Maven Verify
run: mvn verify -B
run: |
sudo echo "127.0.0.1 $(eval hostname)" | sudo tee -a /etc/hosts
mvn verify -B
- name: Configure git
run: |
git config --global user.email "${GITHUB_ACTOR}@users.noreply.github.com"
4 changes: 3 additions & 1 deletion cluster-api/pom.xml
@@ -1,5 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>

<parent>
4 changes: 3 additions & 1 deletion cluster/pom.xml
@@ -1,5 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>

<parent>
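The ClusterImpl.java diff below is mostly the Reactor deprecation cleanup: DirectProcessor/FluxSink become Sinks.many().multicast().directBestEffort(), and MonoProcessor becomes Sinks.one(). A minimal standalone sketch of that before/after pattern, assuming reactor-core 3.4.x (the version pulled in by the 2020.0.6 BOM) and names chosen only for illustration:

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.Sinks;

public class SinksMigrationSketch {

  public static void main(String[] args) {
    // Before (deprecated in reactor-core 3.4):
    //   DirectProcessor<String> events = DirectProcessor.create();
    //   FluxSink<String> sink = events.sink();
    //   MonoProcessor<Void> onStart = MonoProcessor.create();

    // After: a best-effort multicast sink replaces DirectProcessor + FluxSink.
    Sinks.Many<String> events = Sinks.many().multicast().directBestEffort();
    Flux<String> eventStream = events.asFlux();
    eventStream.subscribe(e -> System.out.println("event: " + e));
    events.emitNext("joined", Sinks.EmitFailureHandler.FAIL_FAST);
    events.emitComplete(Sinks.EmitFailureHandler.FAIL_FAST);

    // ...and Sinks.One replaces MonoProcessor for one-shot lifecycle signals.
    Sinks.One<Void> onStart = Sinks.one();
    Mono<Void> started = onStart.asMono();
    started.subscribe(null, Throwable::printStackTrace, () -> System.out.println("started"));
    onStart.emitEmpty(Sinks.EmitFailureHandler.FAIL_FAST);
  }
}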
103 changes: 66 additions & 37 deletions cluster/src/main/java/io/scalecube/cluster/ClusterImpl.java
@@ -1,5 +1,7 @@
package io.scalecube.cluster;

import static reactor.core.publisher.Sinks.EmitResult.FAIL_NON_SERIALIZED;

import io.scalecube.cluster.fdetector.FailureDetectorConfig;
import io.scalecube.cluster.fdetector.FailureDetectorImpl;
import io.scalecube.cluster.gossip.GossipConfig;
@@ -43,11 +45,12 @@
import reactor.core.Disposable;
import reactor.core.Disposables;
import reactor.core.Exceptions;
import reactor.core.publisher.DirectProcessor;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.publisher.Mono;
import reactor.core.publisher.MonoProcessor;
import reactor.core.publisher.SignalType;
import reactor.core.publisher.Sinks;
import reactor.core.publisher.Sinks.EmitFailureHandler;
import reactor.core.publisher.Sinks.EmitResult;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

@@ -79,17 +82,17 @@ public final class ClusterImpl implements Cluster {
cluster -> new ClusterMessageHandler() {};

// Subject
private final DirectProcessor<MembershipEvent> membershipEvents = DirectProcessor.create();
private final FluxSink<MembershipEvent> membershipSink = membershipEvents.sink();
private final Sinks.Many<MembershipEvent> membershipSink =
Sinks.many().multicast().directBestEffort();

// Disposables
private final Disposable.Composite actionsDisposables = Disposables.composite();

// Lifecycle
private final MonoProcessor<Void> start = MonoProcessor.create();
private final MonoProcessor<Void> onStart = MonoProcessor.create();
private final MonoProcessor<Void> shutdown = MonoProcessor.create();
private final MonoProcessor<Void> onShutdown = MonoProcessor.create();
private final Sinks.One<Void> start = Sinks.one();
private final Sinks.One<Void> onStart = Sinks.one();
private final Sinks.One<Void> shutdown = Sinks.one();
private final Sinks.One<Void> onShutdown = Sinks.one();

// Cluster components
private Transport transport;
@@ -119,14 +122,16 @@ private ClusterImpl(ClusterImpl that) {

private void initLifecycle() {
start
.asMono()
.then(doStart())
.doOnSuccess(avoid -> onStart.onComplete())
.doOnError(onStart::onError)
.doOnSuccess(avoid -> onStart.emitEmpty(RetryEmitFailureHandler.INSTANCE))
.doOnError(th -> onStart.emitError(th, RetryEmitFailureHandler.INSTANCE))
.subscribe(null, th -> LOGGER.error("[{}][doStart] Exception occurred:", localMember, th));

shutdown
.asMono()
.then(doShutdown())
.doFinally(s -> onShutdown.onComplete())
.doFinally(s -> onShutdown.emitEmpty(RetryEmitFailureHandler.INSTANCE))
.subscribe(
null,
th ->
@@ -232,8 +237,8 @@ public ClusterImpl handler(Function<Cluster, ClusterMessageHandler> handler) {
public Mono<Cluster> start() {
return Mono.defer(
() -> {
start.onComplete();
return onStart.thenReturn(this);
start.emitEmpty(RetryEmitFailureHandler.INSTANCE);
return onStart.asMono().thenReturn(this);
});
}

@@ -248,9 +253,9 @@ private Mono<Cluster> doStart() {
private Mono<Cluster> doStart0() {
return TransportImpl.bind(config.transportConfig())
.flatMap(
transport1 -> {
localMember = createLocalMember(transport1.address());
transport = new SenderAwareTransport(transport1, localMember.address());
boundTransport -> {
localMember = createLocalMember(boundTransport.address());
transport = new SenderAwareTransport(boundTransport, localMember.address());

cidGenerator = new CorrelationIdGenerator(localMember.id());
scheduler = Schedulers.newSingle("sc-cluster-" + localMember.address().port(), true);
@@ -260,7 +265,7 @@ private Mono<Cluster> doStart0() {
new FailureDetectorImpl(
localMember,
transport,
membershipEvents.onBackpressureBuffer(),
membershipSink.asFlux().onBackpressureBuffer(),
config.failureDetectorConfig(),
scheduler,
cidGenerator);
@@ -269,7 +274,7 @@ private Mono<Cluster> doStart0() {
new GossipProtocolImpl(
localMember,
transport,
membershipEvents.onBackpressureBuffer(),
membershipSink.asFlux().onBackpressureBuffer(),
config.gossipConfig(),
scheduler);

@@ -294,8 +299,11 @@ private Mono<Cluster> doStart0() {
membership
.listen()
/*.publishOn(scheduler)*/
// Don't uncomment, already being executed inside sc-cluster thread
.subscribe(membershipSink::next, this::onError, membershipSink::complete));
// Don't uncomment, already being executed inside scalecube-cluster thread
.subscribe(
event -> membershipSink.emitNext(event, RetryEmitFailureHandler.INSTANCE),
ex -> LOGGER.error("[{}][membership][error] cause:", localMember, ex),
() -> membershipSink.emitComplete(RetryEmitFailureHandler.INSTANCE)));

return Mono.fromRunnable(() -> failureDetector.start())
.then(Mono.fromRunnable(() -> gossip.start()))
@@ -317,30 +325,45 @@ private void validateConfiguration() {
if (metadataCodec == null) {
Object metadata = config.metadata();
if (metadata != null && !(metadata instanceof Serializable)) {
throw new IllegalArgumentException(
"Invalid cluster configuration: metadata must be Serializable");
throw new IllegalArgumentException("Invalid cluster config: metadata must be Serializable");
}
}

Objects.requireNonNull(
config.transportConfig().transportFactory(),
"Invalid cluster config: transportFactory must be specified");

Objects.requireNonNull(
config.transportConfig().messageCodec(),
"Invalid cluster configuration: transport.messageCodec must be specified");
"Invalid cluster config: messageCodec must be specified");

Objects.requireNonNull(
config.membershipConfig().namespace(),
"Invalid cluster configuration: membership.namespace must be specified");
"Invalid cluster config: membership namespace must be specified");

if (!NAMESPACE_PATTERN.matcher(config.membershipConfig().namespace()).matches()) {
throw new IllegalArgumentException(
"Invalid cluster config: membership.namespace format is invalid");
"Invalid cluster config: membership namespace format is invalid");
}
}

private void startHandler() {
ClusterMessageHandler handler = this.handler.apply(this);
actionsDisposables.add(listenMessage().subscribe(handler::onMessage, this::onError));
actionsDisposables.add(listenMembership().subscribe(handler::onMembershipEvent, this::onError));
actionsDisposables.add(listenGossip().subscribe(handler::onGossip, this::onError));
actionsDisposables.add(
listenMessage()
.subscribe(
handler::onMessage,
ex -> LOGGER.error("[{}][onMessage][error] cause:", localMember, ex)));
actionsDisposables.add(
listenMembership()
.subscribe(
handler::onMembershipEvent,
ex -> LOGGER.error("[{}][onMembershipEvent][error] cause:", localMember, ex)));
actionsDisposables.add(
listenGossip()
.subscribe(
handler::onGossip,
ex -> LOGGER.error("[{}][onGossip][error] cause:", localMember, ex)));
}

private void startJmxMonitor() {
@@ -357,10 +380,6 @@ private void startJmxMonitor() {
}
}

private void onError(Throwable th) {
LOGGER.error("[{}] Received unexpected error:", localMember, th);
}

private Flux<Message> listenMessage() {
// filter out system messages
return transport.listen().filter(msg -> !SYSTEM_MESSAGES.contains(msg.qualifier()));
@@ -373,7 +392,7 @@ private Flux<Message> listenGossip() {

private Flux<MembershipEvent> listenMembership() {
// listen on live stream
return membershipEvents.onBackpressureBuffer();
return membershipSink.asFlux().onBackpressureBuffer();
}

/**
@@ -481,7 +500,7 @@ public <T> Mono<Void> updateMetadata(T metadata) {

@Override
public void shutdown() {
shutdown.onComplete();
shutdown.emitEmpty(RetryEmitFailureHandler.INSTANCE);
}

private Mono<Void> doShutdown() {
@@ -524,12 +543,12 @@ private Mono<Void> dispose() {

@Override
public Mono<Void> onShutdown() {
return onShutdown;
return onShutdown.asMono();
}

@Override
public boolean isShutdown() {
return onShutdown.isDisposed();
return onShutdown.asMono().toFuture().isDone();
}

private static class SenderAwareTransport implements Transport {
@@ -581,4 +600,14 @@ private Message enhanceWithSender(Message message) {
return Message.with(message).sender(address).build();
}
}

private static class RetryEmitFailureHandler implements EmitFailureHandler {

private static final RetryEmitFailureHandler INSTANCE = new RetryEmitFailureHandler();

@Override
public boolean onEmitFailure(SignalType signalType, EmitResult emitResult) {
return emitResult == FAIL_NON_SERIALIZED;
}
}
}
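For reference, a self-contained sketch of how the new RetryEmitFailureHandler behaves: returning true from onEmitFailure makes emitNext/emitComplete retry the emission, so the handler above retries only when a concurrent emission produced FAIL_NON_SERIALIZED and fails fast for any other result. Class and variable names below are illustrative, not part of the commit.

import reactor.core.publisher.SignalType;
import reactor.core.publisher.Sinks;
import reactor.core.publisher.Sinks.EmitFailureHandler;
import reactor.core.publisher.Sinks.EmitResult;

public class RetryOnNonSerializedSketch {

  // Same contract as the handler added above: retry while the failure is
  // FAIL_NON_SERIALIZED (another thread is emitting), fail fast otherwise.
  static final EmitFailureHandler RETRY_NON_SERIALIZED =
      (SignalType signalType, EmitResult emitResult) ->
          emitResult == EmitResult.FAIL_NON_SERIALIZED;

  public static void main(String[] args) {
    Sinks.Many<Integer> sink = Sinks.many().multicast().directBestEffort();
    sink.asFlux().subscribe(v -> System.out.println("got " + v));

    // emitNext keeps invoking the handler until it returns false or the
    // emission succeeds, instead of throwing Sinks.EmissionException.
    sink.emitNext(42, RETRY_NON_SERIALIZED);
    sink.emitComplete(RETRY_NON_SERIALIZED);
  }
}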