Skip to content

Commit 62661be

Browse files
[!465] - Migrate to artery remoting
# New features and improvements - Brand new artery remoting instead of classic remoting for Akka. - Remove waspctl binary build during merge request pipeline to save money # Breaking changes Migrate by default to artery remoting. # Migration Change the following akka related configurations: from: ``` akka { remote { artery.enabled = false classic.netty.tcp { port = 0 hostname = "localhost" } } cluster { seed-nodes = ["akka.tcp://WASP@localhost:2892"] } actor { provider = "akka.cluster.ClusterActorRefProvider" } } ``` to: ``` akka { remote { artery { enabled = true canonical.port = 0 canonical.hostname = "localhost" } } cluster { seed-nodes = ["akka://WASP@localhost:2892"] } actor { provider = "cluster" } } ``` Be sure to remove/translate any `akka.remote.classic` configuration entry. # Bug fixes None. # How this feature was tested Existing pipelines. # Related issue Closes #582
1 parent 972920f commit 62661be

File tree

23 files changed

+116
-137
lines changed

23 files changed

+116
-137
lines changed

.gitlab-ci.yml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -159,7 +159,6 @@ waspctl:
159159
only:
160160
- /^develop$/
161161
- /(^release\/v([0-9]+)\.([0-9]+))/
162-
- merge_requests
163162
- tags
164163
needs:
165164
- generate-open-api

consumers-spark/src/main/resources/reference.conf

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@ wasp {
1212
}
1313

1414
actor {
15-
1615
serializers {
1716
kryo = "io.altoo.akka.serialization.kryo.KryoSerializer"
1817
}
@@ -66,10 +65,10 @@ wasp {
6665

6766
}
6867
remote {
69-
artery.enabled = false
70-
classic.netty.tcp {
71-
port = 0
72-
hostname = "localhost"
68+
artery {
69+
enabled = true
70+
canonical.port = 0
71+
canonical.hostname = "localhost"
7372
}
7473
}
7574
}

consumers-spark/src/test/resources/application.conf

Lines changed: 16 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -8,25 +8,20 @@ akka {
88

99

1010
remote {
11-
artery.enabled = false
12-
classic {
13-
log-remote-lifecycle-events = off
14-
enabled-transports = ["akka.remote.classic.netty.tcp"]
15-
netty.tcp {
16-
port = 2892
17-
hostname = "localhost"
18-
}
11+
artery.canonical {
12+
port = 2892
13+
hostname = "localhost"
1914
}
2015
}
2116

2217
actor {
23-
provider = "akka.cluster.ClusterActorRefProvider"
18+
provider = "cluster"
2419
debug.fsm = true
2520
}
2621

2722
cluster {
2823
log-info = on
29-
seed-nodes = ["akka.tcp://WASP@localhost:2892"]
24+
seed-nodes = ["akka://WASP@localhost:2892"]
3025
gossip-interval = 5s
3126
publish-stats-interval = 10s
3227
metrics.gossip-interval = 10s
@@ -46,24 +41,19 @@ default_multimaster {
4641

4742

4843
remote {
49-
artery.enabled = false
50-
classic {
51-
log-remote-lifecycle-events = off
52-
enabled-transports = ["akka.remote.classic.netty.tcp"]
53-
netty.tcp {
54-
hostname = "localhost"
55-
}
44+
artery {
45+
canonical.hostname = "localhost"
5646
}
5747
}
5848

5949
actor {
60-
provider = "akka.cluster.ClusterActorRefProvider"
50+
provider = "cluster"
6151
debug.fsm = true
6252
}
6353

6454
cluster {
6555
log-info = on
66-
seed-nodes = ["akka.tcp://WASP@localhost:2893"]
56+
seed-nodes = ["akka://WASP@localhost:2893"]
6757
gossip-interval = 5s
6858
publish-stats-interval = 10s
6959
metrics.gossip-interval = 10s
@@ -73,16 +63,16 @@ default_multimaster {
7363
}
7464

7565
system-0.akka {
76-
remote.classic.netty.tcp.port = 2893
77-
cluster.roles=["consumers-spark-streaming"]
66+
remote.artery.canonical.port = 2893
67+
cluster.roles = ["consumers-spark-streaming"]
7868
}
7969
system-1.akka {
80-
remote.classic.netty.tcp.port = 2894
81-
cluster.roles=["consumers-spark-streaming"]
70+
remote.artery.canonical.port = 2894
71+
cluster.roles = ["consumers-spark-streaming"]
8272

8373
}
8474
system-2.akka {
85-
remote.classic.netty.tcp.port = 2895
86-
cluster.roles=["consumers-spark-streaming"]
75+
remote.artery.canonical.port = 2895
76+
cluster.roles = ["consumers-spark-streaming"]
8777
}
88-
coordinator.akka.remote.classic.netty.tcp.port = 2896
78+
coordinator.akka.remote.artery.canonical.port = 2896

consumers-spark/src/test/scala/it/agilelab/bigdata/wasp/consumers/spark/streaming/actor/master/MultiMasterSpec.scala

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@ package it.agilelab.bigdata.wasp.consumers.spark.streaming.actor.master
33
import java.net.URLEncoder
44
import java.nio.charset.StandardCharsets
55
import java.util.UUID
6-
import java.util.concurrent.TimeUnit
76
import akka.actor.FSM.{CurrentState, SubscribeTransitionCallBack, Transition}
87
import akka.actor.{Actor, ActorRef, ActorSystem, PoisonPill, Props, Terminated}
98
import akka.cluster.ClusterEvent.{InitialStateAsEvents, MemberUp}
@@ -28,6 +27,7 @@ import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
2827

2928
import scala.concurrent.duration.Duration
3029
import scala.concurrent.{Await, Future}
30+
import scala.concurrent.duration._
3131

3232
class MultiMasterSpec
3333
extends WordSpecLike
@@ -40,9 +40,8 @@ class MultiMasterSpec
4040
import Protocol._
4141
import SparkConsumersStreamingMasterGuardian._
4242

43-
import scala.concurrent.duration._
4443

45-
val slowTimeout: FiniteDuration = FiniteDuration(5, TimeUnit.MINUTES)
44+
val slowTimeout: FiniteDuration = 5.minutes
4645

4746
def childCreatorFactory(probe: TestProbe): ChildCreator = { (_, name, factory) =>
4847
{
@@ -73,7 +72,7 @@ class MultiMasterSpec
7372
watchdogCreator,
7473
"collaborator",
7574
1.millisecond,
76-
FiniteDuration(5, TimeUnit.SECONDS)
75+
5.seconds
7776
)
7877

7978
val childCreator = childCreatorFactory(probe)
@@ -138,7 +137,7 @@ class MultiMasterSpec
138137
watchdogCreator,
139138
"collaborator",
140139
1.millisecond,
141-
FiniteDuration(5, TimeUnit.SECONDS),
140+
5.seconds,
142141
schedulingStrategy = new TestNodeLabelSchedulingStrategyFactory
143142
)
144143

@@ -212,7 +211,7 @@ class MultiMasterSpec
212211
watchdogCreator,
213212
"collaborator",
214213
1.millisecond,
215-
FiniteDuration(5, TimeUnit.SECONDS)
214+
5.seconds
216215
)
217216

218217
cluster("system-0", props, childCreator) { (cluster0, _, _, shutdown0) =>
@@ -302,7 +301,7 @@ class MultiMasterSpec
302301
watchdogCreator,
303302
"collaborator",
304303
1.millisecond,
305-
FiniteDuration(5, TimeUnit.SECONDS),
304+
5.seconds,
306305
Some(whoIsRunningTheSingletonProbe.ref)
307306
)
308307

@@ -347,7 +346,7 @@ class MultiMasterSpec
347346
watchdogCreator,
348347
"collaborator",
349348
1.second,
350-
FiniteDuration(5, TimeUnit.SECONDS)
349+
5.seconds
351350
)
352351

353352
cluster("system-0", props, childCreator) { (cluster0, _, _, shutdown0) =>
@@ -366,6 +365,7 @@ class MultiMasterSpec
366365

367366
Seq(cluster0, cluster1, cluster2).find(_.selfUniqueAddress == address).foreach { cluster =>
368367
cluster.leave(cluster.selfAddress)
368+
cluster.down(cluster.selfAddress)
369369
}
370370

371371
probe.expectMsgPF(slowTimeout) {
@@ -418,7 +418,7 @@ class MultiMasterSpec
418418
watchdogCreator,
419419
"collaborator",
420420
1.millisecond,
421-
FiniteDuration(5, TimeUnit.SECONDS),
421+
5.seconds,
422422
debugActor = Some(whoIsRunningTheSingletonProbe.ref)
423423
)
424424

@@ -520,7 +520,7 @@ class MultiMasterSpec
520520
watchdogCreator,
521521
"collaborator",
522522
1.millisecond,
523-
FiniteDuration(5, TimeUnit.SECONDS),
523+
5.seconds,
524524
Some(whoIsRunningTheSingletonProbe.ref)
525525
)
526526

@@ -566,6 +566,7 @@ class MultiMasterSpec
566566
Seq(cluster0, cluster1, cluster2).find(_.selfUniqueAddress == firstOneRunningTheSingleton).map {
567567
cluster =>
568568
cluster.leave(cluster.selfAddress)
569+
cluster.down(cluster.selfAddress)
569570
cluster.selfUniqueAddress
570571
}
571572

@@ -616,6 +617,8 @@ trait SystemUtils {
616617
val proxy = system.actorOf(ClusterSingletonProxy.props("singleton-manager", proxySettings))
617618

618619
val shutdown: () => Future[Terminated] = () => {
620+
cluster.leave(cluster.selfAddress)
621+
cluster.down(cluster.selfAddress)
619622
system.terminate()
620623
}
621624

@@ -677,6 +680,8 @@ trait SystemUtils {
677680
system.actorOf(CollaboratorActor.props(proxy, childCreator), "collaborator")
678681

679682
val shutdown: () => Future[Terminated] = () => {
683+
cluster.leave(cluster.selfAddress)
684+
cluster.down(cluster.selfAddress)
680685
system.terminate()
681686
}
682687

core/src/main/resources/reference.conf

Lines changed: 5 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -89,25 +89,19 @@ wasp {
8989

9090

9191
remote {
92-
artery.enabled = false
93-
classic {
94-
log-remote-lifecycle-events = off
95-
enabled-transports = ["akka.remote.classic.netty.tcp"]
96-
netty.tcp {
97-
port = 2892
98-
hostname = "localhost"
99-
}
92+
artery {
93+
canonical.port = 2892
94+
canonical.hostname = "localhost"
10095
}
101-
10296
}
10397

10498
actor {
105-
provider = "akka.cluster.ClusterActorRefProvider"
99+
provider = "cluster"
106100
}
107101

108102
cluster {
109103
log-info = on
110-
seed-nodes = ["akka.tcp://WASP@localhost:2892"]
104+
seed-nodes = ["akka://WASP@localhost:2892"]
111105
gossip-interval = 5s
112106
publish-stats-interval = 10s
113107
metrics.gossip-interval = 10s

openshift/batch/docker-entrypoint.sh

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@ WASP_CLASSPATH=`cat /wasp.classpath`:/etc/hbase/conf/:/etc/hadoop/conf/
55
java -cp $WASP_CLASSPATH \
66
-Dconfig.file=/docker-environment.conf \
77
-Dwasp.process="---batch" \
8-
-Dwasp.akka.remote.artery.enabled=false \
9-
-Dwasp.akka.remote.classic.netty.tcp.hostname=${HOSTNAME} \
10-
-Dwasp.akka.remote.classic.netty.tcp.port=2892 \
8+
-Dwasp.akka.remote.artery.canonical.hostname=${HOSTNAME} \
9+
-Dwasp.akka.remote.artery.canonical.port=2892 \
1110
-Dlog4j.configurationFile=file:///log4j2.properties \
1211
it.agilelab.bigdata.wasp.consumers.spark.launcher.SparkConsumersBatchNodeLauncher

openshift/docker-compose.yml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ services:
1818
example.com:
1919
environment:
2020
WASP_MONGO_DB_CONNECTION: "mongodb://mongo.example.com:27017"
21-
WASP_AKKA_SEED_NODE: "akka.tcp://[email protected]:2892"
21+
WASP_AKKA_SEED_NODE: "akka://[email protected]:2892"
2222
WASP_KAFKA_HOSTNAME: "services.example.com"
2323
WASP_KAFKA_PORT: "9092"
2424
WASP_ZOOKEEPER_HOSTNAME: "services.example.com"
@@ -41,7 +41,7 @@ services:
4141
example.com:
4242
environment:
4343
WASP_MONGO_DB_CONNECTION: "mongodb://mongo.example.com:27017"
44-
WASP_AKKA_SEED_NODE: "akka.tcp://[email protected]:2892"
44+
WASP_AKKA_SEED_NODE: "akka://[email protected]:2892"
4545
WASP_KAFKA_HOSTNAME: "services.example.com"
4646
WASP_KAFKA_PORT: "9092"
4747
WASP_ZOOKEEPER_HOSTNAME: "services.example.com"
@@ -61,7 +61,7 @@ services:
6161
example.com:
6262
environment:
6363
WASP_MONGO_DB_CONNECTION: "mongodb://mongo.example.com:27017"
64-
WASP_AKKA_SEED_NODE: "akka.tcp://[email protected]:2892"
64+
WASP_AKKA_SEED_NODE: "akka://[email protected]:2892"
6565
WASP_KAFKA_HOSTNAME: "services.example.com"
6666
WASP_KAFKA_PORT: "9092"
6767
WASP_ZOOKEEPER_HOSTNAME: "services.example.com"
@@ -81,7 +81,7 @@ services:
8181
example.com:
8282
environment:
8383
WASP_MONGO_DB_CONNECTION: "mongodb://mongo.example.com:27017"
84-
WASP_AKKA_SEED_NODE: "akka.tcp://[email protected]:2892"
84+
WASP_AKKA_SEED_NODE: "akka://[email protected]:2892"
8585
WASP_KAFKA_HOSTNAME: "services.example.com"
8686
WASP_KAFKA_PORT: "9092"
8787
WASP_ZOOKEEPER_HOSTNAME: "services.example.com"

openshift/master/docker-entrypoint.sh

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@ WASP_CLASSPATH=`cat /wasp.classpath`:/etc/hbase/conf/
55
java -cp $WASP_CLASSPATH \
66
-Dconfig.file=/docker-environment.conf \
77
-Dwasp.process="---master" \
8-
-Dwasp.akka.remote.artery.enabled=false \
9-
-Dwasp.akka.remote.classic.netty.tcp.hostname=${HOSTNAME} \
10-
-Dwasp.akka.remote.classic.netty.tcp.port=2892 \
8+
-Dwasp.akka.remote.artery.canonical.hostname=${HOSTNAME} \
9+
-Dwasp.akka.remote.artery.canonical.port=2892 \
1110
-Dlog4j.configurationFile=file:///log4j2.properties \
1211
it.agilelab.bigdata.wasp.master.launcher.MasterNodeLauncher

openshift/producers/docker-entrypoint.sh

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@ WASP_CLASSPATH=`cat /wasp.classpath`:/etc/hbase/conf/
55
java -cp $WASP_CLASSPATH \
66
-Dconfig.file=/docker-environment.conf \
77
-Dwasp.process="---producer" \
8-
-Dwasp.akka.remote.artery.enabled=false \
9-
-Dwasp.akka.remote.classic.netty.tcp.hostname=${HOSTNAME} \
10-
-Dwasp.akka.remote.classic.netty.tcp.port=2892 \
8+
-Dwasp.akka.remote.artery.canonical.hostname=${HOSTNAME} \
9+
-Dwasp.akka.remote.artery.canonical.port=2892 \
1110
-Dlog4j.configurationFile=file:///log4j2.properties \
1211
it.agilelab.bigdata.wasp.producers.launcher.ProducersNodeLauncher

openshift/streaming/docker-entrypoint.sh

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@ WASP_CLASSPATH=`cat /wasp.classpath`:/etc/hbase/conf/:/etc/hadoop/conf/
55
java -cp $WASP_CLASSPATH \
66
-Dconfig.file=/docker-environment.conf \
77
-Dwasp.process="---streaming" \
8-
-Dwasp.akka.remote.artery.enabled=false \
9-
-Dwasp.akka.remote.classic.netty.tcp.hostname=${HOSTNAME} \
10-
-Dwasp.akka.remote.classic.netty.tcp.port=2892 \
8+
-Dwasp.akka.remote.artery.canonical.hostname=${HOSTNAME} \
9+
-Dwasp.akka.remote.artery.canonical.port=2892 \
1110
-Dlog4j.configurationFile=file:///log4j2.properties \
1211
it.agilelab.bigdata.wasp.consumers.spark.launcher.SparkConsumersStreamingNodeLauncher

0 commit comments

Comments
 (0)