Skip to content

fix: failing integration tests due to not waiting for TAZ cluster to fully fail over #472

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 5 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions tests/integration/container/utils/rds_test_utility.py
Original file line number Diff line number Diff line change
Expand Up @@ -156,6 +156,7 @@ def failover_cluster_and_wait_until_writer_changed(
sleep(1)
cluster_address = socket.gethostbyname(cluster_endpoint)

self.make_sure_instances_up(self.get_instance_ids())
self.logger.debug("Testing.FinishedFailover", initial_writer_id, str((perf_counter_ns() - start) / 1_000_000))

def failover_cluster(self, cluster_id: Optional[str] = None) -> None:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,6 @@

package integration.host;

import static org.junit.jupiter.api.Assertions.assertEquals;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import eu.rekawek.toxiproxy.ToxiproxyClient;
Expand All @@ -44,6 +42,7 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Logger;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.containers.Network;
Expand All @@ -70,6 +69,7 @@ public class TestEnvironment implements AutoCloseable {
private static final TestEnvironmentConfiguration config = new TestEnvironmentConfiguration();
private static final boolean USE_OTLP_CONTAINER_FOR_TRACES = false;

private static final AtomicInteger ipAddressUsageRefCount = new AtomicInteger(0);
private final TestEnvironmentInfo info =
new TestEnvironmentInfo(); // only this info is passed to test container

Expand Down Expand Up @@ -102,12 +102,22 @@ private TestEnvironment(TestEnvironmentRequest request) {
}

public static TestEnvironment build(TestEnvironmentRequest request) throws IOException {
DatabaseEngineDeployment deployment = request.getDatabaseEngineDeployment();
if (deployment == DatabaseEngineDeployment.AURORA
|| deployment == DatabaseEngineDeployment.RDS
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not sure about RDS. It's not clear whether they need checks for topology.

|| deployment == DatabaseEngineDeployment.RDS_MULTI_AZ) {
// These environments require creating an external database cluster that should be publicly available.
// Corresponding AWS Security Groups should be configured and the test task runner IP address
// should be whitelisted.
ipAddressUsageRefCount.incrementAndGet();
}

LOGGER.finest("Building test env: " + request.getEnvPreCreateIndex());
preCreateEnvironment(request.getEnvPreCreateIndex());

TestEnvironment env;

switch (request.getDatabaseEngineDeployment()) {
switch (deployment) {
case DOCKER:
env = new TestEnvironment(request);
initDatabaseParams(env);
Expand Down Expand Up @@ -894,7 +904,11 @@ public void close() throws Exception {

private void deleteDbCluster() {
if (!this.reuseAuroraDbCluster && !StringUtils.isNullOrEmpty(this.runnerIP)) {
auroraUtil.ec2DeauthorizesIP(runnerIP);
if (ipAddressUsageRefCount.decrementAndGet() == 0) {
// Other test environments may still be using the test task runner IP address.
// The last test environment to finish performs the cleanup.
auroraUtil.ec2DeauthorizesIP(runnerIP);
}
}

if (!this.reuseAuroraDbCluster) {
Expand Down