diff --git a/docker/quick-setup/distributed-sync-bridge-mapi/.config/gravitee.yml b/docker/quick-setup/distributed-sync-bridge-mapi/.config/gravitee.yml
new file mode 100644
index 00000000000..9e9f8da5147
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge-mapi/.config/gravitee.yml
@@ -0,0 +1,27 @@
+management:
+ type: http
+ http:
+ url: http://gateway-server:18092
+
+ratelimit:
+ type: mongodb
+ mongodb:
+ uri: mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000
+
+cluster:
+ type: hazelcast
+ hazelcast:
+ config-path: /opt/graviteeio-gateway/config/hazelcast.xml
+
+distributed-sync:
+ type: redis
+ redis:
+ host: redis-stack
+ port: 6379
+
+services:
+ sync:
+ repository:
+ enabled: true
+ distributed:
+ enabled: true
\ No newline at end of file
diff --git a/docker/quick-setup/distributed-sync-bridge-mapi/.config/hazelcast.xml b/docker/quick-setup/distributed-sync-bridge-mapi/.config/hazelcast.xml
new file mode 100644
index 00000000000..0d633ec1a6e
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge-mapi/.config/hazelcast.xml
@@ -0,0 +1,27 @@
+
+
+
+
+
+ gio-apim-gateway-cluster-bridge
+
+ 5701
+
+
+
+
+ gateway_client
+ gateway_client_2
+ management_api
+
+
+
+
\ No newline at end of file
diff --git a/docker/quick-setup/distributed-sync-bridge-mapi/.config/logback.xml b/docker/quick-setup/distributed-sync-bridge-mapi/.config/logback.xml
new file mode 100644
index 00000000000..330f05b7328
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge-mapi/.config/logback.xml
@@ -0,0 +1,65 @@
+
+
+
+
+
+
+
+
+
+ %d{HH:mm:ss.SSS} [%thread] [%X{api}] %-5level %logger{36} - %msg%n
+
+
+
+
+ ${gravitee.home}/logs/gravitee.log
+
+
+ ${gravitee.home}/logs/gravitee_%d{yyyy-MM-dd}.log
+
+
+ 30
+
+
+
+ %d{HH:mm:ss.SSS} [%thread] [%X{api}] %-5level %logger{36} - %msg%n
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docker/quick-setup/distributed-sync-bridge-mapi/README.md b/docker/quick-setup/distributed-sync-bridge-mapi/README.md
new file mode 100644
index 00000000000..3099cef86c5
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge-mapi/README.md
@@ -0,0 +1,41 @@
+# MAPI as Bridge HTTP and Distributed Sync
+
+Here is a docker-compose to run APIM with two client gateways and the Management API acting as the Bridge Server:
+ - The **Management API** as the **Bridge Server.** It can make calls to the database and exposes HTTP endpoints so that clients can reach the database through it.
+ - Two gateways as **Bridge Clients.** They call the Bridge Server through HTTP to fetch data.
+
+You can call your APIs as usual through any of your client gateways, for example: `http://localhost:8082/myapi` or `http://localhost:8081/myapi`.
+
+To test the **Bridge Server**, you can call, for example, `http://localhost:18092/_bridge/apis` to list all the APIs directly from the database.
+
+## How to run?
+
+⚠️ You need a license file to be able to run Enterprise Edition of APIM. Do not forget to add your license file into `./.license`.
+
+`APIM_VERSION={APIM_VERSION} docker-compose up -d `
+
+To be sure to fetch the latest version of the images, you can run
+`export APIM_VERSION={APIM_VERSION} && docker-compose down -v && docker-compose pull && docker-compose up`
+
+If you want to run only one client gateway first and then the other, you can do
+`export APIM_REGISTRY=graviteeio.azurecr.io && export APIM_VERSION=master-latest && docker compose up -d redis-stack mongodb elasticsearch gateway_client management_api management_ui`
+
+and then to start the second client gateway, you can do
+`export APIM_REGISTRY=graviteeio.azurecr.io && export APIM_VERSION=master-latest && docker compose up -d gateway_client_2`
+
+To see what is inside Redis, for example the distributed sync data, you can do
+`redis-cli`
+
+and then inside redis-cli, you can do
+`KEYS distributed*`
+
+## Scenario Testing
+
+Below is the scenario to test distributed sync process with MAPI as Bridge Server. Logs/Info/Success messages are printed as output.
+
+### Scenario: Test that a new gateway must sync without access to DB
+
+Run the script `test-bridge-crash-new-gateway-sync.sh`.
+
+The script above starts all the services along with bridge server.
+It then kills the bridge server and the database, adds a new gateway, and verifies that it synced via Redis.
\ No newline at end of file
diff --git a/docker/quick-setup/distributed-sync-bridge-mapi/docker-compose.yml b/docker/quick-setup/distributed-sync-bridge-mapi/docker-compose.yml
new file mode 100644
index 00000000000..0c8fd6ca902
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge-mapi/docker-compose.yml
@@ -0,0 +1,155 @@
+networks:
+ frontend:
+ name: frontend
+ storage:
+ name: storage
+
+volumes:
+ data-elasticsearch:
+ data-mongo:
+
+services:
+ redis-stack:
+ image: 'redis/redis-stack:latest'
+ container_name: redis_stack
+ ports:
+ - '6379:6379'
+ networks:
+ - storage
+ - frontend
+
+ mongodb:
+ image: mongo:${MONGODB_VERSION:-6.0}
+ container_name: gio_apim_mongodb
+ restart: always
+ volumes:
+ - data-mongo:/data/db
+ - ./.logs/apim-mongodb:/var/log/mongodb
+ healthcheck:
+ test: mongosh --eval 'db.runCommand({serverStatus:1}).ok' --quiet | grep 1
+ interval: 5s
+ timeout: 3s
+ retries: 10
+ networks:
+ - storage
+
+ elasticsearch:
+ image: docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION:-8.17.2}
+ container_name: gio_apim_elasticsearch
+ restart: always
+ volumes:
+ - data-elasticsearch:/usr/share/elasticsearch/data
+ environment:
+ - http.host=0.0.0.0
+ - transport.host=0.0.0.0
+ - xpack.security.enabled=false
+ - cluster.name=elasticsearch
+ - bootstrap.memory_lock=true
+ - discovery.type=single-node
+ - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
+ ulimits:
+ memlock:
+ soft: -1
+ hard: -1
+ nofile: 65536
+ healthcheck:
+ test: [ "CMD", "curl", "-f", "http://localhost:9200/_cluster/health?wait_for_status=yellow&timeout=5s" ]
+ interval: 5s
+ timeout: 3s
+ retries: 10
+ networks:
+ - storage
+
+ gateway_client:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-gateway:${APIM_VERSION:-latest}
+ container_name: gio_apim_gateway_client
+ restart: always
+ ports:
+ - "8082:8082"
+ links:
+ # Workaround used because of call to URI.create(uri) in WebClientFactory. If host contains "_", then host is null and the connection cannot be done.
+ - "management_api:gateway-server"
+ depends_on:
+ mongodb:
+ condition: service_healthy
+ elasticsearch:
+ condition: service_healthy
+ volumes:
+ - ./.logs/apim-gateway-client:/opt/graviteeio-gateway/logs
+ - ./.license:/opt/graviteeio-gateway/license
+ - ./.config:/opt/graviteeio-gateway/config
+ environment:
+ - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200
+ networks:
+ - storage
+ - frontend
+
+ gateway_client_2:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-gateway:${APIM_VERSION:-latest}
+ container_name: gio_apim_gateway_client_2
+ restart: always
+ ports:
+ - "8081:8082"
+ links:
+ # Workaround used because of call to URI.create(uri) in WebClientFactory. If host contains "_", then host is null and the connection cannot be done.
+ - "management_api:gateway-server"
+ depends_on:
+ mongodb:
+ condition: service_healthy
+ elasticsearch:
+ condition: service_healthy
+ volumes:
+ - ./.logs/apim-gateway-client:/opt/graviteeio-gateway/logs
+ - ./.license:/opt/graviteeio-gateway/license
+ - ./.config:/opt/graviteeio-gateway/config
+ environment:
+ - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200
+ networks:
+ - storage
+ - frontend
+
+ management_api:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-management-api:${APIM_VERSION:-latest}
+ container_name: gio_apim_management_api
+ restart: always
+ ports:
+ - "8083:8083"
+ - "18092:18092"
+ links:
+ - mongodb
+ - elasticsearch
+ depends_on:
+ mongodb:
+ condition: service_healthy
+ elasticsearch:
+ condition: service_healthy
+ volumes:
+ - ./.logs/apim-management-api:/opt/graviteeio-management-api/logs
+ - ./.license:/opt/graviteeio-management-api/license
+ environment:
+ - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000
+ - gravitee_analytics_elasticsearch_endpoints_0=http://elasticsearch:9200
+ - gravitee_services_bridge_http_enabled=true
+ - gravitee_services_bridge_http_port=18092
+ - gravitee_services_bridge_http_host=0.0.0.0
+ - gravitee_services_bridge_http_authentication_type=none
+ - gravitee_services_bridge_http_secured=false
+ - gravitee_services_bridge_http_ssl_clientAuth=false
+ networks:
+ - storage
+ - frontend
+
+ management_ui:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-management-ui:${APIM_VERSION:-latest}
+ container_name: gio_apim_management_ui
+ restart: always
+ ports:
+ - "8084:8080"
+ depends_on:
+ - management_api
+ environment:
+ - MGMT_API_URL=http://localhost:8083/management/
+ volumes:
+ - ./.logs/apim-management-ui:/var/log/nginx
+ networks:
+ - frontend
diff --git a/docker/quick-setup/distributed-sync-bridge-mapi/test-bridge-crash-new-gateway-sync.sh b/docker/quick-setup/distributed-sync-bridge-mapi/test-bridge-crash-new-gateway-sync.sh
new file mode 100755
index 00000000000..fecbf4e23e2
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge-mapi/test-bridge-crash-new-gateway-sync.sh
@@ -0,0 +1,428 @@
+#!/bin/bash
+#
+# Copyright © 2015 The Gravitee team (http://gravitee.io)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# Exit immediately if a command exits with a non-zero status.
+set -e
+
+# --- Configuration ---
+export APIM_REGISTRY=${APIM_REGISTRY:-graviteeio.azurecr.io}
+export APIM_VERSION=${APIM_VERSION:-master-latest}
+
+# Match CONTAINER names
+BRIDGE_SERVER_CONTAINER_NAME="gio_apim_management_api"
+BRIDGE_CLIENT_1_CONTAINER_NAME="gio_apim_gateway_client"
+BRIDGE_CLIENT_2_CONTAINER_NAME="gio_apim_gateway_client_2"
+REDIS_CONTAINER_NAME="redis_stack"
+MONGO_CONTAINER_NAME="gio_apim_mongodb"
+MGMT_API_CONTAINER_NAME="gio_apim_management_api"
+ELASTIC_CONTAINER_NAME="gio_apim_elasticsearch"
+
+# Match SERVICE names
+REDIS_SERVICE_NAME="redis-stack"
+MONGO_SERVICE_NAME="mongodb"
+ELASTIC_SERVICE_NAME="elasticsearch"
+BRIDGE_SERVER_SERVICE_NAME="management_api"
+BRIDGE_CLIENT_1_SERVICE_NAME="gateway_client"
+BRIDGE_CLIENT_2_SERVICE_NAME="gateway_client_2"
+MGMT_API_SERVICE_NAME="management_api"
+
+API_CONTEXT_PATH="testDSPWithBridge"
+API_NAME="testDSPWithBridge" # Match the name in the payload
+
+BRIDGE_SERVER_PORT="18092"
+BRIDGE_CLIENT_1_PORT="8082"
+BRIDGE_CLIENT_2_PORT="8081"
+MGMT_API_URL="http://localhost:8083"
+MGMT_API_USER="admin"
+MGMT_API_PASS="admin"
+
+WAIT_TIME_INITIAL=60
+WAIT_TIME_SECONDARY=30
+
+# --- Helper Functions ---
+log_info() {
+ echo
+ echo "🔵 [INFO] $(date '+%Y-%m-%d %H:%M:%S') $1"
+}
+
+log_success() {
+ echo "✅ [SUCCESS] $(date '+%Y-%m-%d %H:%M:%S') $1"
+}
+
+log_error() {
+ echo "❌ [ERROR] $(date '+%Y-%m-%d %H:%M:%S') $1" >&2
+ # (Error logging remains the same)
+ echo "--- Last 20 lines of ${BRIDGE_SERVER_CONTAINER_NAME} ---"
+ docker logs --tail 20 ${BRIDGE_SERVER_CONTAINER_NAME} 2>/dev/null || echo "Could not get logs for ${BRIDGE_SERVER_CONTAINER_NAME}"
+ echo "--- Last 20 lines of ${BRIDGE_CLIENT_1_CONTAINER_NAME} ---"
+ docker logs --tail 20 ${BRIDGE_CLIENT_1_CONTAINER_NAME} 2>/dev/null || echo "Could not get logs for ${BRIDGE_CLIENT_1_CONTAINER_NAME}"
+ echo "--- Last 20 lines of ${BRIDGE_CLIENT_2_CONTAINER_NAME} ---"
+ docker logs --tail 20 ${BRIDGE_CLIENT_2_CONTAINER_NAME} 2>/dev/null || echo "Could not get logs for ${BRIDGE_CLIENT_2_CONTAINER_NAME}"
+ exit 1
+}
+
+cleanup() {
+ log_info "Cleaning up Docker environment..."
+ if docker compose ps -q &> /dev/null; then
+ docker compose down -v --remove-orphans
+ else
+ log_info "No active docker compose project found to clean up."
+ fi
+ log_info "Cleanup complete."
+}
+
+# --- Main Test ---
+trap cleanup EXIT
+
+echo
+log_info "Test that a new gateway in a cluster user bridge must sync if the bridge crashes"
+echo
+
+log_info "Starting Bridge Server, First Client, and Core Services..."
+echo
+export APIM_REGISTRY=graviteeio.azurecr.io && export APIM_VERSION=master-latest && docker compose up -d redis-stack mongodb elasticsearch gateway_client management_api
+
+log_info "Waiting ${WAIT_TIME_INITIAL}s for services (especially Gateway) to initialize..."
+sleep $WAIT_TIME_INITIAL
+
+log_info "Verifying initial containers started..."
+echo
+docker compose ps --filter "name=${BRIDGE_SERVER_CONTAINER_NAME}" --filter "status=running" | grep -q $BRIDGE_SERVER_CONTAINER_NAME || log_error "${BRIDGE_SERVER_CONTAINER_NAME} failed to start."
+docker compose ps --filter "name=${BRIDGE_CLIENT_1_CONTAINER_NAME}" --filter "status=running" | grep -q $BRIDGE_CLIENT_1_CONTAINER_NAME || log_error "${BRIDGE_CLIENT_1_CONTAINER_NAME} failed to start."
+docker compose ps --filter "name=${MGMT_API_CONTAINER_NAME}" --filter "status=running" | grep -q $MGMT_API_CONTAINER_NAME || log_error "${MGMT_API_CONTAINER_NAME} failed to start."
+
+echo
+log_success "Initial containers are running."
+echo
+
+SERVER_LOG_OUTPUT=$(docker logs $BRIDGE_SERVER_CONTAINER_NAME)
+
+log_info "Printing last few log lines from Bridge Server (${BRIDGE_SERVER_CONTAINER_NAME})..."
+echo "-----------------------------------------------------"
+docker logs --tail 30 $BRIDGE_SERVER_CONTAINER_NAME
+echo "-----------------------------------------------------"
+echo
+
+echo
+if echo "$SERVER_LOG_OUTPUT" | grep -q "Sync service has been scheduled with delay"; then
+log_success "Bridge Server started"; fi
+echo
+if echo "$SERVER_LOG_OUTPUT" | grep -q "A node joined the cluster"; then
+log_success "Client joined the Bridge Server. Cluster is formed."; fi
+echo
+# --- Automated API Deployment using Import ---
+log_info "Importing API definition for '${API_NAME}' with context path '/${API_CONTEXT_PATH}'..."
+echo
+AUTH_HEADER=$(echo -n "${MGMT_API_USER}:${MGMT_API_PASS}" | base64)
+
+IMPORT_PAYLOAD=$(cat < /dev/null 2>&1; then
+ log_info "Redis index not found yet, waiting 5s more..."
+ sleep 5
+ if ! docker exec $REDIS_CONTAINER_NAME redis-cli FT.INFO "gravitee-sync-events-idx" > /dev/null 2>&1; then
+ log_error "Redis index 'gravitee-sync-events-idx' does not exist. Primary client likely failed to init DSP."
+ fi
+fi
+REDIS_KEYS_OUTPUT=$(docker exec $REDIS_CONTAINER_NAME redis-cli KEYS '*' || echo "KEYS failed")
+if [[ "$REDIS_KEYS_OUTPUT" == "KEYS failed" ]]; then
+ log_error "Failed to execute KEYS command in Redis."
+fi
+echo
+echo "--- Redis KEYS * Output ---"
+echo "$REDIS_KEYS_OUTPUT"
+echo "---------------------------"
+echo
+if ! echo "$REDIS_KEYS_OUTPUT" | grep -q -E 'distributed_event:|distributed_sync_state:'; then
+ log_error "No 'distributed_event:*' or 'distributed_sync_state:*' keys found in Redis. DSP sync likely failed."
+fi
+log_success "Found keys in Redis (verify DSP keys are present above)."
+
+log_info "Simulating Bridge failure: Stopping ${BRIDGE_SERVER_SERVICE_NAME} & ${MONGO_SERVICE_NAME}"
+docker compose stop management_api mongodb
+log_info "Waiting 5s..."
+sleep 5
+
+log_info "Attempting to start Second Bridge Client (${BRIDGE_CLIENT_2_SERVICE_NAME})..."
+export APIM_REGISTRY=graviteeio.azurecr.io && export APIM_VERSION=master-latest && docker compose up -d gateway_client_2
+
+log_info "Waiting ${WAIT_TIME_SECONDARY}s for Second Bridge Client to start and sync from Redis..."
+sleep $WAIT_TIME_SECONDARY
+
+log_info "Verifying Second Bridge Client started..."
+echo
+docker compose ps --filter "name=${BRIDGE_CLIENT_2_CONTAINER_NAME}" --filter "status=running" | grep -q $BRIDGE_CLIENT_2_CONTAINER_NAME || log_error "${BRIDGE_CLIENT_2_CONTAINER_NAME} failed to start."
+log_success "Second Bridge Client (${BRIDGE_CLIENT_2_CONTAINER_NAME}) is running."
+
+log_info "Printing last 15 log lines from Second Bridge Client (${BRIDGE_CLIENT_2_CONTAINER_NAME})..."
+echo "-----------------------------------------------------"
+docker logs --tail 15 $BRIDGE_CLIENT_2_CONTAINER_NAME
+echo "-----------------------------------------------------"
+echo
+
+log_info "Verifying Second Bridge Client synced API '/${API_CONTEXT_PATH}' from Redis..."
+echo
+sleep 5
+LOG_OUTPUT=$(docker logs $BRIDGE_CLIENT_2_CONTAINER_NAME)
+SYNC_LOG_FOUND=false
+DEPLOY_LOG_FOUND=false
+BRIDGE_ERROR_DURING_SYNC_FOUND=false
+
+if echo "$LOG_OUTPUT" | grep -q "i.g.g.s.s.p.d.s.AbstractDistributedSynchronizer"; then SYNC_LOG_FOUND=true; fi
+if echo "$LOG_OUTPUT" | grep -q "API id\[.*\] name\[${API_NAME}\] version\[.*\] has been deployed"; then DEPLOY_LOG_FOUND=true; fi
+if echo "$LOG_OUTPUT" | grep "Fetching event" -A 10 | grep -q -E "Failed|Error|Unable to sync.*http.*${BRIDGE_SERVER_NAME}"; then BRIDGE_ERROR_DURING_SYNC_FOUND=true; fi
+
+if ! $SYNC_LOG_FOUND; then
+ log_error "Second gateway logs do not show 'Fetching events from the distributed repository'."
+echo
+elif ! $DEPLOY_LOG_FOUND; then
+ log_error "Second gateway logs show 'Fetching events' but NOT deployment of API '${API_NAME}'."
+echo
+elif $BRIDGE_ERROR_DURING_SYNC_FOUND; then
+ log_info "NOTE: Second gateway might have logged errors trying to connect to the (down) bridge during startup/sync attempts, as expected."
+echo
+fi
+log_success "Second Bridge Client appears to have synced API '${API_NAME}' from Redis."
+echo
+
+log_info "Testing API access via Second Bridge Client (port ${BRIDGE_CLIENT_2_PORT})..."
+echo
+HTTP_STATUS_CLIENT2=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 http://localhost:${BRIDGE_CLIENT_2_PORT}/${API_CONTEXT_PATH} || echo "Curl failed")
+if [[ "$HTTP_STATUS_CLIENT2" == "Curl failed" ]]; then
+ log_error "Failed to connect to Second Bridge Client on port ${BRIDGE_CLIENT_2_PORT}."
+elif [ "$HTTP_STATUS_CLIENT2" -ne 200 ] && [ "$HTTP_STATUS_CLIENT2" -ne 401 ] && [ "$HTTP_STATUS_CLIENT2" -ne 403 ]; then
+ log_error "API call via Second Bridge Client failed with status code $HTTP_STATUS_CLIENT2."
+else
+ log_success "API call via Second Bridge Client returned expected status code $HTTP_STATUS_CLIENT2."
+fi
+
+echo
+log_success "Bridge & Primary Client Failure Test Scenario Completed Successfully!"
+echo
+
+exit 0
diff --git a/docker/quick-setup/distributed-sync-bridge/.config/gateway-client/gravitee.yml b/docker/quick-setup/distributed-sync-bridge/.config/gateway-client/gravitee.yml
new file mode 100644
index 00000000000..9e9f8da5147
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge/.config/gateway-client/gravitee.yml
@@ -0,0 +1,27 @@
+management:
+ type: http
+ http:
+ url: http://gateway-server:18092
+
+ratelimit:
+ type: mongodb
+ mongodb:
+ uri: mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000
+
+cluster:
+ type: hazelcast
+ hazelcast:
+ config-path: /opt/graviteeio-gateway/config/hazelcast.xml
+
+distributed-sync:
+ type: redis
+ redis:
+ host: redis-stack
+ port: 6379
+
+services:
+ sync:
+ repository:
+ enabled: true
+ distributed:
+ enabled: true
\ No newline at end of file
diff --git a/docker/quick-setup/distributed-sync-bridge/.config/gateway-server/gravitee.yml b/docker/quick-setup/distributed-sync-bridge/.config/gateway-server/gravitee.yml
new file mode 100644
index 00000000000..341fe382b66
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge/.config/gateway-server/gravitee.yml
@@ -0,0 +1,34 @@
+management:
+ type: mongodb
+ mongodb:
+ uri: mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000
+
+ratelimit:
+ type: mongodb
+ mongodb:
+ uri: mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000
+
+cluster:
+ type: hazelcast
+ hazelcast:
+ config-path: /opt/graviteeio-gateway/config/hazelcast.xml
+
+distributed-sync:
+ type: redis
+ redis:
+ host: redis-stack
+ port: 6379
+
+services:
+ sync:
+ repository:
+ enabled: true
+ distributed:
+ enabled: true
+ bridge:
+ http:
+ enabled: true
+ port: 18092
+ host: 0.0.0.0
+ authentication:
+ type: none
\ No newline at end of file
diff --git a/docker/quick-setup/distributed-sync-bridge/.config/hazelcast.xml b/docker/quick-setup/distributed-sync-bridge/.config/hazelcast.xml
new file mode 100644
index 00000000000..98e84ac6a78
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge/.config/hazelcast.xml
@@ -0,0 +1,27 @@
+
+
+
+
+
+ gio-apim-gateway-cluster-bridge
+
+ 5701
+
+
+
+
+ gateway_client
+ gateway_client_2
+ gateway_server
+
+
+
+
\ No newline at end of file
diff --git a/docker/quick-setup/distributed-sync-bridge/.config/logback.xml b/docker/quick-setup/distributed-sync-bridge/.config/logback.xml
new file mode 100644
index 00000000000..330f05b7328
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge/.config/logback.xml
@@ -0,0 +1,65 @@
+
+
+
+
+
+
+
+
+
+ %d{HH:mm:ss.SSS} [%thread] [%X{api}] %-5level %logger{36} - %msg%n
+
+
+
+
+ ${gravitee.home}/logs/gravitee.log
+
+
+ ${gravitee.home}/logs/gravitee_%d{yyyy-MM-dd}.log
+
+
+ 30
+
+
+
+ %d{HH:mm:ss.SSS} [%thread] [%X{api}] %-5level %logger{36} - %msg%n
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docker/quick-setup/distributed-sync-bridge/README.md b/docker/quick-setup/distributed-sync-bridge/README.md
new file mode 100644
index 00000000000..133327b6e7e
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge/README.md
@@ -0,0 +1,41 @@
+# Gateway with Bridge HTTP and Distributed Sync
+
+Here is a docker-compose to run APIM with three gateways:
+ - One as the **Bridge Server.** It can make calls to the database and exposes HTTP endpoints so that clients can reach the database through it.
+ - Two as **Bridge Clients.** They call the Gateway Bridge Server through HTTP to fetch data.
+
+You can call your APIs as usual through any of your client gateways, for example: `http://localhost:8082/myapi` or `http://localhost:8081/myapi`.
+
+To test the **Bridge Server**, you can call, for example, `http://localhost:18092/_bridge/apis` to list all the APIs directly from the database.
+
+## How to run?
+
+⚠️ You need a license file to be able to run Enterprise Edition of APIM. Do not forget to add your license file into `./.license`.
+
+`APIM_VERSION={APIM_VERSION} docker-compose up -d `
+
+To be sure to fetch the latest version of the images, you can run
+`export APIM_VERSION={APIM_VERSION} && docker-compose down -v && docker-compose pull && docker-compose up`
+
+If you want to run only one client gateway first and then the other, you can do
+`export APIM_REGISTRY=graviteeio.azurecr.io && export APIM_VERSION=master-latest && docker compose up -d redis-stack mongodb elasticsearch gateway_server gateway_client management_api management_ui`
+
+and then to start the second client gateway, you can do
+`export APIM_REGISTRY=graviteeio.azurecr.io && export APIM_VERSION=master-latest && docker compose up -d gateway_client_2`
+
+To see what is inside Redis, for example the distributed sync data, you can do
+`redis-cli`
+
+and then inside redis-cli, you can do
+`KEYS distributed*`
+
+## Scenario Testing
+
+Below is the scenario to test distributed sync process with Bridge Server. Logs/Info/Success messages are printed as output.
+
+### Scenario: Test that a new gateway must sync without access to DB
+
+Run the script `test-bridge-crash-new-gateway-sync.sh`.
+
+The script above starts all the services along with bridge server.
+It then kills the bridge server and the database, adds a new gateway, and verifies that it synced via Redis.
\ No newline at end of file
diff --git a/docker/quick-setup/distributed-sync-bridge/docker-compose.yml b/docker/quick-setup/distributed-sync-bridge/docker-compose.yml
new file mode 100644
index 00000000000..5e2a26d57d5
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge/docker-compose.yml
@@ -0,0 +1,184 @@
+#
+# Copyright © 2015 The Gravitee team (http://gravitee.io)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+networks:
+ frontend:
+ name: frontend
+ storage:
+ name: storage
+
+volumes:
+ data-elasticsearch:
+ data-mongo:
+
+services:
+ redis-stack:
+ image: redis/redis-stack:latest
+ container_name: redis_stack
+ ports:
+ - "6379:6379"
+ networks:
+ - storage
+ - frontend
+
+ mongodb:
+ image: mongo:${MONGODB_VERSION:-6.0}
+ container_name: gio_apim_mongodb
+ restart: always
+ volumes:
+ - data-mongo:/data/db
+ - ./.logs/apim-mongodb:/var/log/mongodb
+ healthcheck:
+ test: mongosh --eval 'db.runCommand({serverStatus:1}).ok' --quiet | grep 1
+ interval: 5s
+ timeout: 3s
+ retries: 10
+ networks:
+ - storage
+
+ elasticsearch:
+ image: docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION:-8.17.2}
+ container_name: gio_apim_elasticsearch
+ restart: always
+ volumes:
+ - data-elasticsearch:/usr/share/elasticsearch/data
+ environment:
+ - http.host=0.0.0.0
+ - transport.host=0.0.0.0
+ - xpack.security.enabled=false
+ - cluster.name=elasticsearch
+ - bootstrap.memory_lock=true
+ - discovery.type=single-node
+ - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
+ ulimits:
+ memlock:
+ soft: -1
+ hard: -1
+ nofile: 65536
+ healthcheck:
+ test: [ "CMD", "curl", "-f", "http://localhost:9200/_cluster/health?wait_for_status=yellow&timeout=5s" ]
+ interval: 5s
+ timeout: 3s
+ retries: 10
+ networks:
+ - storage
+
+ gateway_server:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-gateway:${APIM_VERSION:-latest}
+ container_name: gio_apim_gateway_server
+ restart: always
+ ports:
+ - "18092:18092"
+ depends_on:
+ mongodb:
+ condition: service_healthy
+ volumes:
+ - ./.logs/apim-gateway-server:/opt/graviteeio-gateway/logs
+ - ./.license:/opt/graviteeio-gateway/license
+ - ./.config/gateway-server:/opt/graviteeio-gateway/config
+ - ./.config/hazelcast.xml:/opt/graviteeio-gateway/config/hazelcast.xml
+ - ./.config/logback.xml:/opt/graviteeio-gateway/config/logback.xml
+ environment:
+ - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200
+ networks:
+ - storage
+ - frontend
+
+ gateway_client:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-gateway:${APIM_VERSION:-latest}
+ container_name: gio_apim_gateway_client
+ restart: always
+ ports:
+ - "8082:8082"
+ depends_on:
+ mongodb:
+ condition: service_healthy
+ elasticsearch:
+ condition: service_healthy
+ volumes:
+ - ./.logs/apim-gateway-client:/opt/graviteeio-gateway/logs
+ - ./.license:/opt/graviteeio-gateway/license
+ - ./.config/gateway-client:/opt/graviteeio-gateway/config
+ - ./.config/hazelcast.xml:/opt/graviteeio-gateway/config/hazelcast.xml
+ - ./.config/logback.xml:/opt/graviteeio-gateway/config/logback.xml
+ environment:
+ - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200
+ networks:
+ - storage
+ - frontend
+
+
+ gateway_client_2:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-gateway:${APIM_VERSION:-latest}
+ container_name: gio_apim_gateway_client_2
+ restart: always
+ ports:
+ - "8081:8082"
+ depends_on:
+ mongodb:
+ condition: service_healthy
+ elasticsearch:
+ condition: service_healthy
+ volumes:
+ - ./.logs/apim-gateway-client-2:/opt/graviteeio-gateway/logs
+ - ./.license:/opt/graviteeio-gateway/license
+ - ./.config/gateway-client:/opt/graviteeio-gateway/config
+ - ./.config/hazelcast.xml:/opt/graviteeio-gateway/config/hazelcast.xml
+ - ./.config/logback.xml:/opt/graviteeio-gateway/config/logback.xml
+ environment:
+ - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200
+ networks:
+ - storage
+ - frontend
+
+ management_api:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-management-api:${APIM_VERSION:-latest}
+ container_name: gio_apim_management_api
+ restart: always
+ ports:
+ - "8083:8083"
+ links:
+ - mongodb
+ - elasticsearch
+ depends_on:
+ mongodb:
+ condition: service_healthy
+ elasticsearch:
+ condition: service_healthy
+ volumes:
+ - ./.logs/apim-management-api:/opt/graviteeio-management-api/logs
+ - ./.license:/opt/graviteeio-management-api/license
+ environment:
+ - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000
+ - gravitee_analytics_elasticsearch_endpoints_0=http://elasticsearch:9200
+ networks:
+ - storage
+ - frontend
+
+ management_ui:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-management-ui:${APIM_VERSION:-latest}
+ container_name: gio_apim_management_ui
+ restart: always
+ ports:
+ - "8084:8080"
+ depends_on:
+ - management_api
+ environment:
+ - MGMT_API_URL=http://localhost:8083/management/
+ volumes:
+ - ./.logs/apim-management-ui:/var/log/nginx
+ networks:
+ - frontend
diff --git a/docker/quick-setup/distributed-sync-bridge/test-bridge-crash-new-gateway-sync.sh b/docker/quick-setup/distributed-sync-bridge/test-bridge-crash-new-gateway-sync.sh
new file mode 100755
index 00000000000..98ecf196fc4
--- /dev/null
+++ b/docker/quick-setup/distributed-sync-bridge/test-bridge-crash-new-gateway-sync.sh
@@ -0,0 +1,428 @@
+#!/bin/bash
+#
+# Copyright © 2015 The Gravitee team (http://gravitee.io)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# Exit immediately if a command exits with a non-zero status.
+set -e
+
+# --- Configuration ---
+export APIM_REGISTRY=${APIM_REGISTRY:-graviteeio.azurecr.io}
+export APIM_VERSION=${APIM_VERSION:-master-latest}
+
+# Match CONTAINER names
+BRIDGE_SERVER_CONTAINER_NAME="gio_apim_gateway_server"
+BRIDGE_CLIENT_1_CONTAINER_NAME="gio_apim_gateway_client"
+BRIDGE_CLIENT_2_CONTAINER_NAME="gio_apim_gateway_client_2"
+REDIS_CONTAINER_NAME="redis_stack"
+MONGO_CONTAINER_NAME="gio_apim_mongodb"
+MGMT_API_CONTAINER_NAME="gio_apim_management_api"
+ELASTIC_CONTAINER_NAME="gio_apim_elasticsearch"
+
+# Match SERVICE names
+REDIS_SERVICE_NAME="redis-stack"
+MONGO_SERVICE_NAME="mongodb"
+ELASTIC_SERVICE_NAME="elasticsearch"
+BRIDGE_SERVER_SERVICE_NAME="gateway_server"
+BRIDGE_CLIENT_1_SERVICE_NAME="gateway_client"
+BRIDGE_CLIENT_2_SERVICE_NAME="gateway_client_2"
+MGMT_API_SERVICE_NAME="management_api"
+
+API_CONTEXT_PATH="testDSPWithBridge"
+API_NAME="testDSPWithBridge" # Match the name in the payload
+
+BRIDGE_SERVER_PORT="18092"
+BRIDGE_CLIENT_1_PORT="8082"
+BRIDGE_CLIENT_2_PORT="8081"
+MGMT_API_URL="http://localhost:8083"
+MGMT_API_USER="admin"
+MGMT_API_PASS="admin"
+
+WAIT_TIME_INITIAL=60
+WAIT_TIME_SECONDARY=30
+
+# --- Helper Functions ---
+log_info() {
+ echo
+ echo "🔵 [INFO] $(date '+%Y-%m-%d %H:%M:%S') $1"
+}
+
+log_success() {
+ echo "✅ [SUCCESS] $(date '+%Y-%m-%d %H:%M:%S') $1"
+}
+
+log_error() {
+ echo "❌ [ERROR] $(date '+%Y-%m-%d %H:%M:%S') $1" >&2
+ # Dump recent container logs to help diagnose the failure before exiting
+ echo "--- Last 20 lines of ${BRIDGE_SERVER_CONTAINER_NAME} ---"
+ docker logs --tail 20 ${BRIDGE_SERVER_CONTAINER_NAME} 2>/dev/null || echo "Could not get logs for ${BRIDGE_SERVER_CONTAINER_NAME}"
+ echo "--- Last 20 lines of ${BRIDGE_CLIENT_1_CONTAINER_NAME} ---"
+ docker logs --tail 20 ${BRIDGE_CLIENT_1_CONTAINER_NAME} 2>/dev/null || echo "Could not get logs for ${BRIDGE_CLIENT_1_CONTAINER_NAME}"
+ echo "--- Last 20 lines of ${BRIDGE_CLIENT_2_CONTAINER_NAME} ---"
+ docker logs --tail 20 ${BRIDGE_CLIENT_2_CONTAINER_NAME} 2>/dev/null || echo "Could not get logs for ${BRIDGE_CLIENT_2_CONTAINER_NAME}"
+ exit 1
+}
+
+cleanup() {
+ log_info "Cleaning up Docker environment..."
+ if docker compose ps -q &> /dev/null; then
+ docker compose down -v --remove-orphans
+ else
+ log_info "No active docker compose project found to clean up."
+ fi
+ log_info "Cleanup complete."
+}
+
+# --- Main Test ---
+trap cleanup EXIT
+
+echo
+log_info "Test that a new gateway joining a cluster that uses a bridge must sync if the bridge crashes"
+echo
+
+log_info "Starting Bridge Server, First Client, and Core Services..."
+echo
+docker compose up -d ${REDIS_SERVICE_NAME} ${MONGO_SERVICE_NAME} ${ELASTIC_SERVICE_NAME} ${BRIDGE_SERVER_SERVICE_NAME} ${BRIDGE_CLIENT_1_SERVICE_NAME} ${MGMT_API_SERVICE_NAME}
+
+log_info "Waiting ${WAIT_TIME_INITIAL}s for services (especially Gateway) to initialize..."
+sleep $WAIT_TIME_INITIAL
+
+log_info "Verifying initial containers started..."
+echo
+docker compose ps --filter "name=${BRIDGE_SERVER_CONTAINER_NAME}" --filter "status=running" | grep -q $BRIDGE_SERVER_CONTAINER_NAME || log_error "${BRIDGE_SERVER_CONTAINER_NAME} failed to start."
+docker compose ps --filter "name=${BRIDGE_CLIENT_1_CONTAINER_NAME}" --filter "status=running" | grep -q $BRIDGE_CLIENT_1_CONTAINER_NAME || log_error "${BRIDGE_CLIENT_1_CONTAINER_NAME} failed to start."
+docker compose ps --filter "name=${MGMT_API_CONTAINER_NAME}" --filter "status=running" | grep -q $MGMT_API_CONTAINER_NAME || log_error "${MGMT_API_CONTAINER_NAME} failed to start."
+
+echo
+log_success "Initial containers are running."
+echo
+
+SERVER_LOG_OUTPUT=$(docker logs $BRIDGE_SERVER_CONTAINER_NAME)
+
+log_info "Printing last few log lines from Bridge Server (${BRIDGE_SERVER_CONTAINER_NAME})..."
+echo "-----------------------------------------------------"
+docker logs --tail 30 $BRIDGE_SERVER_CONTAINER_NAME
+echo "-----------------------------------------------------"
+echo
+
+echo
+if echo "$SERVER_LOG_OUTPUT" | grep -q "Sync service has been scheduled with delay"; then
+log_success "Bridge Server started"; fi
+echo
+if echo "$SERVER_LOG_OUTPUT" | grep -q "A node joined the cluster"; then
+log_success "Client joined the Bridge Server. Cluster is formed."; fi
+echo
+# --- Automated API Deployment using Import ---
+log_info "Importing API definition for '${API_NAME}' with context path '/${API_CONTEXT_PATH}'..."
+echo
+AUTH_HEADER=$(echo -n "${MGMT_API_USER}:${MGMT_API_PASS}" | base64)
+
+IMPORT_PAYLOAD=$(cat < /dev/null 2>&1; then
+ log_info "Redis index not found yet, waiting 5s more..."
+ sleep 5
+ if ! docker exec $REDIS_CONTAINER_NAME redis-cli FT.INFO "gravitee-sync-events-idx" > /dev/null 2>&1; then
+ log_error "Redis index 'gravitee-sync-events-idx' does not exist. Primary client likely failed to init DSP."
+ fi
+fi
+REDIS_KEYS_OUTPUT=$(docker exec $REDIS_CONTAINER_NAME redis-cli KEYS '*' || echo "KEYS failed")
+if [[ "$REDIS_KEYS_OUTPUT" == "KEYS failed" ]]; then
+ log_error "Failed to execute KEYS command in Redis."
+fi
+echo
+echo "--- Redis KEYS * Output ---"
+echo "$REDIS_KEYS_OUTPUT"
+echo "---------------------------"
+echo
+if ! echo "$REDIS_KEYS_OUTPUT" | grep -q -E 'distributed_event:|distributed_sync_state:'; then
+ log_error "No 'distributed_event:*' or 'distributed_sync_state:*' keys found in Redis. DSP sync likely failed."
+fi
+log_success "Found keys in Redis (verify DSP keys are present above)."
+
+log_info "Simulating Bridge failure: Stopping ${BRIDGE_SERVER_SERVICE_NAME} & ${MONGO_SERVICE_NAME}"
+docker compose stop gateway_server mongodb
+log_info "Waiting 5s..."
+sleep 5
+
+log_info "Attempting to start Second Bridge Client (${BRIDGE_CLIENT_2_SERVICE_NAME})..."
+docker compose up -d ${BRIDGE_CLIENT_2_SERVICE_NAME}
+
+log_info "Waiting ${WAIT_TIME_SECONDARY}s for Second Bridge Client to start and sync from Redis..."
+sleep $WAIT_TIME_SECONDARY
+
+log_info "Verifying Second Bridge Client started..."
+echo
+docker compose ps --filter "name=${BRIDGE_CLIENT_2_CONTAINER_NAME}" --filter "status=running" | grep -q $BRIDGE_CLIENT_2_CONTAINER_NAME || log_error "${BRIDGE_CLIENT_2_CONTAINER_NAME} failed to start."
+log_success "Second Bridge Client (${BRIDGE_CLIENT_2_CONTAINER_NAME}) is running."
+
+log_info "Printing last 15 log lines from Second Bridge Client (${BRIDGE_CLIENT_2_CONTAINER_NAME})..."
+echo "-----------------------------------------------------"
+docker logs --tail 15 $BRIDGE_CLIENT_2_CONTAINER_NAME
+echo "-----------------------------------------------------"
+echo
+
+log_info "Verifying Second Bridge Client synced API '/${API_CONTEXT_PATH}' from Redis..."
+echo
+sleep 5
+LOG_OUTPUT=$(docker logs $BRIDGE_CLIENT_2_CONTAINER_NAME)
+SYNC_LOG_FOUND=false
+DEPLOY_LOG_FOUND=false
+BRIDGE_ERROR_DURING_SYNC_FOUND=false
+
+if echo "$LOG_OUTPUT" | grep -q "i.g.g.s.s.p.d.s.AbstractDistributedSynchronizer"; then SYNC_LOG_FOUND=true; fi
+if echo "$LOG_OUTPUT" | grep -q "API id\[.*\] name\[${API_NAME}\] version\[.*\] has been deployed"; then DEPLOY_LOG_FOUND=true; fi
+if echo "$LOG_OUTPUT" | grep "Fetching event" -A 10 | grep -q -E "Failed|Error|Unable to sync.*http.*${BRIDGE_SERVER_SERVICE_NAME}"; then BRIDGE_ERROR_DURING_SYNC_FOUND=true; fi
+
+if ! $SYNC_LOG_FOUND; then
+ log_error "Second gateway logs do not show 'Fetching events from the distributed repository'."
+echo
+elif ! $DEPLOY_LOG_FOUND; then
+ log_error "Second gateway logs show 'Fetching events' but NOT deployment of API '${API_NAME}'."
+echo
+elif $BRIDGE_ERROR_DURING_SYNC_FOUND; then
+ log_info "NOTE: Second gateway might have logged errors trying to connect to the (down) bridge during startup/sync attempts, as expected."
+echo
+fi
+log_success "Second Bridge Client appears to have synced API '${API_NAME}' from Redis."
+echo
+
+log_info "Testing API access via Second Bridge Client (port ${BRIDGE_CLIENT_2_PORT})..."
+echo
+HTTP_STATUS_CLIENT2=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 http://localhost:${BRIDGE_CLIENT_2_PORT}/${API_CONTEXT_PATH} || echo "Curl failed")
+if [[ "$HTTP_STATUS_CLIENT2" == "Curl failed" ]]; then
+ log_error "Failed to connect to Second Bridge Client on port ${BRIDGE_CLIENT_2_PORT}."
+elif [ "$HTTP_STATUS_CLIENT2" -ne 200 ] && [ "$HTTP_STATUS_CLIENT2" -ne 401 ] && [ "$HTTP_STATUS_CLIENT2" -ne 403 ]; then
+ log_error "API call via Second Bridge Client failed with status code $HTTP_STATUS_CLIENT2."
+else
+ log_success "API call via Second Bridge Client returned expected status code $HTTP_STATUS_CLIENT2."
+fi
+
+echo
+log_success "Bridge & Primary Client Failure Test Scenario Completed Successfully!"
+echo
+
+exit 0
diff --git a/docker/quick-setup/distributed-sync/.config/gravitee.yml b/docker/quick-setup/distributed-sync/.config/gravitee.yml
new file mode 100644
index 00000000000..12d27973517
--- /dev/null
+++ b/docker/quick-setup/distributed-sync/.config/gravitee.yml
@@ -0,0 +1,27 @@
+management:
+ type: mongodb
+ mongodb:
+ uri: mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000
+
+ratelimit:
+ type: mongodb
+ mongodb:
+ uri: mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000
+
+cluster:
+ type: hazelcast
+ hazelcast:
+ config-path: /opt/graviteeio-gateway/config/hazelcast.xml
+
+distributed-sync:
+ type: redis
+ redis:
+ host: redis-stack
+ port: 6379
+
+services:
+ sync:
+ repository:
+ enabled: true
+ distributed:
+ enabled: true
\ No newline at end of file
diff --git a/docker/quick-setup/distributed-sync/.config/hazelcast.xml b/docker/quick-setup/distributed-sync/.config/hazelcast.xml
new file mode 100644
index 00000000000..3c6b197e769
--- /dev/null
+++ b/docker/quick-setup/distributed-sync/.config/hazelcast.xml
@@ -0,0 +1,26 @@
+
+
+
+
+
+ gio-apim-gateway-cluster
+
+ 5701
+
+
+
+
+ gio_apim_gateway_primary
+ gio_apim_gateway_secondary
+
+
+
+
\ No newline at end of file
diff --git a/docker/quick-setup/distributed-sync/.config/logback.xml b/docker/quick-setup/distributed-sync/.config/logback.xml
new file mode 100644
index 00000000000..330f05b7328
--- /dev/null
+++ b/docker/quick-setup/distributed-sync/.config/logback.xml
@@ -0,0 +1,65 @@
+
+
+
+
+
+
+
+
+
+ %d{HH:mm:ss.SSS} [%thread] [%X{api}] %-5level %logger{36} - %msg%n
+
+
+
+
+ ${gravitee.home}/logs/gravitee.log
+
+
+ ${gravitee.home}/logs/gravitee_%d{yyyy-MM-dd}.log
+
+
+ 30
+
+
+
+ %d{HH:mm:ss.SSS} [%thread] [%X{api}] %-5level %logger{36} - %msg%n
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docker/quick-setup/distributed-sync/README.md b/docker/quick-setup/distributed-sync/README.md
new file mode 100644
index 00000000000..3939ffbcafa
--- /dev/null
+++ b/docker/quick-setup/distributed-sync/README.md
@@ -0,0 +1,44 @@
+# Gateway with Distributed Sync
+
+Here is a docker-compose to run APIM with two gateways in a primary (master) / secondary setup.
+
+You can call your APIs as usual through either gateway, for example: `http://localhost:8082/myapi` or `http://localhost:8081/myapi`.
+
+## How to run?
+
+⚠️ You need a license file to be able to run Enterprise Edition of APIM. Do not forget to add your license file into `./.license`.
+
+`APIM_VERSION={APIM_VERSION} docker-compose up -d`
+
+To be sure to fetch the latest version of the images, you can do
+`export APIM_VERSION={APIM_VERSION} && docker-compose down -v && docker-compose pull && docker-compose up`
+
+If you want to run only one gateway first and then the other, you can do
+`export APIM_REGISTRY=graviteeio.azurecr.io && export APIM_VERSION=master-latest && docker compose up -d redis-stack mongodb elasticsearch gateway_primary management_api management_ui`
+
+and then to start the secondary gateway, you can do
+`export APIM_REGISTRY=graviteeio.azurecr.io && export APIM_VERSION=master-latest && docker compose up -d gateway_secondary`
+
+To see what is inside Redis, for example the distributed sync data, you can do
+`redis-cli`
+
+and then inside redis-cli, you can do
+`KEYS 'distributed*'`
+
+## Scenario Testing
+
+Below are the two scenarios to test distributed sync process. Logs/Info/Success messages are printed as output.
+
+### Scenario 1: Test that a new gateway must sync without access to the DB
+
+Run the script `test-new-gateway-sync-without-db.sh`.
+
+The script above starts all the services along with primary gateway.
+It then kills the primary gateway and the database, starts the new gateway and verifies that it synced via redis.
+
+### Scenario 2: If the master node of the cluster crashes, test that a new master must be elected
+
+Run the script `test-master-crash-new-master.sh`.
+
+The script above starts all the services along with primary and secondary gateway.
+It then kills the primary/master gateway and verifies that the secondary gateway takes over as the primary/master gateway.
\ No newline at end of file
diff --git a/docker/quick-setup/distributed-sync/docker-compose.yml b/docker/quick-setup/distributed-sync/docker-compose.yml
new file mode 100644
index 00000000000..ea08b2cbfb1
--- /dev/null
+++ b/docker/quick-setup/distributed-sync/docker-compose.yml
@@ -0,0 +1,172 @@
+#
+# Copyright © 2015 The Gravitee team (http://gravitee.io)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+networks:
+ frontend:
+ name: frontend
+ storage:
+ name: storage
+ email:
+ name: email
+
+volumes:
+ data-elasticsearch:
+ data-mongo:
+ apim-mongodb-logs:
+ apim-gateway-logs:
+ apim-gateway-secondary-logs:
+ apim-management-api-logs:
+ apim-management-ui-logs:
+ apim-portal-ui-logs:
+
+services:
+ redis-stack:
+ image: redis/redis-stack:latest
+ container_name: redis_stack
+ ports:
+ - "6379:6379"
+ networks:
+ - storage
+ - frontend
+
+ mongodb:
+ image: mongo:${MONGODB_VERSION:-6.0}
+ container_name: gio_apim_mongodb
+ restart: always
+ volumes:
+ - data-mongo:/data/db
+ - apim-mongodb-logs:/var/log/mongodb
+ healthcheck:
+ test: mongosh --eval 'db.runCommand({serverStatus:1}).ok' --quiet | grep 1
+ interval: 5s
+ timeout: 3s
+ retries: 10
+ networks:
+ - storage
+
+ elasticsearch:
+ image: docker.elastic.co/elasticsearch/elasticsearch:${ELASTIC_VERSION:-8.17.2}
+ container_name: gio_apim_elasticsearch
+ restart: always
+ volumes:
+ - data-elasticsearch:/usr/share/elasticsearch/data
+ environment:
+ - http.host=0.0.0.0
+ - transport.host=0.0.0.0
+ - xpack.security.enabled=false
+ - cluster.name=elasticsearch
+ - bootstrap.memory_lock=true
+ - discovery.type=single-node
+ - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
+ ulimits:
+ memlock:
+ soft: -1
+ hard: -1
+ nofile: 65536
+ healthcheck:
+ test: ["CMD", "curl", "-f", "http://localhost:9200/_cluster/health?wait_for_status=yellow&timeout=5s" ]
+ interval: 5s
+ timeout: 3s
+ retries: 10
+ networks:
+ - storage
+
+ gateway_primary:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-gateway:${APIM_VERSION:-latest}
+ container_name: gio_apim_gateway_primary
+ restart: always
+ ports:
+ - "8082:8082"
+ depends_on:
+ mongodb:
+ condition: service_healthy
+ elasticsearch:
+ condition: service_healthy
+ volumes:
+ - apim-gateway-logs:/opt/graviteeio-gateway/logs
+ - ./.license:/opt/graviteeio-gateway/license
+ - ./.config:/opt/graviteeio-gateway/config
+ environment:
+ - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200
+ - "license.key=${LICENSE_KEY}"
+ networks:
+ - storage
+ - frontend
+
+
+ gateway_secondary:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-gateway:${APIM_VERSION:-latest}
+ container_name: gio_apim_gateway_secondary
+ restart: always
+ ports:
+ - "8081:8082"
+ depends_on:
+ mongodb:
+ condition: service_healthy
+ elasticsearch:
+ condition: service_healthy
+ volumes:
+ - apim-gateway-secondary-logs:/opt/graviteeio-gateway/logs
+ - ./.license:/opt/graviteeio-gateway/license
+ - ./.config:/opt/graviteeio-gateway/config
+ environment:
+ - gravitee_reporters_elasticsearch_endpoints_0=http://elasticsearch:9200
+ - "license.key=${LICENSE_KEY}"
+ networks:
+ - storage
+ - frontend
+
+ management_api:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-management-api:${APIM_VERSION:-latest}
+ container_name: gio_apim_management_api
+ restart: always
+ ports:
+ - "8083:8083"
+ links:
+ - mongodb
+ - elasticsearch
+ depends_on:
+ mongodb:
+ condition: service_healthy
+ elasticsearch:
+ condition: service_healthy
+ volumes:
+ - apim-management-api-logs:/opt/graviteeio-management-api/logs
+ - ./.license:/opt/graviteeio-management-api/license
+ environment:
+ - gravitee_management_mongodb_uri=mongodb://mongodb:27017/gravitee?serverSelectionTimeoutMS=5000&connectTimeoutMS=5000&socketTimeoutMS=5000
+ - gravitee_analytics_elasticsearch_endpoints_0=http://elasticsearch:9200
+ - "license.key=${LICENSE_KEY}"
+ networks:
+ - storage
+ - frontend
+ - email
+
+ management_ui:
+ image: ${APIM_REGISTRY:-graviteeio}/apim-management-ui:${APIM_VERSION:-latest}
+ container_name: gio_apim_management_ui
+ restart: always
+ ports:
+ - "8084:8080"
+ depends_on:
+ - management_api
+ environment:
+ - MGMT_API_URL=http://localhost:8083/management/
+ #- DEFAULT_PORTAL=next # Uncomment to set the portal-next as the default portal for management UI environment redirection.
+ volumes:
+ - apim-management-ui-logs:/var/log/nginx
+ networks:
+ - frontend
\ No newline at end of file
diff --git a/docker/quick-setup/distributed-sync/test-master-crash-new-master.sh b/docker/quick-setup/distributed-sync/test-master-crash-new-master.sh
new file mode 100755
index 00000000000..2671ef35606
--- /dev/null
+++ b/docker/quick-setup/distributed-sync/test-master-crash-new-master.sh
@@ -0,0 +1,374 @@
+#!/bin/bash
+#
+# Copyright © 2015 The Gravitee team (http://gravitee.io)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+# Exit immediately if a command exits with a non-zero status.
+set -e
+
+# --- Configuration ---
+export APIM_REGISTRY=${APIM_REGISTRY:-graviteeio.azurecr.io}
+export APIM_VERSION=${APIM_VERSION:-master-latest}
+
+PRIMARY_GW_CONTAINER_NAME="gio_apim_gateway_primary"
+SECONDARY_GW_CONTAINER_NAME="gio_apim_gateway_secondary"
+REDIS_CONTAINER_NAME="redis_stack"
+MONGO_CONTAINER_NAME="gio_apim_mongodb"
+MGMT_API_CONTAINER_NAME="gio_apim_management_api"
+ELASTIC_CONTAINER_NAME="gio_apim_elasticsearch"
+
+REDIS_SERVICE_NAME="redis-stack"
+MONGO_SERVICE_NAME="mongodb"
+ELASTIC_SERVICE_NAME="elasticsearch"
+PRIMARY_GW_SERVICE_NAME="gateway_primary"
+SECONDARY_GW_SERVICE_NAME="gateway_secondary"
+MGMT_API_SERVICE_NAME="management_api"
+
+# --- API Details ---
+API_CONTEXT_PATH="testDSPWithoutDBSync"
+API_NAME="testDSPWithoutDBSync"
+# -------------------
+
+PRIMARY_GW_PORT="8082"
+SECONDARY_GW_PORT="8081"
+MGMT_API_URL="http://localhost:8083"
+MGMT_API_USER="admin"
+MGMT_API_PASS="admin"
+
+WAIT_TIME_INITIAL=60
+WAIT_TIME_SECONDARY=30
+
+# --- Helper Functions ---
+log_info() {
+ echo
+ echo "🔵 [INFO] $(date '+%Y-%m-%d %H:%M:%S') $1"
+}
+
+log_success() {
+ echo "✅ [SUCCESS] $(date '+%Y-%m-%d %H:%M:%S') $1"
+}
+
+log_error() {
+ echo "❌ [ERROR] $(date '+%Y-%m-%d %H:%M:%S') $1" >&2
+ echo "--- Last 20 lines of ${PRIMARY_GW_CONTAINER_NAME} ---"
+ docker logs --tail 20 ${PRIMARY_GW_CONTAINER_NAME} 2>/dev/null || echo "Could not get logs for ${PRIMARY_GW_CONTAINER_NAME}"
+ echo "--- Last 20 lines of ${SECONDARY_GW_CONTAINER_NAME} ---"
+ docker logs --tail 20 ${SECONDARY_GW_CONTAINER_NAME} 2>/dev/null || echo "Could not get logs for ${SECONDARY_GW_CONTAINER_NAME}"
+ exit 1
+}
+
+cleanup() {
+ log_info "Cleaning up Docker environment..."
+ if docker compose ps -q &> /dev/null; then
+ docker compose down -v --remove-orphans
+ else
+ log_info "No active docker compose project found to clean up."
+ fi
+ log_info "Cleanup complete."
+}
+
+# --- Main Test ---
+trap cleanup EXIT
+
+echo
+log_info "Test that a new master must be elected if the master node of the cluster crashes."
+echo
+
+log_info "Starting Primary Gateway and Core Services..."
+export APIM_REGISTRY=${APIM_REGISTRY} && export APIM_VERSION=${APIM_VERSION} && docker compose up -d ${REDIS_SERVICE_NAME} ${MONGO_SERVICE_NAME} ${ELASTIC_SERVICE_NAME} ${PRIMARY_GW_SERVICE_NAME} ${MGMT_API_SERVICE_NAME}
+
+log_info "Waiting ${WAIT_TIME_INITIAL}s for services to initialize..."
+sleep $WAIT_TIME_INITIAL
+
+log_info "Now starting secondary gateway"
+export APIM_REGISTRY=${APIM_REGISTRY} && export APIM_VERSION=${APIM_VERSION} && docker compose up -d ${SECONDARY_GW_SERVICE_NAME}
+
+log_info "Waiting ${WAIT_TIME_INITIAL}s for secondary gateway to start"
+sleep $WAIT_TIME_INITIAL
+
+log_info "Verifying initial containers started..."
+
+# Check Management API is running via docker ps
+docker compose ps --filter "name=${MGMT_API_CONTAINER_NAME}" --filter "status=running" | grep -q $MGMT_API_CONTAINER_NAME || log_error "${MGMT_API_CONTAINER_NAME} container failed to start."
+
+if docker logs $MGMT_API_CONTAINER_NAME | grep -m 1 -q "Started oejs.Server"; then
+ log_success "Management API reported ready."
+else
+ log_error "Management API (${MGMT_API_CONTAINER_NAME}) having errors."
+fi
+
+# Check Primary Gateway is running via docker ps
+docker compose ps --filter "name=${PRIMARY_GW_CONTAINER_NAME}" --filter "status=running" | grep -q $PRIMARY_GW_CONTAINER_NAME || log_error "${PRIMARY_GW_CONTAINER_NAME} container failed to start."
+
+# Check Secondary Gateway is running via docker ps
+docker compose ps --filter "name=${SECONDARY_GW_CONTAINER_NAME}" --filter "status=running" | grep -q $SECONDARY_GW_CONTAINER_NAME || log_error "${SECONDARY_GW_CONTAINER_NAME} container failed to start."
+
+PRIMARY_GW_LOG_OUTPUT=$(docker logs $PRIMARY_GW_CONTAINER_NAME)
+SECONDARY_GW_LOG_OUTPUT=$(docker logs $SECONDARY_GW_CONTAINER_NAME)
+
+echo
+if echo "$PRIMARY_GW_LOG_OUTPUT" | grep -q "Sync service has been scheduled with delay"; then
+log_success "Primary Gateway started"; fi
+echo
+if echo "$SECONDARY_GW_LOG_OUTPUT" | grep -q "Sync service has been scheduled with delay"; then
+log_success "Secondary Gateway started"; fi
+echo
+
+log_info "Printing last few log lines from Primary Gateway Server (${PRIMARY_GW_CONTAINER_NAME})..."
+echo "-----------------------------------------------------"
+echo "$PRIMARY_GW_LOG_OUTPUT" | tail -n 20
+echo "-----------------------------------------------------"
+echo
+log_info "Printing last few log lines from Secondary Gateway Server (${SECONDARY_GW_CONTAINER_NAME})..."
+echo "-----------------------------------------------------"
+echo "$SECONDARY_GW_LOG_OUTPUT" | tail -n 20
+echo "-----------------------------------------------------"
+echo
+
+
+if echo "$PRIMARY_GW_LOG_OUTPUT" | grep -q "A node joined the cluster"; then
+log_success "Primary Gateway is the master gateway. Cluster is formed as confirmed via logs of the container."; fi
+
+# --- Automated API Deployment using Import ---
+log_info "Importing API definition for '${API_NAME}' with context path '/${API_CONTEXT_PATH}'..."
+echo
+AUTH_HEADER=$(echo -n "${MGMT_API_USER}:${MGMT_API_PASS}" | base64)
+
+IMPORT_PAYLOAD=$(cat <&2
+ echo "--- Last 20 lines of ${PRIMARY_GW_CONTAINER_NAME} ---"
+ docker logs --tail 20 ${PRIMARY_GW_CONTAINER_NAME} 2>/dev/null || echo "Could not get logs for ${PRIMARY_GW_CONTAINER_NAME}"
+ echo "--- Last 20 lines of ${SECONDARY_GW_CONTAINER_NAME} ---"
+ docker logs --tail 20 ${SECONDARY_GW_CONTAINER_NAME} 2>/dev/null || echo "Could not get logs for ${SECONDARY_GW_CONTAINER_NAME}"
+ exit 1
+}
+
+cleanup() {
+ log_info "Cleaning up Docker environment..."
+ if docker compose ps -q &> /dev/null; then
+ docker compose down -v --remove-orphans
+ else
+ log_info "No active docker compose project found to clean up."
+ fi
+ log_info "Cleanup complete."
+}
+
+# --- Main Test ---
+trap cleanup EXIT
+
+echo
+log_info "Test that a new gateway must sync without access to DB"
+echo
+
+log_info "Starting Primary Gateway and Core Services..."
+export APIM_REGISTRY=${APIM_REGISTRY} && export APIM_VERSION=${APIM_VERSION} && docker compose up -d ${REDIS_SERVICE_NAME} ${MONGO_SERVICE_NAME} ${ELASTIC_SERVICE_NAME} ${PRIMARY_GW_SERVICE_NAME} ${MGMT_API_SERVICE_NAME}
+
+log_info "Waiting ${WAIT_TIME_INITIAL}s for services (especially Management API & Primary GW) to initialize..."
+sleep $WAIT_TIME_INITIAL
+
+log_info "Verifying initial containers started..."
+
+# Check Management API is running via docker ps
+docker compose ps --filter "name=${MGMT_API_CONTAINER_NAME}" --filter "status=running" | grep -q $MGMT_API_CONTAINER_NAME || log_error "${MGMT_API_CONTAINER_NAME} container failed to start."
+
+if docker logs $MGMT_API_CONTAINER_NAME | grep -m 1 -q "Started oejs.Server"; then
+ log_success "Management API reported ready."
+else
+ log_error "Management API (${MGMT_API_CONTAINER_NAME}) did not log 'Started oejs.Server' within the timeout."
+fi
+
+
+# Check Primary Gateway is running via docker ps
+docker compose ps --filter "name=${PRIMARY_GW_CONTAINER_NAME}" --filter "status=running" | grep -q $PRIMARY_GW_CONTAINER_NAME || log_error "${PRIMARY_GW_CONTAINER_NAME} container failed to start."
+if docker logs $PRIMARY_GW_CONTAINER_NAME | grep -m 1 -q "Sync service has been scheduled"; then
+ log_success "Primary Gateway appears ready."
+else
+ log_error "Primary Gateway (${PRIMARY_GW_CONTAINER_NAME}) did not log 'Sync service has been scheduled'."
+fi
+echo
+
+# --- Automated API Deployment using Import ---
+log_info "Importing API definition for '${API_NAME}' with context path '/${API_CONTEXT_PATH}'..."
+echo
+AUTH_HEADER=$(echo -n "${MGMT_API_USER}:${MGMT_API_PASS}" | base64)
+
+IMPORT_PAYLOAD=$(cat <