diff --git a/.github/actions/setup_databend_cluster/action.yml b/.github/actions/setup_databend_cluster/action.yml
deleted file mode 100644
index b4890ab0..00000000
--- a/.github/actions/setup_databend_cluster/action.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-name: "Setup Stateful Cluster Linux"
-description: "Running stateful tests in cluster mode"
-inputs:
- version:
- description: "query and meta service version"
- required: true
- default: "1.2.710-nightly"
- target:
- description: ""
- required: true
- default: "x86_64-unknown-linux-gnu"
-runs:
- using: "composite"
- steps:
-
- - name: Minio Setup for (ubuntu-latest only)
- shell: bash
- run: |
- docker run -d --network host --name minio \
- -e "MINIO_ACCESS_KEY=minioadmin" \
- -e "MINIO_SECRET_KEY=minioadmin" \
- -e "MINIO_ADDRESS=:9900" \
- -v /tmp/data:/data \
- -v /tmp/config:/root/.minio \
- minio/minio server /data
-
- export AWS_ACCESS_KEY_ID=minioadmin
- export AWS_SECRET_ACCESS_KEY=minioadmin
- export AWS_EC2_METADATA_DISABLED=true
-
- aws --endpoint-url http://127.0.0.1:9900/ s3 mb s3://testbucket
-
- - name: Start Nginx
- shell: bash
- run: |
- docker run -d --network host --name nginx-lb \
- -v ${{ github.workspace }}/scripts/ci/nginx_rr.conf:/etc/nginx/nginx.conf:ro \
- nginx
-
- - name: Download binary and extract into target directory
- shell: bash
- run: |
- wget --progress=bar:force:noscroll https://github.com/datafuselabs/databend/releases/download/v${{ inputs.version }}/databend-v${{ inputs.version }}-${{ inputs.target }}.tar.gz
- mkdir -p ./databend
- tar -xzvf databend-v${{ inputs.version }}-${{ inputs.target }}.tar.gz -C ./databend
- rm databend-v${{ inputs.version }}-${{ inputs.target }}.tar.gz
-
- - name: Start Databend Cluster
- shell: bash
- run: |
- chmod +x ./databend/bin/databend-meta
- chmod +x ./databend/bin/databend-query
- chmod +x ./scripts/wait_tcp.py
- chmod +x ./scripts/deploy/deploy_cluster.sh
- ./scripts/deploy/deploy_cluster.sh
diff --git a/.github/workflows/cron.integration.yml b/.github/workflows/cron.integration.yml
index c4560cb4..4f4de483 100644
--- a/.github/workflows/cron.integration.yml
+++ b/.github/workflows/cron.integration.yml
@@ -17,21 +17,8 @@ concurrency:
jobs:
test:
runs-on: ubuntu-latest
- services:
- databend:
- image: datafuselabs/databend:nightly
- env:
- QUERY_DEFAULT_USER: databend
- QUERY_DEFAULT_PASSWORD: databend
- MINIO_ENABLED: true
- ports:
- - 8000:8000
- - 9000:9000
steps:
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- ref: ${{ github.ref }}
+ - uses: actions/checkout@v4
- name: Set up JDK 17
uses: actions/setup-java@v4
@@ -42,17 +29,16 @@ jobs:
gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }} # Value of the GPG private key to import
gpg-passphrase: MAVEN_GPG_PASSPHRASE # env variable for GPG private key passphrase
- - name: Verify Service Running
- run: |
- sleep 30
- cid=$(docker ps -a | grep databend | cut -d' ' -f1)
- docker logs ${cid}
- curl -u databend:databend --request POST localhost:8000/v1/query --header 'Content-Type:application/json' --data-raw '{"sql":"select 1"}'
+ - name: Start Cluster With Nginx and Minio
+ working-directory: tests
+ run: make up
- - name: Run Maven clean deploy with release profile
- run: mvn test -DexcludedGroups=cluster,FLAKY
+ - name: Test with conn to nginx
+ run: mvn test -DexcludedGroups=FLAKY
env:
MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
+ DATABEND_TEST_CONN_PORT: 8000
+
notify:
if: failure()
needs: [ test ]
diff --git a/.github/workflows/test_cluster.yml b/.github/workflows/test_cluster.yml
index 780c4453..a47b6abf 100644
--- a/.github/workflows/test_cluster.yml
+++ b/.github/workflows/test_cluster.yml
@@ -1,23 +1,24 @@
-name: Databend Cluster Tests
+name: Cluster Tests
on:
push:
branches:
- main
- - master
pull_request:
branches:
- main
- - master
jobs:
test:
runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ version:
+ - "nightly"
+ - "v1.2.790-nightly"
steps:
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- ref: ${{ github.ref }}
+ - uses: actions/checkout@v4
- name: Set up JDK 17
uses: actions/setup-java@v4
@@ -28,33 +29,15 @@ jobs:
gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }} # Value of the GPG private key to import
gpg-passphrase: MAVEN_GPG_PASSPHRASE # env variable for GPG private key passphrase
- - uses: ./.github/actions/setup_databend_cluster
- timeout-minutes: 15
- with:
- version: '1.2.754-nightly'
- target: 'x86_64-unknown-linux-gnu'
-
- - name: Test with conn to node 1
- run: mvn test -DexcludedGroups=FLAKY
- env:
- MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
-
- - name: View Nginx logs
- run: docker logs nginx-lb
-
- - name: check nginx
- run: |
- curl -u 'databend:databend' -X POST "http://localhost:8010/v1/query" \
- -H 'Content-Type: application/json' \
- -d '{"sql": "select 1", "pagination": { "wait_time_secs": 5 }}' || true
+ - name: Start Cluster With Nginx and Minio
+ working-directory: tests
+ run: make up
env:
- MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
-
- - name: View Nginx logs
- run: docker logs nginx-lb
+ DATABEND_QUERY_VERSION: ${{ matrix.version }}
- name: Test with conn to nginx
run: mvn test -DexcludedGroups=FLAKY
env:
MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
- DATABEND_TEST_CONN_PORT: 8010
+ DATABEND_TEST_CONN_PORT: 8000
+ DATABEND_QUERY_VERSION: ${{ matrix.version }}
diff --git a/.github/workflows/test_compatibility.yml b/.github/workflows/test_compatibility.yml
new file mode 100644
index 00000000..f8d8ab95
--- /dev/null
+++ b/.github/workflows/test_compatibility.yml
@@ -0,0 +1,54 @@
+name: Compatibility Tests
+
+on:
+ push:
+ branches:
+ - main
+ pull_request:
+ branches:
+ - main
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ server:
+ - "nightly"
+ - "v1.2.790-nightly"
+ driver:
+ - "0.4.0"
+ - "0.3.9"
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Set up JDK 17
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: '17'
+ cache: 'maven'
+ gpg-private-key: ${{ secrets.GPG_PRIVATE_KEY }} # Value of the GPG private key to import
+ gpg-passphrase: MAVEN_GPG_PASSPHRASE # env variable for GPG private key passphrase
+
+ - name: Start Cluster With Nginx and Minio
+ working-directory: tests
+ run: make up
+ env:
+ DATABEND_QUERY_VERSION: ${{ matrix.server }}
+
+ - name: Build driver package
+ run: mvn clean package -DskipTests
+ env:
+ MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
+
+ - name: Run compatibility tests
+ working-directory: tests/compatibility
+ run: sh test_compatibility.sh
+ env:
+ MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
+ DATABEND_TEST_CONN_PORT: 8000
+ DATABEND_QUERY_VERSION: ${{ matrix.server }}
+ DATABEND_JDBC_VERSION: ${{ matrix.driver }}
+ TEST_SIDE: "driver"
diff --git a/.github/workflows/test.yml b/.github/workflows/test_standalone.yml
similarity index 95%
rename from .github/workflows/test.yml
rename to .github/workflows/test_standalone.yml
index 71f34255..179e6fb9 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test_standalone.yml
@@ -1,4 +1,4 @@
-name: Tests
+name: Standalone Test
on:
push:
@@ -51,6 +51,6 @@ jobs:
curl -u databend:databend --request POST localhost:8000/v1/query --header 'Content-Type:application/json' --data-raw '{"sql":"select 1"}'
- name: Run Maven clean deploy with release profile
- run: mvn test -DexcludedGroups=CLUSTER,FLAKY
+ run: mvn test -DexcludedGroups=MULTI_HOST,FLAKY
env:
MAVEN_GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
diff --git a/.gitignore b/.gitignore
index 02b0ef86..676fbe0b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,6 @@ databend-jdbc/databend-jdbc-debug.log
target/
/databend/
.databend/
+tests/data
+tests/compatibility/*.jar
+test-output
diff --git a/README.md b/README.md
index 18cdc343..3584bd8d 100644
--- a/README.md
+++ b/README.md
@@ -50,25 +50,28 @@ import java.sql.ResultSet;
public class Main {
public static void main(String[] args) throws SQLException {
- Connection conn = DriverManager.getConnection("jdbc:databend://localhost:8000", "root", "");
- Statement statement = conn.createStatement();
- statement.execute("SELECT number from numbers(200000) order by number");
- ResultSet r = statement.getResultSet();
- // ** We must call `rs.next()` otherwise the query may be canceled **
- while (rs.next()) {
- System.out.println(r.getInt(1));
+ try ( Connection conn = DriverManager.getConnection("jdbc:databend://localhost:8000", "root", "");
+ Statement statement = conn.createStatement()
+ ) {
+ statement.execute("SELECT number from numbers(200000) order by number");
+ try(ResultSet r = statement.getResultSet()){
+ // ** We must call `rs.next()` otherwise the query may be canceled **
+ while (r.next()) {
+ System.out.println(r.getInt(1));
+ }
+ }
}
- conn.close();
}
}
```
### Important Notes
-1. Because the `select`, `copy into`, `merge into` are query type SQL, they will return a `ResultSet` object, you must
+1. Close Connection/Statement/ResultSet to release resources faster.
+2. Because the `select`, `copy into`, `merge into` are query type SQL, they will return a `ResultSet` object, you must
call `rs.next()` before accessing the data. Otherwise, the query may be canceled. If you do not want get the result,
you can call `while(r.next(){})` to iterate over the result set.
-2. For other SQL such as `create/drop table` non-query type SQL, you can call `statement.execute()` directly.
+3. For other SQL such as `create/drop table` non-query type SQL, you can call `statement.execute()` directly.
## JDBC Java type mapping
The Databend type is mapped to Java type as follows:
@@ -99,3 +102,84 @@ For detailed references, please take a look at the following Links:
1. [Connection Parameters](./docs/Connection.md) : detailed documentation about how to use connection parameters in a
jdbc connection
+
+
+# FileTransfer API
+
+The `FileTransferAPI` interface provides a high-performance, Java-based mechanism for streaming data directly between your application and Databend's internal stage, eliminating the need for intermediate local files. It is designed for efficient bulk data operations.
+
+## Key Features
+
+* **Streaming Upload/Download:** Directly transfer data using `InputStream`, supporting large files without excessive memory consumption
+* **Direct Table Loading:** Ingest data from streams or staged files directly into Databend tables using the `COPY INTO` command
+* **Compression:** Supports on-the-fly compression and decompression during transfer to optimize network traffic
+* **Flexible Data Ingestion:** Offers both stage-based and streaming-based methods for loading data into tables
+
+## Core Methods
+
+### `uploadStream`
+Uploads a data stream as a single file to the specified internal stage.
+
+**Parameters:**
+- `stageName`: The stage which will receive the uploaded file
+- `destPrefix`: The prefix of the file name in the stage
+- `inputStream`: The input stream of the file data
+- `destFileName`: The destination file name in the stage
+- `fileSize`: The size of the file being uploaded
+- `compressData`: Whether to compress the data during transfer
+
+### `downloadStream`
+Downloads a file from the internal stage and returns it as an `InputStream`.
+
+**Parameters:**
+- `stageName`: The stage which contains the file to download
+- `sourceFileName`: The name of the file in the stage
+- `decompress`: Whether to decompress the data during download
+
+**Returns:** `InputStream` of the downloaded file content
+
+
+### `loadStreamToTable`
+A versatile method to load data from a stream directly into a table, using either a staging or streaming approach.
+
+Available with databend-jdbc >= 0.4 AND databend-query >= 1.2.791.
+
+**Parameters:**
+- `sql`: SQL statement with specific syntax for data loading
+- `inputStream`: The input stream of the file data to load
+- `fileSize`: The size of the file being loaded
+- `loadMethod`: The loading method - "stage" or "streaming". The `stage` method first uploads the file to a special path in the user stage, while the `streaming` method loads the data into the table while transforming it in transit.
+
+**Returns:** Number of rows successfully loaded
+
+## Quick Start
+
+The following example demonstrates how to upload data and load it into a table:
+
+```java
+// 1. Upload a file to the internal stage
+Connection conn = DriverManager.getConnection("jdbc:databend://localhost:8000");
+FileTransferAPI api = conn.unwrap(DatabendConnection.class);
+
+FileInputStream fileStream = new FileInputStream("data.csv");
+api.uploadStream(
+ "my_stage",
+ "uploads/",
+ fileStream,
+ "data.csv",
+ Files.size(Paths.get("data.csv")),
+ true // Compress the data during upload
+);
+fileStream.close();
+
+// 2. Load the staged file into a table
+fileStream = new FileInputStream("data.csv");
+String sql = "insert into my_table from @_databend_load file_format=(type=csv)"; // use special stage `_databend_load`
+api.loadStreamToTable(sql, fileStream, Files.size(Paths.get("data.csv")), "stage");
+fileStream.close();
+conn.close();
+
+
+```
+
+> **Important:** Callers are responsible for properly closing the provided `InputStream` objects after operations are complete.
diff --git a/databend-client/pom.xml b/databend-client/pom.xml
index 7c827b3a..80100a9f 100644
--- a/databend-client/pom.xml
+++ b/databend-client/pom.xml
@@ -18,25 +18,21 @@
${project.parent.basedir}
8
- 2.15.2
com.fasterxml.jackson.core
jackson-annotations
- ${jackson.version}
com.fasterxml.jackson.core
jackson-core
- ${jackson.version}
com.fasterxml.jackson.core
jackson-databind
- ${jackson.version}
@@ -53,14 +49,13 @@
com.google.guava
guava
- 32.0.1-jre
com.squareup.okhttp3
okhttp
-
+
com.squareup.okio
okio
@@ -76,18 +71,8 @@
okhttp-urlconnection
-
- com.github.zafarkhaja
- java-semver
-
-
-
- io.airlift
- json
- test
-
org.testng
testng
diff --git a/databend-client/src/main/java/com/databend/client/ClientSettings.java b/databend-client/src/main/java/com/databend/client/ClientSettings.java
index f9297bbc..3b6ef4a8 100644
--- a/databend-client/src/main/java/com/databend/client/ClientSettings.java
+++ b/databend-client/src/main/java/com/databend/client/ClientSettings.java
@@ -31,6 +31,8 @@ public class ClientSettings {
public static final String X_DATABEND_STICKY_NODE = "X-DATABEND-STICKY-NODE";
public static final String DatabendWarehouseHeader = "X-DATABEND-WAREHOUSE";
public static final String DatabendTenantHeader = "X-DATABEND-TENANT";
+ public static final String DatabendSQLHeader = "X-DATABEND-SQL";
+ public static final String DatabendQueryContextHeader = "X-DATABEND-QUERY-CONTEXT";
private final String host;
private final DatabendSession session;
private final Integer queryTimeoutSecs;
@@ -40,14 +42,14 @@ public class ClientSettings {
private final PaginationOptions paginationOptions;
private final StageAttachment stageAttachment;
- private Map additionalHeaders;
+ private final Map additionalHeaders;
private final int retryAttempts;
// TODO(zhihanz) timezone and locale info
//ClientSettings for test case use
public ClientSettings(String host) {
- this(host, DatabendSession.createDefault(), DEFAULT_QUERY_TIMEOUT, DEFAULT_CONNECTION_TIMEOUT, DEFAULT_SOCKET_TIMEOUT, PaginationOptions.defaultPaginationOptions(), new HashMap(), null, DEFAULT_RETRY_ATTEMPTS);
+ this(host, DatabendSession.createDefault(), DEFAULT_QUERY_TIMEOUT, DEFAULT_CONNECTION_TIMEOUT, DEFAULT_SOCKET_TIMEOUT, PaginationOptions.defaultPaginationOptions(), new HashMap<>(), null, DEFAULT_RETRY_ATTEMPTS);
}
public ClientSettings(String host, String database) {
diff --git a/databend-client/src/main/java/com/databend/client/DatabendClient.java b/databend-client/src/main/java/com/databend/client/DatabendClient.java
index a265c3b1..01a1e476 100644
--- a/databend-client/src/main/java/com/databend/client/DatabendClient.java
+++ b/databend-client/src/main/java/com/databend/client/DatabendClient.java
@@ -29,8 +29,6 @@ public interface DatabendClient extends Closeable {
String getNodeID();
- String getServerVersion();
-
Map getAdditionalHeaders();
diff --git a/databend-client/src/main/java/com/databend/client/DatabendClientFactory.java b/databend-client/src/main/java/com/databend/client/DatabendClientFactory.java
deleted file mode 100644
index 7750bc3b..00000000
--- a/databend-client/src/main/java/com/databend/client/DatabendClientFactory.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.databend.client;
-
-public final class DatabendClientFactory {
- private DatabendClientFactory() {
- }
-
-}
diff --git a/databend-client/src/main/java/com/databend/client/DatabendClientV1.java b/databend-client/src/main/java/com/databend/client/DatabendClientV1.java
index 85f5e770..2f97bc02 100644
--- a/databend-client/src/main/java/com/databend/client/DatabendClientV1.java
+++ b/databend-client/src/main/java/com/databend/client/DatabendClientV1.java
@@ -16,7 +16,6 @@
import com.databend.client.errors.CloudErrors;
import okhttp3.*;
-import okio.Buffer;
import javax.annotation.concurrent.ThreadSafe;
import java.io.IOException;
@@ -50,10 +49,6 @@ public class DatabendClientV1
public static final MediaType MEDIA_TYPE_JSON = MediaType.parse("application/json; charset=utf-8");
public static final JsonCodec QUERY_RESULTS_CODEC = jsonCodec(QueryResults.class);
public static final JsonCodec DISCOVERY_RESULT_CODEC = jsonCodec(DiscoveryResponseCodec.DiscoveryResponse.class);
- public static final String succeededState = "succeeded";
- public static final String failedState = "failed";
- public static final String runningState = "running";
-
public static final String QUERY_PATH = "/v1/query";
public static final String DISCOVERY_PATH = "/v1/discovery_nodes";
@@ -66,15 +61,14 @@ public class DatabendClientV1
private final PaginationOptions paginationOptions;
// request with retry timeout
private final Integer requestTimeoutSecs;
- private final Map additonalHeaders;
- private String serverVersion;
+ private final Map additionalHeaders;
// client session
private final AtomicReference databendSession;
private String nodeID;
private final AtomicReference currentResults = new AtomicReference<>(null);
private static final Logger logger = Logger.getLogger(DatabendClientV1.class.getPackage().getName());
- private Consumer on_session_state_update;
+ private final Consumer on_session_state_update;
public DatabendClientV1(OkHttpClient httpClient, String sql, ClientSettings settings,
Consumer on_session_state_update,
@@ -89,7 +83,7 @@ public DatabendClientV1(OkHttpClient httpClient, String sql, ClientSettings sett
this.host = settings.getHost();
this.paginationOptions = settings.getPaginationOptions();
this.requestTimeoutSecs = settings.getQueryTimeoutSecs();
- this.additonalHeaders = settings.getAdditionalHeaders();
+ this.additionalHeaders = settings.getAdditionalHeaders();
this.maxRetryAttempts = settings.getRetryAttempts();
this.databendSession = new AtomicReference<>(settings.getSession());
this.nodeID = last_node_id.get();
@@ -126,7 +120,7 @@ public static Request.Builder prepareRequest(HttpUrl url, Map ad
}
private Request buildQueryRequest(String query, ClientSettings settings) {
- HttpUrl url = HttpUrl.get(settings.getHost());
+ HttpUrl url = HttpUrl.parse(settings.getHost());
if (url == null) {
// TODO(zhihanz) use custom exception
throw new IllegalArgumentException("Invalid host: " + settings.getHost());
@@ -139,7 +133,7 @@ private Request buildQueryRequest(String query, ClientSettings settings) {
}
url = url.newBuilder().encodedPath(QUERY_PATH).build();
- Request.Builder builder = prepareRequest(url, this.additonalHeaders);
+ Request.Builder builder = prepareRequest(url, this.additionalHeaders);
DatabendSession session = databendSession.get();
if (session != null && session.getNeedSticky()) {
builder.addHeader(ClientSettings.X_DATABEND_STICKY_NODE, nodeID);
@@ -149,10 +143,6 @@ private Request buildQueryRequest(String query, ClientSettings settings) {
private static Request buildDiscoveryRequest(ClientSettings settings) {
HttpUrl url = HttpUrl.get(settings.getHost());
- if (url == null) {
- // TODO(zhihanz) use custom exception
- throw new IllegalArgumentException("Invalid host: " + settings.getHost());
- }
String discoveryPath = DISCOVERY_PATH;
// intentionally use unsupported discovery path for testing
if (settings.getAdditionalHeaders().get("~mock.unsupported.discovery") != null && BOOLEAN_TRUE_STR.equals(settings.getAdditionalHeaders().get("~mock.unsupported.discovery"))) {
@@ -313,19 +303,6 @@ private boolean executeInternal(Request request, OptionalLong materializedJsonSi
}
}
- private String requestBodyToString(Request request) {
- try {
- final Request copy = request.newBuilder().build();
- final Buffer buffer = new Buffer();
- if (copy.body() != null) {
- copy.body().writeTo(buffer);
- }
- return buffer.readUtf8();
- } catch (final IOException e) {
- return "did not work";
- }
- }
-
@Override
public boolean execute(Request request) {
return executeInternal(request, OptionalLong.empty());
@@ -340,20 +317,13 @@ private void processResponse(Headers headers, QueryResults results) {
this.on_session_state_update.accept(session);
}
}
- if (results.getQueryId() != null && this.additonalHeaders.get(ClientSettings.X_Databend_Query_ID) == null) {
- this.additonalHeaders.put(ClientSettings.X_Databend_Query_ID, results.getQueryId());
+ if (results.getQueryId() != null && this.additionalHeaders.get(ClientSettings.X_Databend_Query_ID) == null) {
+ this.additionalHeaders.put(ClientSettings.X_Databend_Query_ID, results.getQueryId());
}
if (headers != null) {
- String serverVersionString = headers.get(ClientSettings.X_DATABEND_VERSION);
- if (serverVersionString != null) {
- try {
- serverVersion = serverVersionString;
- } catch (Exception ignored) {
- }
- }
String route_hint = headers.get(ClientSettings.X_DATABEND_ROUTE_HINT);
if (route_hint != null) {
- this.additonalHeaders.put(ClientSettings.X_DATABEND_ROUTE_HINT, route_hint);
+ this.additionalHeaders.put(ClientSettings.X_DATABEND_ROUTE_HINT, route_hint);
}
}
currentResults.set(results);
@@ -375,7 +345,7 @@ public boolean advance() {
String nextUriPath = this.currentResults.get().getNextUri().toString();
HttpUrl url = HttpUrl.get(this.host);
url = url.newBuilder().encodedPath(nextUriPath).build();
- Request.Builder builder = prepareRequest(url, this.additonalHeaders);
+ Request.Builder builder = prepareRequest(url, this.additionalHeaders);
builder.addHeader(ClientSettings.X_DATABEND_STICKY_NODE, this.nodeID);
Request request = builder.get().build();
return executeInternal(request, OptionalLong.of(MAX_MATERIALIZED_JSON_RESPONSE_SIZE));
@@ -388,7 +358,7 @@ public boolean hasNext() {
@Override
public Map getAdditionalHeaders() {
- return additonalHeaders;
+ return additionalHeaders;
}
@Override
@@ -406,12 +376,6 @@ public String getNodeID() {
return this.nodeID;
}
- @Override
- public String getServerVersion() {
- return this.serverVersion;
- }
-
-
@Override
public void close() {
closeQuery();
@@ -432,7 +396,7 @@ private void closeQuery() {
String path = uri.toString();
HttpUrl url = HttpUrl.get(this.host);
url = url.newBuilder().encodedPath(path).build();
- Request r = prepareRequest(url, this.additonalHeaders).get().build();
+ Request r = prepareRequest(url, this.additionalHeaders).get().build();
try {
httpClient.newCall(r).execute().close();
} catch (IOException ignored) {
diff --git a/databend-client/src/main/java/com/databend/client/QuerySchema.java b/databend-client/src/main/java/com/databend/client/QuerySchema.java
deleted file mode 100644
index cc8ef013..00000000
--- a/databend-client/src/main/java/com/databend/client/QuerySchema.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.databend.client;
-
-// "schema":[{"name":"column1","type":"UInt8"}]
-// "schema":[{"name":"column2","type":"Nullable(UInt8)"}]
-//public class QuerySchema {
-//
-// @JsonCreator
-// public QuerySchema(
-// @JsonProperty() List fields) {
-// this.fields = fields;
-// }
-//
-// // add builder
-//
-// @JsonProperty
-// public List getFields() {
-// return fields;
-// }
-//
-//
-// @Override
-// public String toString() {
-// return toStringHelper(this)
-// .add("fields", fields)
-// .toString();
-// }
-//}
diff --git a/databend-client/src/main/java/com/databend/client/ServerInfo.java b/databend-client/src/main/java/com/databend/client/ServerInfo.java
deleted file mode 100644
index 41494356..00000000
--- a/databend-client/src/main/java/com/databend/client/ServerInfo.java
+++ /dev/null
@@ -1,36 +0,0 @@
-package com.databend.client;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-public class ServerInfo {
- private final String id;
- private final String startTime;
-
- @JsonCreator
- public ServerInfo(
- @JsonProperty("id") String id,
- @JsonProperty("start_time") String startTime) {
- this.id = id;
- this.startTime = startTime;
- }
-
- @JsonProperty
- public String getId() {
- return id;
- }
-
- @JsonProperty("start_time")
- public String getStartTime() {
- return startTime;
- }
-
- @JsonProperty
- @Override
- public String toString() {
- return "ServerInfo{" +
- "id='" + id + '\'' +
- ", startTime='" + startTime + '\'' +
- '}';
- }
-}
diff --git a/databend-client/src/main/java/com/databend/client/ServerVersions.java b/databend-client/src/main/java/com/databend/client/ServerVersions.java
deleted file mode 100644
index 295750e9..00000000
--- a/databend-client/src/main/java/com/databend/client/ServerVersions.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package com.databend.client;
-
-import com.github.zafarkhaja.semver.Version;
-
-public class ServerVersions {
- private static final Version HEARTBEAT = Version.forIntegers(1, 2, 709);
-
- public static boolean supportHeartbeat(Version ver) {
- return ver != null && ver.greaterThan(HEARTBEAT);
- }
-}
diff --git a/databend-client/src/main/java/com/databend/client/constant/DatabendConstant.java b/databend-client/src/main/java/com/databend/client/constant/DatabendConstant.java
index 824f098d..037fe0ab 100644
--- a/databend-client/src/main/java/com/databend/client/constant/DatabendConstant.java
+++ b/databend-client/src/main/java/com/databend/client/constant/DatabendConstant.java
@@ -1,3 +1,17 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package com.databend.client.constant;
/**
diff --git a/databend-client/src/main/java/com/databend/client/data/ColumnTypeHandlerBase.java b/databend-client/src/main/java/com/databend/client/data/ColumnTypeHandlerBase.java
index 6a667a21..2731c129 100644
--- a/databend-client/src/main/java/com/databend/client/data/ColumnTypeHandlerBase.java
+++ b/databend-client/src/main/java/com/databend/client/data/ColumnTypeHandlerBase.java
@@ -1,3 +1,17 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package com.databend.client.data;
public abstract class ColumnTypeHandlerBase implements ColumnTypeHandler {
diff --git a/databend-client/src/main/java/com/databend/client/data/DatabendDataType.java b/databend-client/src/main/java/com/databend/client/data/DatabendDataType.java
index ad874568..5b42141a 100644
--- a/databend-client/src/main/java/com/databend/client/data/DatabendDataType.java
+++ b/databend-client/src/main/java/com/databend/client/data/DatabendDataType.java
@@ -1,3 +1,17 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package com.databend.client.data;
diff --git a/databend-client/src/test/java/com/databend/client/TestClientIT.java b/databend-client/src/test/java/com/databend/client/TestClientIT.java
index 47ba2800..43c10f01 100644
--- a/databend-client/src/test/java/com/databend/client/TestClientIT.java
+++ b/databend-client/src/test/java/com/databend/client/TestClientIT.java
@@ -97,7 +97,7 @@ public void testBasicQueryIDHeader() {
Assert.assertEquals(cli1.getAdditionalHeaders().get(X_Databend_Query_ID), expectedUUID1);
}
- @Test(groups = {"it"})
+ @Test(groups = {"IT"})
public void testDiscoverNodes() {
OkHttpClient client = new OkHttpClient.Builder().addInterceptor(OkHttpUtils.basicAuthInterceptor("databend", "databend")).build();
String expectedUUID = UUID.randomUUID().toString().replace("-", "");
diff --git a/databend-client/src/test/java/com/databend/client/TestDiscoveryNodes.java b/databend-client/src/test/java/com/databend/client/TestDiscoveryNodes.java
index cd150ccc..55024254 100644
--- a/databend-client/src/test/java/com/databend/client/TestDiscoveryNodes.java
+++ b/databend-client/src/test/java/com/databend/client/TestDiscoveryNodes.java
@@ -1,3 +1,17 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package com.databend.client;
import com.databend.client.errors.QueryErrors;
diff --git a/databend-client/src/test/java/com/databend/client/TestQueryErrors.java b/databend-client/src/test/java/com/databend/client/TestQueryErrors.java
index c1afa4cf..8561d8fc 100644
--- a/databend-client/src/test/java/com/databend/client/TestQueryErrors.java
+++ b/databend-client/src/test/java/com/databend/client/TestQueryErrors.java
@@ -15,18 +15,21 @@
package com.databend.client;
import com.databend.client.errors.QueryErrors;
-import io.airlift.json.JsonCodec;
+import com.fasterxml.jackson.core.JsonProcessingException;
import org.testng.Assert;
import org.testng.annotations.Test;
-import static io.airlift.json.JsonCodec.jsonCodec;
+import static com.databend.client.JsonCodec.jsonCodec;
+
+
+
@Test(timeOut = 10000)
public class TestQueryErrors
{
private static final JsonCodec QUERY_ERROR_JSON_CODEC = jsonCodec(QueryErrors.class);
@Test( groups = {"unit"} )
- public void testQueryError() {
+ public void testQueryError() throws JsonProcessingException {
String json = "{\"code\": 1000, \"message\": \"test\"}";
Assert.assertEquals(QUERY_ERROR_JSON_CODEC.fromJson(json).getCode(), 1000);
Assert.assertEquals(QUERY_ERROR_JSON_CODEC.fromJson(json).getMessage(), "test");
diff --git a/databend-client/src/test/java/com/databend/client/TestQueryResults.java b/databend-client/src/test/java/com/databend/client/TestQueryResults.java
index ebc1bd4c..4d0e80b0 100644
--- a/databend-client/src/test/java/com/databend/client/TestQueryResults.java
+++ b/databend-client/src/test/java/com/databend/client/TestQueryResults.java
@@ -14,20 +14,22 @@
package com.databend.client;
-import io.airlift.json.JsonCodec;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
import org.testng.Assert;
import org.testng.annotations.Test;
import java.util.List;
-import static io.airlift.json.JsonCodec.jsonCodec;
+import static com.databend.client.JsonCodec.jsonCodec;
+
@Test(timeOut = 10000)
public class TestQueryResults {
private static final JsonCodec QUERY_RESULTS_CODEC = jsonCodec(QueryResults.class);
@Test(groups = {"unit"})
- public void testBasic() {
+ public void testBasic() throws JsonProcessingException {
String goldenValue = "{\"id\":\"5c4e776a-8171-462a-b2d3-6a34823d0552\",\"session_id\":\"3563624b-8767-44ff-a235-3f5bb4e54d03\",\"session\":{},\"schema\":[{\"name\":\"(number / 3)\",\"type\":\"Float64\"},{\"name\":\"(number + 1)\",\"type\":\"UInt64\"}],\"data\":[[\"0.0\",\"1\"],[\"0.3333333333333333\",\"2\"],[\"0.6666666666666666\",\"3\"],[\"1.0\",\"4\"],[\"1.3333333333333333\",\"5\"],[\"1.6666666666666667\",\"6\"],[\"2.0\",\"7\"],[\"2.3333333333333335\",\"8\"],[\"2.6666666666666665\",\"9\"],[\"3.0\",\"10\"]],\"state\":\"Succeeded\",\"error\":null,\"stats\":{\"scan_progress\":{\"rows\":10,\"bytes\":80},\"write_progress\":{\"rows\":0,\"bytes\":0},\"result_progress\":{\"rows\":10,\"bytes\":160},\"running_time_ms\":1.494205},\"affect\":null,\"stats_uri\":\"/v1/query/5c4e776a-8171-462a-b2d3-6a34823d0552\",\"final_uri\":\"/v1/query/5c4e776a-8171-462a-b2d3-6a34823d0552/final\",\"next_uri\":\"/v1/query/5c4e776a-8171-462a-b2d3-6a34823d0552/final\",\"kill_uri\":\"/v1/query/5c4e776a-8171-462a-b2d3-6a34823d0552/kill\"}";
QueryResults queryResults = QUERY_RESULTS_CODEC.fromJson(goldenValue);
Assert.assertEquals(queryResults.getQueryId(), "5c4e776a-8171-462a-b2d3-6a34823d0552");
@@ -44,27 +46,27 @@ public void testBasic() {
}
@Test(groups = "unit")
- public void TestError() {
+ public void TestError() throws JsonProcessingException {
String goldenValue = "{\"id\":\"\",\"session_id\":null,\"session\":null,\"schema\":[],\"data\":[],\"state\":\"Failed\",\"error\":{\"code\":1065,\"message\":\"error: \\n --> SQL:1:8\\n |\\n1 | select error\\n | ^^^^^ column doesn't exist\\n\\n\"},\"stats\":{\"scan_progress\":{\"rows\":0,\"bytes\":0},\"write_progress\":{\"rows\":0,\"bytes\":0},\"result_progress\":{\"rows\":0,\"bytes\":0},\"running_time_ms\":0.0},\"affect\":null,\"stats_uri\":null,\"final_uri\":null,\"next_uri\":null,\"kill_uri\":null}";
QueryResults queryResults = QUERY_RESULTS_CODEC.fromJson(goldenValue);
Assert.assertEquals(queryResults.getQueryId(), "");
- Assert.assertEquals(queryResults.getSessionId(), null);
- Assert.assertEquals(queryResults.getSession(), null);
+ Assert.assertNull(queryResults.getSessionId());
+ Assert.assertNull(queryResults.getSession());
Assert.assertEquals(queryResults.getState(), "Failed");
Assert.assertEquals(queryResults.getError().getCode(), 1065);
- Assert.assertEquals(queryResults.getError().getMessage().contains("error: \n --> SQL:1:8"), true);
+ Assert.assertTrue(queryResults.getError().getMessage().contains("error: \n --> SQL:1:8"));
}
@Test(groups = "unit")
- public void TestDateTime() {
+ public void TestDateTime() throws JsonProcessingException {
String goldenString = "{\"id\":\"1fbbaf5b-8807-47d3-bb9c-122a3b7c527c\",\"session_id\":\"ef4a4a66-7a81-4a90-b6ab-d484313111b8\",\"session\":{},\"schema\":[{\"name\":\"date\",\"type\":\"Date\"},{\"name\":\"ts\",\"type\":\"Timestamp\"}],\"data\":[[\"2022-04-07\",\"2022-04-07 01:01:01.123456\"],[\"2022-04-08\",\"2022-04-08 01:01:01.000000\"],[\"2022-04-07\",\"2022-04-07 01:01:01.123456\"],[\"2022-04-08\",\"2022-04-08 01:01:01.000000\"],[\"2022-04-07\",\"2022-04-07 01:01:01.123456\"],[\"2022-04-08\",\"2022-04-08 01:01:01.000000\"]],\"state\":\"Succeeded\",\"error\":null,\"stats\":{\"scan_progress\":{\"rows\":6,\"bytes\":72},\"write_progress\":{\"rows\":0,\"bytes\":0},\"result_progress\":{\"rows\":6,\"bytes\":72},\"running_time_ms\":7.681399},\"affect\":null,\"stats_uri\":\"/v1/query/1fbbaf5b-8807-47d3-bb9c-122a3b7c527c\",\"final_uri\":\"/v1/query/1fbbaf5b-8807-47d3-bb9c-122a3b7c527c/final\",\"next_uri\":\"/v1/query/1fbbaf5b-8807-47d3-bb9c-122a3b7c527c/final\",\"kill_uri\":\"/v1/query/1fbbaf5b-8807-47d3-bb9c-122a3b7c527c/kill\"}";
QueryResults queryResults = QUERY_RESULTS_CODEC.fromJson(goldenString);
Assert.assertEquals(queryResults.getQueryId(), "1fbbaf5b-8807-47d3-bb9c-122a3b7c527c");
Assert.assertEquals(queryResults.getSessionId(), "ef4a4a66-7a81-4a90-b6ab-d484313111b8");
- Assert.assertEquals(queryResults.getSession().getDatabase(), null);
- Assert.assertEquals(queryResults.getSession().getSettings(), null);
+ Assert.assertNull(queryResults.getSession().getDatabase());
+ Assert.assertNull(queryResults.getSession().getSettings());
Assert.assertEquals(queryResults.getState(), "Succeeded");
- Assert.assertEquals(queryResults.getError(), null);
+ Assert.assertNull(queryResults.getError());
Assert.assertEquals(queryResults.getSchema().size(), 2);
Assert.assertEquals(queryResults.getSchema().get(0).getName(), "date");
Assert.assertEquals(queryResults.getSchema().get(0).getDataType().getType(), "Date");
@@ -77,7 +79,7 @@ public void TestDateTime() {
}
@Test(groups = "unit")
- public void TestUseDB() {
+ public void TestUseDB() throws JsonProcessingException {
String goldenString = "{\"id\":\"d0aa3285-0bf5-42da-b06b-0d3db55f10bd\",\"session_id\":\"ded852b7-0da2-46ba-8708-e6fcb1c33081\",\"session\":{\"database\":\"db2\"},\"schema\":[],\"data\":[],\"state\":\"Succeeded\",\"error\":null,\"stats\":{\"scan_progress\":{\"rows\":0,\"bytes\":0},\"write_progress\":{\"rows\":0,\"bytes\":0},\"result_progress\":{\"rows\":0,\"bytes\":0},\"running_time_ms\":0.891883},\"affect\":{\"type\":\"UseDB\",\"name\":\"db2\"},\"stats_uri\":\"/v1/query/d0aa3285-0bf5-42da-b06b-0d3db55f10bd\",\"final_uri\":\"/v1/query/d0aa3285-0bf5-42da-b06b-0d3db55f10bd/final\",\"next_uri\":\"/v1/query/d0aa3285-0bf5-42da-b06b-0d3db55f10bd/final\",\"kill_uri\":\"/v1/query/d0aa3285-0bf5-42da-b06b-0d3db55f10bd/kill\"}";
QueryResults queryResults = QUERY_RESULTS_CODEC.fromJson(goldenString);
Assert.assertEquals(queryResults.getQueryId(), "d0aa3285-0bf5-42da-b06b-0d3db55f10bd");
@@ -87,7 +89,7 @@ public void TestUseDB() {
}
@Test(groups = "unit")
- public void TestChangeSettings() {
+ public void TestChangeSettings() throws JsonProcessingException {
String goldenString = "{\"id\":\"a59cf8ff-f8a0-4bf6-bb90-120d3ea140c0\",\"session_id\":\"3423881e-f57b-4c53-a432-cf665ac1fb3e\",\"session\":{\"settings\":{\"max_threads\":\"1\"}},\"schema\":[],\"data\":[],\"state\":\"Succeeded\",\"error\":null,\"stats\":{\"scan_progress\":{\"rows\":0,\"bytes\":0},\"write_progress\":{\"rows\":0,\"bytes\":0},\"result_progress\":{\"rows\":0,\"bytes\":0},\"running_time_ms\":0.81772},\"affect\":{\"type\":\"ChangeSettings\",\"keys\":[\"max_threads\"],\"values\":[\"1\"],\"is_globals\":[false]},\"stats_uri\":\"/v1/query/a59cf8ff-f8a0-4bf6-bb90-120d3ea140c0\",\"final_uri\":\"/v1/query/a59cf8ff-f8a0-4bf6-bb90-120d3ea140c0/final\",\"next_uri\":\"/v1/query/a59cf8ff-f8a0-4bf6-bb90-120d3ea140c0/final\",\"kill_uri\":\"/v1/query/a59cf8ff-f8a0-4bf6-bb90-120d3ea140c0/kill\"}";
QueryResults queryResults = QUERY_RESULTS_CODEC.fromJson(goldenString);
QueryAffect affect = queryResults.getAffect();
@@ -97,12 +99,12 @@ public void TestChangeSettings() {
Assert.assertEquals(((QueryAffect.ChangeSettings) affect).getValues().size(), 1);
Assert.assertEquals(((QueryAffect.ChangeSettings) affect).getValues().get(0), "1");
Assert.assertEquals(((QueryAffect.ChangeSettings) affect).getIsGlobals().size(), 1);
- Assert.assertEquals(((QueryAffect.ChangeSettings) affect).getIsGlobals().get(0).booleanValue(), false);
+ Assert.assertFalse(((QueryAffect.ChangeSettings) affect).getIsGlobals().get(0).booleanValue());
}
@Test(groups = "unit")
- public void TestArray() {
+ public void TestArray() throws JsonProcessingException {
String goldenString = "{\"id\":\"eecb2440-0180-45cb-8b21-23f4a9975df3\",\"session_id\":\"ef692df6-657d-42b8-a10d-6e6cac657abe\",\"session\":{},\"schema\":[{\"name\":\"id\",\"type\":\"Int8\"},{\"name\":\"obj\",\"type\":\"Variant\"},{\"name\":\"d\",\"type\":\"Timestamp\"},{\"name\":\"s\",\"type\":\"String\"},{\"name\":\"arr\",\"type\":\"Array(Int64)\"}],\"data\":[[\"1\",\"{\\\"a\\\": 1,\\\"b\\\": 2}\",\"1983-07-12 21:30:55.888000\",\"hello world, 你好\",\"[1,2,3,4,5]\"]],\"state\":\"Succeeded\",\"error\":null,\"stats\":{\"scan_progress\":{\"rows\":1,\"bytes\":131},\"write_progress\":{\"rows\":0,\"bytes\":0},\"result_progress\":{\"rows\":1,\"bytes\":131},\"running_time_ms\":9.827047},\"affect\":null,\"stats_uri\":\"/v1/query/eecb2440-0180-45cb-8b21-23f4a9975df3\",\"final_uri\":\"/v1/query/eecb2440-0180-45cb-8b21-23f4a9975df3/final\",\"next_uri\":\"/v1/query/eecb2440-0180-45cb-8b21-23f4a9975df3/final\",\"kill_uri\":\"/v1/query/eecb2440-0180-45cb-8b21-23f4a9975df3/kill\"}";
QueryResults queryResults = QUERY_RESULTS_CODEC.fromJson(goldenString);
Assert.assertEquals(queryResults.getQueryId(), "eecb2440-0180-45cb-8b21-23f4a9975df3");
@@ -112,14 +114,14 @@ public void TestArray() {
}
@Test(groups = "unit")
- public void TestVariant() {
+ public void TestVariant() throws JsonProcessingException {
String goldenString = "{\"id\":\"d74b2471-3a15-45e2-9ef4-ca8a39505661\",\"session_id\":\"f818e198-20d9-4c06-8de6-bc68ab6e9dc1\",\"session\":{},\"schema\":[{\"name\":\"var\",\"type\":\"Nullable(Variant)\"}],\"data\":[[\"1\"],[\"1.34\"],[\"true\"],[\"[1,2,3,[\\\"a\\\",\\\"b\\\",\\\"c\\\"]]\"],[\"{\\\"a\\\":1,\\\"b\\\":{\\\"c\\\":2}}\"]],\"state\":\"Succeeded\",\"error\":null,\"stats\":{\"scan_progress\":{\"rows\":5,\"bytes\":168},\"write_progress\":{\"rows\":0,\"bytes\":0},\"result_progress\":{\"rows\":5,\"bytes\":168},\"running_time_ms\":7.827281},\"affect\":null,\"stats_uri\":\"/v1/query/d74b2471-3a15-45e2-9ef4-ca8a39505661\",\"final_uri\":\"/v1/query/d74b2471-3a15-45e2-9ef4-ca8a39505661/final\",\"next_uri\":\"/v1/query/d74b2471-3a15-45e2-9ef4-ca8a39505661/final\",\"kill_uri\":\"/v1/query/d74b2471-3a15-45e2-9ef4-ca8a39505661/kill\"}\n";
QueryResults queryResults = QUERY_RESULTS_CODEC.fromJson(goldenString);
Assert.assertEquals(queryResults.getQueryId(), "d74b2471-3a15-45e2-9ef4-ca8a39505661");
Assert.assertEquals(queryResults.getSchema().size(), 1);
Assert.assertEquals(queryResults.getSchema().get(0).getName(), "var");
Assert.assertEquals(queryResults.getSchema().get(0).getDataType().getType(), "Variant");
- Assert.assertEquals(queryResults.getSchema().get(0).getDataType().isNullable(), true);
+ Assert.assertTrue(queryResults.getSchema().get(0).getDataType().isNullable());
}
}
diff --git a/databend-client/src/test/java/com/databend/client/data/TestColumnTypeHandlerFactory.java b/databend-client/src/test/java/com/databend/client/data/TestColumnTypeHandlerFactory.java
index 1611c722..666be104 100644
--- a/databend-client/src/test/java/com/databend/client/data/TestColumnTypeHandlerFactory.java
+++ b/databend-client/src/test/java/com/databend/client/data/TestColumnTypeHandlerFactory.java
@@ -1,3 +1,17 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package com.databend.client.data;
import org.testng.Assert;
diff --git a/databend-client/src/test/java/com/databend/client/data/TestDatabendTypes.java b/databend-client/src/test/java/com/databend/client/data/TestDatabendTypes.java
index 1e205d25..440d8aea 100644
--- a/databend-client/src/test/java/com/databend/client/data/TestDatabendTypes.java
+++ b/databend-client/src/test/java/com/databend/client/data/TestDatabendTypes.java
@@ -1,3 +1,17 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package com.databend.client.data;
import org.testng.Assert;
diff --git a/databend-jdbc/pom.xml b/databend-jdbc/pom.xml
index ca8523ed..382bb238 100644
--- a/databend-jdbc/pom.xml
+++ b/databend-jdbc/pom.xml
@@ -9,7 +9,6 @@
0.4.0
../pom.xml
- com.databend
databend-jdbc
0.4.0
databend-jdbc
@@ -43,6 +42,11 @@
com.fasterxml.jackson.core
jackson-databind
+
+ com.vdurmont
+ semver4j
+ 3.1.0
+
diff --git a/databend-jdbc/src/main/java/com/databend/jdbc/Capability.java b/databend-jdbc/src/main/java/com/databend/jdbc/Capability.java
new file mode 100644
index 00000000..cced8eef
--- /dev/null
+++ b/databend-jdbc/src/main/java/com/databend/jdbc/Capability.java
@@ -0,0 +1,20 @@
+package com.databend.jdbc;
+
+import com.vdurmont.semver4j.Semver;
+
+public class Capability {
+ private final boolean streamingLoad;
+ private final boolean heartbeat;
+ public Capability(Semver ver) {
+ streamingLoad = ver.isGreaterThan(new Semver("1.2.781"));
+ heartbeat = ver.isGreaterThan(new Semver("1.2.709"));
+ }
+
+ public boolean streamingLoad() {
+ return streamingLoad;
+ }
+
+ public boolean heartBeat() {
+ return heartbeat;
+ }
+}
diff --git a/databend-jdbc/src/main/java/com/databend/jdbc/DatabendConnection.java b/databend-jdbc/src/main/java/com/databend/jdbc/DatabendConnection.java
index 370e9ca0..f4ad77a7 100644
--- a/databend-jdbc/src/main/java/com/databend/jdbc/DatabendConnection.java
+++ b/databend-jdbc/src/main/java/com/databend/jdbc/DatabendConnection.java
@@ -1,13 +1,8 @@
package com.databend.jdbc;
-import com.databend.client.ClientSettings;
-import com.databend.client.DatabendClient;
-import com.databend.client.DatabendClientV1;
-import com.databend.client.DatabendSession;
-import com.databend.client.ServerVersions;
-import com.databend.client.PaginationOptions;
-import com.databend.client.QueryRequest;
-import com.databend.client.StageAttachment;
+import com.databend.client.*;
+
+import static com.databend.client.JsonCodec.jsonCodec;
import com.databend.jdbc.annotation.NotImplemented;
import com.databend.jdbc.cloud.DatabendCopyParams;
import com.databend.jdbc.cloud.DatabendPresignClient;
@@ -16,7 +11,11 @@
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
+import com.vdurmont.semver4j.Semver;
import okhttp3.*;
+import okio.BufferedSink;
+import okio.Okio;
+import okio.Source;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
@@ -25,6 +24,7 @@
import java.io.InputStream;
import java.net.ConnectException;
import java.net.URI;
+import java.nio.charset.StandardCharsets;
import java.sql.Array;
import java.sql.Blob;
import java.sql.CallableStatement;
@@ -42,6 +42,7 @@
import java.sql.Savepoint;
import java.sql.Statement;
import java.sql.Struct;
+import java.time.Instant;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
@@ -70,25 +71,30 @@
public class DatabendConnection implements Connection, FileTransferAPI, Consumer {
private static final Logger logger = Logger.getLogger(DatabendConnection.class.getPackage().getName());
+ public static final String STREAMING_LOAD_PATH = "/v1/streaming_load";
+ public static final String LOGIN_PATH = "/v1/session/login";
public static final String LOGOUT_PATH = "/v1/session/logout";
public static final String HEARTBEAT_PATH = "/v1/session/heartbeat";
- private static FileHandler FILE_HANDLER;
+ private static final ObjectMapper objectMapper = new ObjectMapper();
+ private static final JsonCodec SESSION_JSON_CODEC = jsonCodec(DatabendSession.class);
private final AtomicBoolean closed = new AtomicBoolean();
private final AtomicBoolean autoCommit = new AtomicBoolean(true);
private final URI httpUri;
private final AtomicReference schema = new AtomicReference<>();
private final OkHttpClient httpClient;
- private final ConcurrentHashMap statements = new ConcurrentHashMap();
+ private final ConcurrentHashMap statements = new ConcurrentHashMap<>();
private final DatabendDriverUri driverUri;
private boolean autoDiscovery;
- private AtomicReference session = new AtomicReference<>();
+ private final AtomicReference session = new AtomicReference<>();
private String routeHint = "";
- private AtomicReference lastNodeID = new AtomicReference<>();
+ private final AtomicReference lastNodeID = new AtomicReference<>();
+ private Semver serverVersion = null;
+ private Capability serverCapability = null;
- static ExecutorService heartbeatScheduler = null;
- private HeartbeatManager heartbeatManager = new HeartbeatManager();
+ static volatile ExecutorService heartbeatScheduler = null;
+ private final HeartbeatManager heartbeatManager = new HeartbeatManager();
private void initializeFileHandler() {
if (this.debug()) {
@@ -103,11 +109,11 @@ private void initializeFileHandler() {
System.setProperty("java.util.logging.FileHandler.count", "200");
// Enable log file reuse
System.setProperty("java.util.logging.FileHandler.append", "true");
- FILE_HANDLER = new FileHandler(file.getAbsolutePath(), Integer.parseInt(System.getProperty("java.util.logging.FileHandler.limit")),
+ FileHandler fileHandler = new FileHandler(file.getAbsolutePath(), Integer.parseInt(System.getProperty("java.util.logging.FileHandler.limit")),
Integer.parseInt(System.getProperty("java.util.logging.FileHandler.count")), true);
- FILE_HANDLER.setLevel(Level.ALL);
- FILE_HANDLER.setFormatter(new SimpleFormatter());
- logger.addHandler(FILE_HANDLER);
+ fileHandler.setLevel(Level.ALL);
+ fileHandler.setFormatter(new SimpleFormatter());
+ logger.addHandler(fileHandler);
} catch (Exception e) {
throw new RuntimeException("Failed to create FileHandler", e);
}
@@ -130,6 +136,42 @@ private void initializeFileHandler() {
this.setSession(session);
initializeFileHandler();
+ this.login();
+ }
+
+ public Semver getServerVersion() {
+ return this.serverVersion;
+ }
+
+ public Capability getServerCapability() {
+ return this.serverCapability;
+ }
+
+ private void login() throws SQLException {
+ RetryPolicy retryPolicy = new RetryPolicy(true, true);
+
+ HashMap headers = new HashMap<>();
+ headers.put("Accept", "application/json");
+ headers.put("Content-Type", "application/json");
+ try {
+ LoginRequest req = new LoginRequest();
+ req.database = this.getSchema();
+ req.settings = this.driverUri.getSessionSettings();
+ String bodyString = objectMapper.writeValueAsString(req);
+ RequestBody requestBody = RequestBody.create(MEDIA_TYPE_JSON, bodyString);
+
+ ResponseWithBody response = requestHelper(LOGIN_PATH, "post", requestBody, headers, retryPolicy);
+ // old server do not support this API
+ if (response.response.code() != 400) {
+ String version = objectMapper.readTree(response.body).get("version").asText();
+ if (version != null) {
+ this.serverVersion = new Semver(version);
+ this.serverCapability = new Capability(this.serverVersion);
+ }
+ }
+ } catch (JsonProcessingException e) {
+ throw new RuntimeException(e);
+ }
}
public static String randRouteHint() {
@@ -204,7 +246,7 @@ public static String getCopyIntoSql(String database, DatabendCopyParams params)
sb.append("FROM ");
sb.append(params.getDatabendStage().toString());
sb.append(" ");
- sb.append(params.toString());
+ sb.append(params);
return sb.toString();
}
@@ -287,7 +329,6 @@ public void commit()
} catch (SQLException e) {
throw new SQLException("Failed to commit", e);
}
- return;
}
@Override
@@ -313,7 +354,6 @@ public void rollback()
} catch (SQLException e) {
throw new SQLException("Failed to rollback", e);
}
- return;
}
@Override
@@ -364,7 +404,7 @@ public void setCatalog(String s)
@Override
public int getTransactionIsolation()
throws SQLException {
- return 0;
+ return Connection.TRANSACTION_NONE;
}
@Override
@@ -707,7 +747,7 @@ DatabendClient startQueryWithFailover(String sql, StageAttachment attach) throws
for (int attempt = 0; attempt <= maxRetries; attempt++) {
try {
- String queryId = UUID.randomUUID().toString().replace("-", "");;
+ String queryId = UUID.randomUUID().toString().replace("-", "");
String candidateHost = selectHostForQuery(queryId);
// configure the client settings
@@ -874,6 +914,7 @@ private Map setAdditionalHeaders() {
if (!this.routeHint.isEmpty()) {
additionalHeaders.put(X_DATABEND_ROUTE_HINT, this.routeHint);
}
+ additionalHeaders.put("User-Agent", USER_AGENT_VALUE);
return additionalHeaders;
}
@@ -950,10 +991,7 @@ public void uploadStream(String stageName, String destPrefix, InputStream inputS
logger.info("upload cost time: " + (uploadEndTime - uploadStartTime) / 1000000.0 + "ms");
}
}
- } catch (RuntimeException e) {
- logger.warning("failed to upload input stream, file size is:" + fileSize / 1024.0 + e.getMessage());
- throw new SQLException(e);
- } catch (IOException e) {
+ } catch (RuntimeException | IOException e) {
logger.warning("failed to upload input stream, file size is:" + fileSize / 1024.0 + e.getMessage());
throw new SQLException(e);
}
@@ -987,71 +1025,193 @@ public void copyIntoTable(String database, String tableName, DatabendCopyParams
while (rs.next()) {
}
}
+ @Override
+ public int loadStreamToTable(String sql, InputStream inputStream, long fileSize, String loadMethod) throws SQLException {
+ loadMethod = loadMethod.toLowerCase();
+ if (!"stage".equals(loadMethod) && !"streaming".equals(loadMethod)) {
+ throw new SQLException("invalid value for loadMethod(" + loadMethod + ") only accept \"stage\" or \"streaming\"");
+ }
+
+ if (!this.serverCapability.streamingLoad()) {
+ throw new SQLException("please upgrade databend-query to >1.2.781 to use loadStreamToTable, current version=" + this.serverVersion);
+ }
+
+ if (!sql.contains("@_databend_load")) {
+ throw new SQLException("invalid sql: must contain @_databend_load when used in loadStreamToTable");
+ }
+
+ if ("streaming".equals(loadMethod)) {
+ return streamingLoad(sql, inputStream, fileSize);
+ } else {
+ Instant now = Instant.now();
+ long nanoTimestamp = now.getEpochSecond() * 1_000_000_000 + now.getNano();
+ String fileName = String.valueOf(nanoTimestamp);
+ String location = "~/_databend_load/" + fileName;
+ sql = sql.replace("_databend_load", location);
+ uploadStream("~", "_databend_load", inputStream, fileName, fileSize, false);
+ Statement statement = this.createStatement();
+ statement.execute(sql);
+ ResultSet rs = statement.getResultSet();
+ while (rs.next()) {
+ }
+ return statement.getUpdateCount();
+ }
+ }
+
+ MultipartBody buildMultiPart(InputStream inputStream, long fileSize) {
+ RequestBody requestBody = new RequestBody() {
+ @Override
+ public MediaType contentType() {
+ return MediaType.parse("application/octet-stream");
+ }
+
+ @Override
+ public long contentLength() {
+ return fileSize;
+ }
+
+ @Override
+ public void writeTo(BufferedSink sink) throws IOException {
+ try (Source source = Okio.source(inputStream)) {
+ sink.writeAll(source);
+ }
+ }
+ };
+ return new MultipartBody.Builder()
+ .setType(MultipartBody.FORM)
+ .addFormDataPart(
+ "upload",
+ "java.io.InputStream",
+ requestBody
+ ).build();
+ }
+
+ int streamingLoad(String sql, InputStream inputStream, long fileSize) throws SQLException {
+ RetryPolicy retryPolicy = new RetryPolicy(true, true);
+
+ try {
+ HashMap headers = new HashMap<>();
+ DatabendSession session = this.session.get();
+ if (session != null) {
+ String sessionString = objectMapper.writeValueAsString(session);
+ headers.put(DatabendQueryContextHeader, sessionString);
+ }
+ headers.put(DatabendSQLHeader, sql);
+ headers.put("Accept", "application/json");
+ RequestBody requestBody = buildMultiPart(inputStream, fileSize);
+ ResponseWithBody response = requestHelper(STREAMING_LOAD_PATH, "put", requestBody, headers, retryPolicy);
+ JsonNode json = objectMapper.readTree(response.body);
+ JsonNode error = json.get("error");
+ if (error != null) {
+ throw new SQLException("streaming load fail: code = " + error.get("code").asText() + ", message=" + error.get("message").asText());
+ }
+ String base64 = response.response.headers().get(DatabendQueryContextHeader);
+ if (base64 != null) {
+ byte[] bytes = Base64.getUrlDecoder().decode(base64);
+ String str = new String(bytes, StandardCharsets.UTF_8);
+ try {
+ session = SESSION_JSON_CODEC.fromJson(str);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ if (session != null) {
+ this.session.set(session);
+ }
+ }
+ JsonNode stats = json.get("stats");
+ if (stats != null) {
+ int rows = stats.get("rows").asInt(-1);
+ if (rows != -1) {
+ return rows;
+ }
+ }
+ throw new SQLException("invalid response for " + STREAMING_LOAD_PATH + ": " + response.body);
+ } catch (JsonProcessingException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
void logout() throws SQLException {
DatabendSession session = this.session.get();
if (session == null || !session.getNeedKeepAlive()) {
return;
}
- generalRequest(LOGOUT_PATH, "{}");
+ RetryPolicy retryPolicy = new RetryPolicy(false, false);
+ RequestBody body = RequestBody.create(MEDIA_TYPE_JSON, "{}");
+ requestHelper(LOGOUT_PATH, "post", body, new HashMap<>(), retryPolicy);
}
- String generalRequest(String path, String body) throws SQLException {
- DatabendSession session = this.session.get();
- int times = getMaxFailoverRetries() + 1;
- List hosts = new LinkedList();
+
+ HttpUrl getUrl(String path) {
+ String host = this.driverUri.getUri().toString();
+ HttpUrl url = HttpUrl.get(host);
+ return url.newBuilder().encodedPath(path).build();
+ }
+
+ ResponseWithBody sendRequestWithRetry(Request request, RetryPolicy retryPolicy, String path) throws SQLException {
String failReason = null;
- String lastHost = null;
-
- for (int i = 1; i <= times; i++) {
- String candidateHost = this.driverUri.getUri("").toString();
- // candidateHost = "http://localhost:8888";
- hosts.add(candidateHost);
- if (lastHost == candidateHost) {
- break;
- }
- lastHost = candidateHost;
- logger.log(Level.FINE, "retry " + i + " times to logout on " + candidateHost);
-
- ClientSettings settings = this.makeClientSettings("", candidateHost).build();
- HttpUrl url = HttpUrl.get(candidateHost).newBuilder().encodedPath(path).build();
- Request.Builder builder = new Request.Builder()
- .url(url)
- .header("User-Agent", USER_AGENT_VALUE);
- if (settings.getAdditionalHeaders() != null) {
- settings.getAdditionalHeaders().forEach(builder::addHeader);
- }
- if (session.getNeedSticky()) {
- builder.addHeader(ClientSettings.X_DATABEND_ROUTE_HINT, uriRouteHint(candidateHost));
- String lastNodeID = this.lastNodeID.get();
- if (lastNodeID != null) {
- builder.addHeader(ClientSettings.X_DATABEND_STICKY_NODE, lastNodeID);
- }
- }
- for (int j = 1; j <= 3; j++) {
- Request request = builder.post(okhttp3.RequestBody.create(MEDIA_TYPE_JSON, body)).build();
- try (Response response = httpClient.newCall(request).execute()) {
- if (response.code() != 200) {
- throw new SQLException("Error logout: code =" + response.code() + ", body = " + response.body());
- }
- return response.body().string();
- } catch (IOException e) {
- if (e.getCause() instanceof ConnectException) {
- if (failReason == null) {
- failReason = e.getMessage();
- }
- try {
- MILLISECONDS.sleep(j * 100);
- } catch (InterruptedException e2) {
- Thread.currentThread().interrupt();
- return null;
- }
+
+ for (int j = 1; j <= 3; j++) {
+ try (Response response = httpClient.newCall(request).execute()) {
+ int code = response.code();
+ if (code != 200) {
+ if (retryPolicy.shouldIgnore(code)) {
+ return new ResponseWithBody(response, "");
} else {
- break;
+ failReason = "status code =" + response.code() + ", body = " + response.body().string();
+ if (!retryPolicy.shouldRetry(code))
+ break;
+ }
+ } else {
+ String body = response.body().string();
+ return new ResponseWithBody(response, body);
+ }
+ } catch (IOException e) {
+ if (retryPolicy.shouldRetry(e)) {
+ if (failReason == null) {
+ failReason = e.getMessage();
}
+ } else {
+ break;
+ }
+ }
+ if (j < 3) {
+ try {
+ MILLISECONDS.sleep(j * 100);
+ } catch (InterruptedException e2) {
+ Thread.currentThread().interrupt();
+ return null;
}
}
}
- throw new SQLException("Failover Retry Error executing query after retries on hosts " + hosts + ": " + failReason);
+ throw new SQLException("Error accessing " + path + ": " + failReason);
+ }
+
+ ResponseWithBody requestHelper(String path, String method, RequestBody body, Map headers, RetryPolicy retryPolicy) throws SQLException {
+ DatabendSession session = this.session.get();
+ HttpUrl url = getUrl(path);
+
+ Request.Builder builder = new Request.Builder().url(url);
+ this.setAdditionalHeaders().forEach(builder::addHeader);
+ if (headers != null) {
+ headers.forEach(builder::addHeader);
+ }
+ if (session.getNeedSticky()) {
+ builder.addHeader(ClientSettings.X_DATABEND_ROUTE_HINT, url.host());
+ String lastNodeID = this.lastNodeID.get();
+ if (lastNodeID != null) {
+ builder.addHeader(ClientSettings.X_DATABEND_STICKY_NODE, lastNodeID);
+ }
+ }
+ if ("post".equals(method)) {
+ builder = builder.post(body);
+ } else if ("put".equals(method)) {
+ builder = builder.put(body);
+ } else {
+ builder = builder.get();
+ }
+ Request request = builder.build();
+ return sendRequestWithRetry(request, retryPolicy, path);
}
class HeartbeatManager implements Runnable {
@@ -1087,7 +1247,7 @@ private ArrayList queryLiveness() {
ArrayList arr = new ArrayList<>();
for (DatabendStatement stmt : statements.keySet()) {
QueryLiveness ql = stmt.queryLiveness();
- if (ql != null && !ql.stopped && ServerVersions.supportHeartbeat(ql.serverVersion)) {
+ if (ql != null && !ql.stopped && ql.serverSupportHeartBeat) {
arr.add(ql);
}
}
@@ -1097,12 +1257,12 @@ private ArrayList queryLiveness() {
private void doHeartbeat(ArrayList queryLivenesses ) {
long now = System.currentTimeMillis();
lastHeartbeatStartTimeMillis = now;
- Map> nodeToQueryID = new HashMap();
- Map queries = new HashMap();
+ Map> nodeToQueryID = new HashMap<>();
+ Map queries = new HashMap<>();
for (QueryLiveness ql: queryLivenesses) {
if (now - ql.lastRequestTime.get() >= ql.resultTimeoutSecs * 1000 / 2) {
- nodeToQueryID.computeIfAbsent(ql.nodeID, k -> new ArrayList()).add(ql.queryID);
+ nodeToQueryID.computeIfAbsent(ql.nodeID, k -> new ArrayList<>()).add(ql.queryID);
queries.put(ql.queryID, ql);
}
}
@@ -1110,15 +1270,15 @@ private void doHeartbeat(ArrayList queryLivenesses ) {
return;
}
- ObjectMapper mapper = new ObjectMapper();
Map map = new HashMap<>();
map.put("node_to_queries", nodeToQueryID);
try {
- String body = mapper.writeValueAsString(map);
-
- body = generalRequest(HEARTBEAT_PATH, body);
- JsonNode toRemove = mapper.readTree(body).get("queries_to_remove");
+ String body = objectMapper.writeValueAsString(map);
+ RequestBody requestBody = RequestBody.create(MEDIA_TYPE_JSON, body);
+ RetryPolicy retryPolicy = new RetryPolicy(true, false);
+ body = requestHelper(HEARTBEAT_PATH, "post", requestBody, null, retryPolicy).body;
+ JsonNode toRemove = objectMapper.readTree(body).get("queries_to_remove");
if (toRemove.isArray()) {
for (JsonNode element : toRemove) {
String queryId = element.asText();
@@ -1155,8 +1315,8 @@ public void run() {
ArrayList arr = queryLiveness();
doHeartbeat(arr);
+ heartbeatFuture = null;
synchronized (DatabendConnection.this) {
- heartbeatFuture = null;
if (arr.size() > 0) {
if (heartbeatFuture == null) {
scheduleHeartbeat();
@@ -1171,5 +1331,35 @@ public void run() {
boolean isHeartbeatStopped() {
return heartbeatManager.heartbeatFuture == null;
}
-}
+ static class RetryPolicy {
+ boolean ignore404;
+ boolean retry503;
+ RetryPolicy(boolean ignore404, boolean retry503) {
+ this.ignore404 = ignore404;
+ this.retry503 = retry503;
+ }
+
+ boolean shouldIgnore(int code) {
+ return ignore404 && code == 404;
+ }
+
+ boolean shouldRetry(int code) {
+ return retry503 && (code == 502 || code == 503);
+ }
+
+ boolean shouldRetry(IOException e) {
+ return e.getCause() instanceof ConnectException;
+ }
+ }
+
+ static class ResponseWithBody {
+ public Response response;
+ public String body;
+
+ ResponseWithBody(Response response, String body) {
+ this.response = response;
+ this.body = body;
+ }
+ }
+}
diff --git a/databend-jdbc/src/main/java/com/databend/jdbc/DatabendDriverUri.java b/databend-jdbc/src/main/java/com/databend/jdbc/DatabendDriverUri.java
index fe8fdd96..b726deaf 100644
--- a/databend-jdbc/src/main/java/com/databend/jdbc/DatabendDriverUri.java
+++ b/databend-jdbc/src/main/java/com/databend/jdbc/DatabendDriverUri.java
@@ -1,6 +1,6 @@
package com.databend.jdbc;
-import com.databend.client.GlobalCookieJar;
+import com.databend.jdbc.util.GlobalCookieJar;
import com.databend.jdbc.util.URLUtils;
import com.google.common.base.Splitter;
import com.google.common.collect.Maps;
@@ -23,7 +23,6 @@
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeUnit;
-import java.util.logging.Logger;
import static com.databend.client.OkHttpUtils.*;
import static com.databend.jdbc.ConnectionProperties.*;
@@ -37,7 +36,6 @@
* Parses and extracts parameters from a databend JDBC URL
*/
public final class DatabendDriverUri {
- private static final Logger logger = Logger.getLogger(DatabendDriverUri.class.getPackage().getName());
private static final String JDBC_URL_PREFIX = "jdbc:";
private static final String JDBC_URL_START = JDBC_URL_PREFIX + "databend://";
private static final Splitter QUERY_SPLITTER = Splitter.on('&').omitEmptyStrings();
@@ -251,8 +249,7 @@ private static Map.Entry> parse(String url)
}
Map uriProperties = new LinkedHashMap<>();
String raw = url.substring(pos + JDBC_URL_START.length());
- String scheme;
- String host = null;
+ String host;
int port = -1;
raw = tryParseUriUserPassword(raw, uriProperties);
diff --git a/databend-jdbc/src/main/java/com/databend/jdbc/DatabendResultSet.java b/databend-jdbc/src/main/java/com/databend/jdbc/DatabendResultSet.java
index f04044da..f65659f6 100644
--- a/databend-jdbc/src/main/java/com/databend/jdbc/DatabendResultSet.java
+++ b/databend-jdbc/src/main/java/com/databend/jdbc/DatabendResultSet.java
@@ -3,7 +3,6 @@
import com.databend.client.DatabendClient;
import com.databend.client.QueryResults;
import com.databend.client.QueryRowField;
-import com.github.zafarkhaja.semver.Version;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.Streams;
@@ -38,7 +37,7 @@ public class DatabendResultSet extends AbstractDatabendResultSet {
private final QueryLiveness liveness;
- private DatabendResultSet(Statement statement, DatabendClient client, List schema, long maxRows, QueryLiveness liveness) throws SQLException {
+ private DatabendResultSet(Statement statement, DatabendClient client, List schema, long maxRows, QueryLiveness liveness) {
super(Optional.of(requireNonNull(statement, "statement is null")), schema,
new AsyncIterator<>(flatten(new ResultsPageIterator(client, liveness), maxRows), client), client.getResults().getQueryId());
this.statement = statement;
@@ -46,21 +45,13 @@ private DatabendResultSet(Statement statement, DatabendClient client, List s = client.getResults().getSchema();
AtomicLong lastRequestTime = new AtomicLong(System.currentTimeMillis());
QueryResults r = client.getResults();
- Version serverVersion = null;
- if (client.getServerVersion() != null) {
- try {
- serverVersion = Version.valueOf(client.getServerVersion());
- } catch (Exception ignored) {
-
- }
- }
- QueryLiveness liveness = new QueryLiveness(r.getQueryId(), client.getNodeID(), lastRequestTime, r.getResultTimeoutSecs(), serverVersion);
+ QueryLiveness liveness = new QueryLiveness(r.getQueryId(), client.getNodeID(), lastRequestTime, r.getResultTimeoutSecs(), capability.heartBeat());
return new DatabendResultSet(statement, client, s, maxRows, liveness);
}
diff --git a/databend-jdbc/src/main/java/com/databend/jdbc/DatabendStatement.java b/databend-jdbc/src/main/java/com/databend/jdbc/DatabendStatement.java
index 69f6703f..681defa9 100644
--- a/databend-jdbc/src/main/java/com/databend/jdbc/DatabendStatement.java
+++ b/databend-jdbc/src/main/java/com/databend/jdbc/DatabendStatement.java
@@ -194,7 +194,7 @@ final boolean internalExecute(String sql, StageAttachment attachment) throws SQL
break;
}
}
- resultSet = DatabendResultSet.create(this, client, maxRows.get());
+ resultSet = DatabendResultSet.create(this, client, maxRows.get(), connection().getServerCapability());
currentResult.set(resultSet);
if (isQueryStatement(sql)) {
// Always -1 when returning a ResultSet with query statement
diff --git a/databend-jdbc/src/main/java/com/databend/jdbc/FileTransferAPI.java b/databend-jdbc/src/main/java/com/databend/jdbc/FileTransferAPI.java
index 470cd6cf..343a5758 100644
--- a/databend-jdbc/src/main/java/com/databend/jdbc/FileTransferAPI.java
+++ b/databend-jdbc/src/main/java/com/databend/jdbc/FileTransferAPI.java
@@ -41,4 +41,15 @@ public interface FileTransferAPI {
* @throws SQLException fail to copy into table
*/
void copyIntoTable(String database, String tableName, DatabendCopyParams params) throws SQLException;
+
+ /**
+ * Upload inputStream into the target table
+ *
+ * @param sql the sql with syntax `Insert into [() [values (?, ...)]] from @_databend_load [file_format=(...)]`
+ * @param inputStream the input stream of the file
+ * @param loadMethod one of "stage" or "streaming"
+ * @return num of rows loaded
+ * @throws SQLException fail to load file into table
+ */
+ int loadStreamToTable(String sql, InputStream inputStream, long fileSize, String loadMethod) throws SQLException;
}
diff --git a/databend-jdbc/src/main/java/com/databend/jdbc/LoginRequest.java b/databend-jdbc/src/main/java/com/databend/jdbc/LoginRequest.java
new file mode 100644
index 00000000..b489fbaa
--- /dev/null
+++ b/databend-jdbc/src/main/java/com/databend/jdbc/LoginRequest.java
@@ -0,0 +1,9 @@
+package com.databend.jdbc;
+
+
+import java.util.Map;
+
+public class LoginRequest {
+ public String database;
+ public Map settings;
+}
diff --git a/databend-jdbc/src/main/java/com/databend/jdbc/NonRegisteringDatabendDriver.java b/databend-jdbc/src/main/java/com/databend/jdbc/NonRegisteringDatabendDriver.java
index 1b9ae69e..7bd86d34 100644
--- a/databend-jdbc/src/main/java/com/databend/jdbc/NonRegisteringDatabendDriver.java
+++ b/databend-jdbc/src/main/java/com/databend/jdbc/NonRegisteringDatabendDriver.java
@@ -1,6 +1,6 @@
package com.databend.jdbc;
-import com.databend.client.GlobalCookieJar;
+import com.databend.jdbc.util.GlobalCookieJar;
import okhttp3.Cookie;
import okhttp3.OkHttpClient;
@@ -72,7 +72,7 @@ public Connection connect(String url, Properties info)
}
}
- return new DatabendConnection(uri, builder.build());
+ return connection;
}
@Override
diff --git a/databend-jdbc/src/main/java/com/databend/jdbc/QueryLiveness.java b/databend-jdbc/src/main/java/com/databend/jdbc/QueryLiveness.java
index 28a78e03..cfdf2920 100644
--- a/databend-jdbc/src/main/java/com/databend/jdbc/QueryLiveness.java
+++ b/databend-jdbc/src/main/java/com/databend/jdbc/QueryLiveness.java
@@ -1,21 +1,19 @@
package com.databend.jdbc;
-import com.github.zafarkhaja.semver.Version;
-
import java.util.concurrent.atomic.AtomicLong;
public class QueryLiveness {
String queryID;
String nodeID;
- Version serverVersion;
+ boolean serverSupportHeartBeat;
AtomicLong lastRequestTime;
boolean stopped;
long resultTimeoutSecs;
- public QueryLiveness(String queryID, String nodeID, AtomicLong lastRequestTime, Long resultTimeoutSecs, Version severVersion) {
+ public QueryLiveness(String queryID, String nodeID, AtomicLong lastRequestTime, Long resultTimeoutSecs, boolean serverSupportHeartBeat) {
this.queryID = queryID;
this.nodeID = nodeID;
this.lastRequestTime = lastRequestTime;
this.resultTimeoutSecs = resultTimeoutSecs;
- this.serverVersion = severVersion;
+ this.serverSupportHeartBeat = serverSupportHeartBeat;
}
}
diff --git a/databend-client/src/main/java/com/databend/client/GlobalCookieJar.java b/databend-jdbc/src/main/java/com/databend/jdbc/util/GlobalCookieJar.java
similarity index 95%
rename from databend-client/src/main/java/com/databend/client/GlobalCookieJar.java
rename to databend-jdbc/src/main/java/com/databend/jdbc/util/GlobalCookieJar.java
index e1886722..6fc104b1 100644
--- a/databend-client/src/main/java/com/databend/client/GlobalCookieJar.java
+++ b/databend-jdbc/src/main/java/com/databend/jdbc/util/GlobalCookieJar.java
@@ -1,4 +1,4 @@
-package com.databend.client;
+package com.databend.jdbc.util;
import okhttp3.Cookie;
import okhttp3.CookieJar;
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/Compatibility.java b/databend-jdbc/src/test/java/com/databend/jdbc/Compatibility.java
new file mode 100644
index 00000000..b5fa8df0
--- /dev/null
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/Compatibility.java
@@ -0,0 +1,68 @@
+package com.databend.jdbc;
+
+import com.vdurmont.semver4j.Semver;
+
+public class Compatibility {
+ public static class Capability {
+ boolean streamingLoad;
+
+ public Capability() {
+ this.streamingLoad = true;
+ }
+ public Capability(boolean streamingLoad) {
+ this.streamingLoad = streamingLoad;
+ }
+
+ public static Capability fromServerVersion(Semver ver) {
+ boolean streamingLoad = ver.isGreaterThanOrEqualTo(new Semver("1.2.792"));
+ return new Capability(streamingLoad);
+ }
+
+ public static Capability fromDriverVersion(Semver ver) {
+ boolean streamingLoad = ver.isGreaterThanOrEqualTo(new Semver("0.4.1"));
+ return new Capability(streamingLoad);
+ }
+ }
+
+ public static Semver driverVersion = getDriverVersion();
+ public static Semver serverVersion = getServerVersion();
+ public static Capability driverCapability = driverVersion==null? new Capability(): Capability.fromDriverVersion(driverVersion);
+ public static Capability serverCapability = serverVersion==null? new Capability(): Capability.fromServerVersion(serverVersion);
+
+ private static Semver getDriverVersion() {
+ String env = System.getenv("DATABEND_JDBC_VERSION");
+ if (env == null) {
+ return null;
+ }
+ return new Semver(env);
+ }
+ private static Semver getServerVersion() {
+ String env = System.getenv("DATABEND_QUERY_VERSION");
+ if (env == null || "nightly".equals(env)) {
+ return null;
+ }
+ return new Semver(env, Semver.SemverType.NPM).withClearedSuffixAndBuild();
+ }
+
+ public static boolean skipDriverBugLowerThen(String version) {
+ if (driverVersion != null && driverVersion.isLowerThan(new Semver(version))) {
+ StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
+ String callerName = stackTrace[2].getMethodName();
+ System.out.println("SkipDriverBug version=" + version + ", method=" + callerName);
+ return true;
+ }
+ return false;
+ }
+ public static boolean skipBugLowerThenOrEqualTo(String serverVersionBug, String driverVersionBug) {
+ if (driverVersion != null && driverVersion.isLowerThanOrEqualTo(new Semver(driverVersionBug))
+ && serverVersion != null && serverVersion.isLowerThanOrEqualTo(serverVersionBug)
+ ) {
+ StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace();
+ String callerName = stackTrace[2].getMethodName();
+ System.out.printf("SkipDriverBug (server <= %s && driver <= %s), method = %s%n",
+ serverVersionBug, driverVersionBug, callerName);
+ return true;
+ }
+ return false;
+ }
+}
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/TestBasicDriver.java b/databend-jdbc/src/test/java/com/databend/jdbc/TestBasicDriver.java
index 8f21afe4..678e3f34 100644
--- a/databend-jdbc/src/test/java/com/databend/jdbc/TestBasicDriver.java
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/TestBasicDriver.java
@@ -2,9 +2,6 @@
import com.databend.client.DatabendSession;
import com.databend.client.PaginationOptions;
-import org.locationtech.jts.geom.Geometry;
-import org.locationtech.jts.io.ParseException;
-import org.locationtech.jts.io.WKBReader;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
@@ -35,10 +32,6 @@ public void setUp()
c.createStatement().execute("create table test_basic_driver.table1(i int)");
c.createStatement().execute("insert into test_basic_driver.table1 values(1)");
c.createStatement().execute("create database test_basic_driver_2");
- c.createStatement().execute("create table test_basic_driver.table_with_null(a int,b varchar default null, c varchar, d varchar)");
- c.createStatement().execute("insert into test_basic_driver.table_with_null(a,b,c,d) values(1,null,'null','NULL')");
-
- // json data
}
@Test(groups = {"IT"})
@@ -90,7 +83,7 @@ public void testSchema() {
}
}
- @Test
+ @Test(groups = {"IT"})
public void testCreateUserFunction() throws SQLException {
String s = "create or replace function add_plus(int,int)\n" +
"returns int\n" +
@@ -119,11 +112,12 @@ public void testCreateUserFunction() throws SQLException {
}
}
- @Test
- public void TestMergeinto() throws SQLException {
- try (Connection connection = Utils.createConnection()) {
- DatabendStatement statement = (DatabendStatement) connection.createStatement();
- statement.execute("CREATE TABLE IF NOT EXISTS test_basic_driver.target_table (\n" +
+ @Test(groups = {"IT"})
+ public void TestMergeInto() throws SQLException {
+ try (Connection connection = Utils.createConnection();
+ Statement statement = connection.createStatement()
+ ) {
+ statement.execute("CREATE OR REPLACE TABLE test_basic_driver.target_table (\n" +
" ID INT,\n" +
" Name VARCHAR(50),\n" +
" Age INT,\n" +
@@ -134,7 +128,7 @@ public void TestMergeinto() throws SQLException {
" (1, 'Alice', 25, 'Toronto'),\n" +
" (2, 'Bob', 30, 'Vancouver'),\n" +
" (3, 'Carol', 28, 'Montreal');");
- statement.execute("CREATE TABLE IF NOT EXISTS test_basic_driver.source_table (\n" +
+ statement.execute("CREATE OR REPLACE TABLE test_basic_driver.source_table (\n" +
" ID INT,\n" +
" Name VARCHAR(50),\n" +
" Age INT,\n" +
@@ -154,15 +148,15 @@ public void TestMergeinto() throws SQLException {
" WHEN NOT MATCHED THEN\n" +
" INSERT *;\n");
ResultSet r = statement.getResultSet();
- r.next();
+
+ Assert.assertTrue(r.next());
Assert.assertEquals(3, statement.getUpdateCount());
- System.out.println(statement.getUpdateCount());
- } catch (SQLException throwables) {
- throwables.printStackTrace();
+ } catch (SQLException throwable) {
+ throwable.printStackTrace();
}
}
- @Test
+ @Test(groups = {"IT"})
public void testWriteDouble() throws SQLException {
try (Connection connection = Utils.createConnection()) {
DatabendStatement statement = (DatabendStatement) connection.createStatement();
@@ -173,7 +167,7 @@ public void testWriteDouble() throws SQLException {
" City VARCHAR(50),\n" +
" Score DOUBLE\n" +
");");
- Double infDouble = Double.POSITIVE_INFINITY;
+ double infDouble = Double.POSITIVE_INFINITY;
String sql = "INSERT INTO test_basic_driver.table_double (ID, Name, Age, City, Score) values";
PreparedStatement prepareStatement = connection.prepareStatement(sql);
@@ -194,19 +188,26 @@ public void testWriteDouble() throws SQLException {
}
}
- @Test
+ @Test(groups = {"IT"})
public void testDefaultSelectNullValue() throws SQLException {
- try (Connection connection = Utils.createConnection()) {
- DatabendStatement statement = (DatabendStatement) connection.createStatement();
- statement.executeQuery("SELECT a,b,c,d from test_basic_driver.table_with_null");
+ try (Connection connection = Utils.createConnection();
+ Statement statement = connection.createStatement()
+ ) {
+ statement.execute("create table test_basic_driver.table_with_null(a int,b varchar default null, c varchar, d varchar)");
+ statement.execute("insert into test_basic_driver.table_with_null(a,b,c,d) values(1,null,'null','NULL')");
+ statement.execute("SELECT a,b,c,d from test_basic_driver.table_with_null");
ResultSet r = statement.getResultSet();
r.next();
Assert.assertEquals(r.getInt(1), 1);
- Assert.assertEquals(r.getObject(2), null);
+ Assert.assertNull(r.getObject(2));
Assert.assertEquals(r.getObject(3), "null");
- Assert.assertEquals(r.getObject(4), "NULL");
- } catch (SQLException throwables) {
- throwables.printStackTrace();
+ if (Compatibility.skipDriverBugLowerThen("0.3.9")) {
+ Assert.assertNull(r.getObject(4));
+ } else {
+ Assert.assertEquals(r.getObject(4), "NULL");
+ }
+ } catch (SQLException throwable) {
+ throwable.printStackTrace();
}
}
@@ -246,7 +247,7 @@ public void testBasicWithProperties() throws SQLException {
}
}
- @Test
+ @Test(groups = {"IT"})
public void testPrepareStatementQuery() throws SQLException {
String sql = "SELECT number from numbers(100) where number = ? or number = ?";
Connection conn = Utils.createConnection("test_basic_driver");
@@ -297,7 +298,7 @@ public void testUpdateSession()
public void testResultException() {
try (Connection connection = Utils.createConnection()) {
Statement statement = connection.createStatement();
- ResultSet r = statement.executeQuery("SELECT 1e189he 198h");
+ statement.execute("SELECT 1e189he 198h");
} catch (SQLException e) {
Assert.assertTrue(e.getMessage().contains("Query failed"));
}
@@ -318,33 +319,4 @@ public void testSelectWithPreparedStatement()
Assert.assertEquals(r.getString(1), "2021-01-01 00:00:00.000000");
}
}
-
- @Test(groups = {"IT"})
- public void testSelectGeometry() throws SQLException, ParseException {
- // skip due to failed cluster tests
-
- try (Connection connection = Utils.createConnection()) {
- connection.createStatement().execute("set enable_geo_create_table=1");
- connection.createStatement().execute("CREATE or replace table cities ( id INT, name VARCHAR NOT NULL, location GEOMETRY);");
- connection.createStatement().execute("INSERT INTO cities (id, name, location) VALUES (1, 'New York', 'POINT (-73.935242 40.73061))');");
- connection.createStatement().execute("INSERT INTO cities (id, name, location) VALUES (2, 'Null', null);");
- Statement statement = connection.createStatement();
- try (ResultSet r = statement.executeQuery("select location from cities order by id")) {
- r.next();
- Assert.assertEquals("{\"type\": \"Point\", \"coordinates\": [-73.935242,40.73061]}", r.getObject(1));
- r.next();
- Assert.assertNull(r.getObject(1));
- }
-
- // set geometry_output_format to wkb
- connection.createStatement().execute("set geometry_output_format='WKB'");
- try (ResultSet r = statement.executeQuery("select location from cities order by id")) {
- r.next();
- byte[] wkb = r.getBytes(1);
- WKBReader wkbReader = new WKBReader();
- Geometry geometry = wkbReader.read(wkb);
- Assert.assertEquals("POINT (-73.935242 40.73061)", geometry.toText());
- }
- }
- }
}
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/TestDatabendDatabaseMetaData.java b/databend-jdbc/src/test/java/com/databend/jdbc/TestDatabendDatabaseMetaData.java
index 97f6b848..2362fcd0 100644
--- a/databend-jdbc/src/test/java/com/databend/jdbc/TestDatabendDatabaseMetaData.java
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/TestDatabendDatabaseMetaData.java
@@ -72,14 +72,19 @@ public void setUp()
// json data
}
- @Test(groups = {"UNIT"})
+ @Test(groups = {"IT"})
public void testVersion() throws SQLException {
try (Connection c = Utils.createConnection()) {
DatabaseMetaData metaData = c.getMetaData();
int major = metaData.getDriverMajorVersion();
int minor = metaData.getDriverMinorVersion();
- assertEquals(major, 0);
- assertEquals(minor, 4);
+ if (Compatibility.driverVersion != null) {
+ assertEquals(major, (int) Compatibility.driverVersion.getMajor());
+ assertEquals(minor, (int) Compatibility.driverVersion.getMinor());
+ } else {
+ assertEquals(major, 0);
+ assertEquals(minor, 4);
+ }
}
}
@@ -110,6 +115,15 @@ public void testGetDatabaseProductVersion()
int minorVersion = metaData.getDatabaseMinorVersion();
String checkVersion = String.format("v%.1f.%d", majorVersion, minorVersion);
Assert.assertTrue(metaData.getDatabaseProductVersion().contains(checkVersion));
+
+ if (Compatibility.serverCapability.streamingLoad && Compatibility.driverCapability.streamingLoad) {
+ DatabendConnection conn = connection.unwrap(DatabendConnection.class);
+ if (conn.getServerVersion() != null) {
+ String semver = "v" + conn.getServerVersion().toString();
+ Assert.assertTrue(semver.startsWith(checkVersion), semver);
+ Assert.assertNotNull(conn.getServerCapability());
+ }
+ }
}
}
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/TestDatabendDriverUri.java b/databend-jdbc/src/test/java/com/databend/jdbc/TestDatabendDriverUri.java
index d7bfed37..3bf0a26e 100644
--- a/databend-jdbc/src/test/java/com/databend/jdbc/TestDatabendDriverUri.java
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/TestDatabendDriverUri.java
@@ -7,6 +7,8 @@
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Properties;
@Test(timeOut = 10000)
@@ -254,7 +256,7 @@ public void TestSetSchema() throws SQLException {
public void TestSetSessionSettings() throws SQLException {
Properties props = new Properties();
// set session settings
- props.setProperty("session_settings", "key1=value1,key2=value2");
+ props.setProperty("session_settings", "max_threads=1,query_tag=tag1");
props.setProperty("user", "databend");
props.setProperty("password", "databend");
DatabendConnection connection = (DatabendConnection) Utils.createConnection("default", props);
@@ -262,15 +264,13 @@ public void TestSetSessionSettings() throws SQLException {
Statement statement = connection.createStatement();
statement.execute("show settings");
ResultSet r = statement.getResultSet();
+ Map settings = new HashMap<>();
while (r.next()) {
- String name = r.getString("name");
- String value = r.getString("value");
- if (name.equals("key1")) {
- Assert.assertEquals(value, "value1");
- } else if (name.equals("key2")) {
- Assert.assertEquals(value, "value2");
- }
+ settings.put(r.getString("name"), r.getString("value"));
}
+ Assert.assertEquals(settings.get("max_threads"), "1");
+ Assert.assertEquals(settings.get("query_tag"), "tag1");
+
} catch (SQLException e) {
throw new RuntimeException(e);
} finally {
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/TestFileTransfer.java b/databend-jdbc/src/test/java/com/databend/jdbc/TestFileTransfer.java
index d4410b21..e1017656 100644
--- a/databend-jdbc/src/test/java/com/databend/jdbc/TestFileTransfer.java
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/TestFileTransfer.java
@@ -6,6 +6,7 @@
import de.siegmar.fastcsv.writer.LineDelimiter;
import okhttp3.OkHttpClient;
import org.testng.Assert;
+import org.testng.SkipException;
import org.testng.annotations.BeforeTest;
import org.testng.annotations.Test;
@@ -25,7 +26,7 @@ private static byte[] streamToByteArray(InputStream stream) throws IOException {
byte[] buffer = new byte[1024];
ByteArrayOutputStream os = new ByteArrayOutputStream();
- int line = 0;
+ int line;
// read bytes from stream, and store them in buffer
while ((line = stream.read(buffer)) != -1) {
// Writes bytes from byte array (buffer) into output stream.
@@ -40,10 +41,11 @@ private static byte[] streamToByteArray(InputStream stream) throws IOException {
public void setUp()
throws SQLException {
// create table
- Connection c = Utils.createConnection();
+ try (Connection c = Utils.createConnection()) {
- c.createStatement().execute("drop table if exists copy_into");
- c.createStatement().execute("CREATE TABLE IF NOT EXISTS copy_into (i int, a Variant, b string) ENGINE = FUSE");
+ c.createStatement().execute("drop table if exists copy_into");
+ c.createStatement().execute("CREATE TABLE IF NOT EXISTS copy_into (i int, a Variant, b string) ENGINE = FUSE");
+ }
}
// generate a csv file in a temp directory with given lines, return absolute path of the generated csv
@@ -98,7 +100,7 @@ private String generateRandomCSVComplex(int lines) {
FileWriter writer = new FileWriter(csvPath);
CsvWriter w = CsvWriter.builder().quoteCharacter('"').lineDelimiter(LineDelimiter.LF).build(writer);
for (int i = 0; i < lines; i++) {
- w.writeRow("1", "{\"str_col\": 1, \"int_col\": 2}", "c");
+ w.writeRow(String.valueOf(i), "{\"str_col\": 1, \"int_col\": 2}", "c");
}
writer.close();
} catch (Exception e) {
@@ -113,8 +115,8 @@ public void testFileTransfer()
String filePath = generateRandomCSV(10000);
File f = new File(filePath);
InputStream downloaded = null;
- try (FileInputStream fileInputStream = new FileInputStream(f)) {
- Connection connection = Utils.createConnection();
+ try (FileInputStream fileInputStream = new FileInputStream(f);
+ Connection connection = Utils.createConnection()) {
String stageName = "test_stage";
DatabendConnection databendConnection = connection.unwrap(DatabendConnection.class);
PresignContext.createStageIfNotExists(databendConnection, stageName);
@@ -131,14 +133,15 @@ public void testFileTransfer()
}
}
- @Test(groups = {"LOCAL"})
+ @Test(groups = {"IT"})
public void testFileTransferThroughAPI() {
String filePath = generateRandomCSV(100000);
File f = new File(filePath);
- try (InputStream fileInputStream = Files.newInputStream(f.toPath())) {
+ try (InputStream fileInputStream = Files.newInputStream(f.toPath());
+ Connection connection = Utils.createConnectionWithPresignedUrlDisable()) {
Logger.getLogger(OkHttpClient.class.getName()).setLevel(Level.ALL);
- Connection connection = Utils.createConnectionWithPresignedUrlDisable();
+
String stageName = "test_stage";
DatabendConnection databendConnection = connection.unwrap(DatabendConnection.class);
PresignContext.createStageIfNotExists(databendConnection, stageName);
@@ -176,4 +179,49 @@ public void testCopyInto() {
throw new RuntimeException(e);
}
}
+
+ @Test(groups = {"IT"})
+ public void testLoadStreamToTableWithStage() {
+ testLoadStreamToTableInner("stage");
+ }
+
+ @Test(groups = {"IT"})
+ public void testLoadStreamToTableWithStreaming() {
+ testLoadStreamToTableInner("streaming");
+ }
+
+ public void testLoadStreamToTableInner(String method) {
+ if (!Compatibility.driverCapability.streamingLoad) {
+ System.out.println("Skip testLoadStreamToTableInner: driver version too low");
+ return;
+ }
+ if (!Compatibility.serverCapability.streamingLoad) {
+ System.out.println("Skip testLoadStreamToTableInner: server version too low");
+ return;
+ }
+ System.out.println("testLoadStreamToTableInner " + method);
+ String filePath = generateRandomCSVComplex(10);
+ File f = new File(filePath);
+ try (FileInputStream fileInputStream = new FileInputStream(f);
+ Connection connection = Utils.createConnectionWithPresignedUrlDisable();
+ Statement statement = connection.createStatement()) {
+ statement.execute("create or replace database test_load");
+ statement.execute("use test_load");
+ statement.execute("create or replace table test_load(i int, a Variant, b string)");
+ DatabendConnection databendConnection = connection.unwrap(DatabendConnection.class);
+ String sql = "insert into test_load from @_databend_load file_format=(type=csv)";
+ int nUpdate = databendConnection.loadStreamToTable(sql, fileInputStream, f.length(), method);
+ Assert.assertEquals(nUpdate, 10);
+ fileInputStream.close();
+ ResultSet r = statement.executeQuery("SELECT * FROM test_load");
+ int n = 0;
+ while (r.next()) {
+ Assert.assertEquals(r.getInt(1), n);
+ n += 1;
+ }
+ Assert.assertEquals(10, n);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
}
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/TestGeometry.java b/databend-jdbc/src/test/java/com/databend/jdbc/TestGeometry.java
new file mode 100644
index 00000000..e82367af
--- /dev/null
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/TestGeometry.java
@@ -0,0 +1,42 @@
+package com.databend.jdbc;
+
+import org.locationtech.jts.geom.Geometry;
+import org.locationtech.jts.io.ParseException;
+import org.locationtech.jts.io.WKBReader;
+import org.testng.Assert;
+import org.testng.annotations.Test;
+
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+public class TestGeometry {
+ @Test(groups = {"IT"})
+ public void testSelectGeometry() throws SQLException, ParseException {
+ try (Connection connection = Utils.createConnection();
+ Statement statement = connection.createStatement()
+ ) {
+ statement.execute("set enable_geo_create_table=1");
+ statement.execute("CREATE or replace table cities ( id INT, name VARCHAR NOT NULL, location GEOMETRY);");
+ statement.execute("INSERT INTO cities (id, name, location) VALUES (1, 'New York', 'POINT (-73.935242 40.73061)');");
+ statement.execute("INSERT INTO cities (id, name, location) VALUES (2, 'Null', null);");
+ try (ResultSet r = statement.executeQuery("select location from cities order by id")) {
+ r.next();
+ Assert.assertEquals("{\"type\": \"Point\", \"coordinates\": [-73.935242,40.73061]}", r.getObject(1));
+ r.next();
+ Assert.assertNull(r.getObject(1));
+ }
+
+ // set geometry_output_format to wkb
+ connection.createStatement().execute("set geometry_output_format='WKB'");
+ try (ResultSet r = statement.executeQuery("select location from cities order by id")) {
+ r.next();
+ byte[] wkb = r.getBytes(1);
+ WKBReader wkbReader = new WKBReader();
+ Geometry geometry = wkbReader.read(wkb);
+ Assert.assertEquals("POINT (-73.935242 40.73061)", geometry.toText());
+ }
+ }
+ }
+}
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/TestMultiHost.java b/databend-jdbc/src/test/java/com/databend/jdbc/TestMultiHost.java
index 852e4bf2..b33ff7bd 100644
--- a/databend-jdbc/src/test/java/com/databend/jdbc/TestMultiHost.java
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/TestMultiHost.java
@@ -11,15 +11,16 @@
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
public class TestMultiHost {
- private final String DEFAULT_JDBC_URL = "jdbc:databend://localhost:8000,localhost:8002,localhost:8003/default";
- private final String RANDOM_JDBC_URL = "jdbc:databend://localhost:8000,localhost:8002,localhost:8003/default?load_balancing_policy=random";
- private final String ROUND_ROBIN_JDBC_URL = "jdbc:databend://localhost:8000,localhost:8002,localhost:8003/default?load_balancing_policy=round_robin";
- private final String FAIL_OVER_JDBC_URL = "jdbc:databend://localhost:7222,localhost:7223,localhost:7224,localhost:8000/default?load_balancing_policy=round_robin&max_failover_retry=4";
- private final String AUTO_DISCOVERY_JDBC_URL = "jdbc:databend://localhost:8000/default?load_balancing_policy=round_robin&auto_discovery=true";
- private final String UNSUPPORT_AUTO_DISCOVERY_JDBC_URL = "jdbc:databend://localhost:8000/default?load_balancing_policy=round_robin&auto_discovery=true&enable_mock=true";
+ private final String DEFAULT_JDBC_URL = "jdbc:databend://localhost:8001,localhost:8002,localhost:8003/default";
+ private final String RANDOM_JDBC_URL = "jdbc:databend://localhost:8001,localhost:8002,localhost:8003/default?load_balancing_policy=random";
+ private final String ROUND_ROBIN_JDBC_URL = "jdbc:databend://localhost:8001,localhost:8002,localhost:8003/default?load_balancing_policy=round_robin";
+ private final String FAIL_OVER_JDBC_URL = "jdbc:databend://localhost:7222,localhost:7223,localhost:7224,localhost:8001/default?load_balancing_policy=round_robin&max_failover_retry=4";
+ private final String AUTO_DISCOVERY_JDBC_URL = "jdbc:databend://localhost:8001/default?load_balancing_policy=round_robin&auto_discovery=true";
+ private final String UNSUPPORT_AUTO_DISCOVERY_JDBC_URL = "jdbc:databend://localhost:8001/default?load_balancing_policy=round_robin&auto_discovery=true&enable_mock=true";
private Connection createConnection(String url)
@@ -27,103 +28,42 @@ private Connection createConnection(String url)
return DriverManager.getConnection(url, "databend", "databend");
}
- @Test(groups = {"IT", "CLUSTER"})
+ @Test(groups = {"IT", "MULTI_HOST"})
public void testDefaultLoadBalancing()
throws SQLException {
- // try to connect with three nodes 1000 times and count for each node
- int node8000 = 0;
- int node8002 = 0;
- int node8003 = 0;
- int unknown = 0;
- for (int i = 0; i < 100; i++) {
- try (Connection connection = createConnection(DEFAULT_JDBC_URL)) {
- DatabendStatement statement = (DatabendStatement) connection.createStatement();
- statement.execute("select value from system.configs where name = 'http_handler_port';");
- ResultSet r = statement.getResultSet();
- r.next();
- if (r.getInt(1) == 8000) {
- node8000++;
- } else if (r.getInt(1) == 8002) {
- node8002++;
- } else if (r.getInt(1) == 8003) {
- node8003++;
- } else {
- unknown++;
- }
- }
- }
- Assert.assertEquals(node8000, 100);
- Assert.assertEquals(node8002, 0);
- Assert.assertEquals(node8003, 0);
- Assert.assertEquals(unknown, 0);
+ HashMap expect = new HashMap<>();
+ expect.put(8001, 90);
+
+ HashMap actual = get_hosts_used(DEFAULT_JDBC_URL);
+ Assert.assertEquals(expect, actual);
}
- @Test(groups = {"IT", "CLUSTER"})
+ @Test(groups = {"IT", "MULTI_HOST"})
public void testRandomLoadBalancing()
throws SQLException {
- // try to connect with three nodes 1000 times and count for each node
- int node8000 = 0;
- int node8002 = 0;
- int node8003 = 0;
- int unknown = 0;
- for (int i = 0; i < 100; i++) {
- try (Connection connection = createConnection(RANDOM_JDBC_URL)) {
- DatabendStatement statement = (DatabendStatement) connection.createStatement();
- statement.execute("select value from system.configs where name = 'http_handler_port';");
- ResultSet r = statement.getResultSet();
- r.next();
- if (r.getInt(1) == 8000) {
- node8000++;
- } else if (r.getInt(1) == 8002) {
- node8002++;
- } else if (r.getInt(1) == 8003) {
- node8003++;
- } else {
- unknown++;
- }
- }
- }
- Assert.assertTrue(node8000 > 0 && node8002 > 0 && node8003 > 0);
- Assert.assertEquals(unknown, 0);
- Assert.assertEquals(node8000 + node8002 + node8003, 100);
+ HashMap actual = get_hosts_used(RANDOM_JDBC_URL);
+
+ int node8001 = actual.get(8001);
+ int node8002 = actual.get(8002);
+ int node8003 = actual.get(8003);
+
+ Assert.assertTrue(node8001 > 0 && node8002 > 0 && node8003 > 0, "got " + actual);
+ Assert.assertEquals(node8001 + node8002 + node8003, 90, "got " + actual);
}
- @Test(groups = {"IT", "CLUSTER"})
+ @Test(groups = {"IT", "MULTI_HOST"})
public void testRoundRobinLoadBalancing()
throws SQLException {
- // try to connect with three nodes 1000 times and count for each node
- int node8000 = 0;
- int node8002 = 0;
- int node8003 = 0;
- int unknown = 0;
- for (int i = 0; i < 30; i++) {
- try (Connection connection = createConnection(ROUND_ROBIN_JDBC_URL)) {
- DatabendStatement statement = (DatabendStatement) connection.createStatement();
- // remove the effect setup commands
- for (int j = 0; j < 3; j++) {
- statement.execute("select value from system.configs where name = 'http_handler_port';");
- ResultSet r = statement.getResultSet();
- r.next();
- if (r.getInt(1) == 8000) {
- node8000++;
- } else if (r.getInt(1) == 8002) {
- node8002++;
- } else if (r.getInt(1) == 8003) {
- node8003++;
- } else {
- unknown++;
- }
- }
- }
- }
- Assert.assertEquals(node8000, 30);
- Assert.assertEquals(node8002, 30);
- Assert.assertEquals(node8003, 30);
- Assert.assertEquals(unknown, 0);
- Assert.assertEquals(node8000 + node8002 + node8003, 90);
+ HashMap expect = new HashMap<>();
+ expect.put(8001, 30);
+ expect.put(8002, 30);
+ expect.put(8003, 30);
+
+ HashMap actual = get_hosts_used(ROUND_ROBIN_JDBC_URL);
+ Assert.assertEquals(expect, actual);
}
- @Test(groups = {"IT", "CLUSTER"})
+ @Test(groups = {"IT", "MULTI_HOST"})
public void testRoundRobinTransaction()
throws SQLException {
// try to connect with three nodes 1000 times and count for each node
@@ -156,82 +96,33 @@ public void testRoundRobinTransaction()
Assert.assertEquals(count, 30);
}
}
-
- @Test(groups = {"IT", "CLUSTER"})
+ // @Test(groups = {"IT", "MULTI_HOST"})
+ // skip since getConnection does not support multihost for now
public void testFailOver()
throws SQLException {
- // try connect with three nodes 1000 times and count for each node
- int node8000 = 0;
- int node8002 = 0;
- int node8003 = 0;
- int unknown = 0;
- for (int i = 0; i < 30; i++) {
- try (Connection connection = createConnection(FAIL_OVER_JDBC_URL)) {
- DatabendStatement statement = (DatabendStatement) connection.createStatement();
- // remove the effect setup commands
- for (int j = 0; j < 3; j++) {
- statement.execute("select value from system.configs where name = 'http_handler_port';");
- ResultSet r = statement.getResultSet();
- r.next();
- if (r.getInt(1) == 8000) {
- node8000++;
- } else if (r.getInt(1) == 8002) {
- node8002++;
- } else if (r.getInt(1) == 8003) {
- node8003++;
- } else {
- unknown++;
- }
- }
- }
- }
+ HashMap expect = new HashMap<>();
+ expect.put(8001, 90);
- Assert.assertEquals(node8000, 90);
- Assert.assertEquals(unknown, 0);
- Assert.assertEquals(node8000 + node8002 + node8003, 90);
+ HashMap actual = get_hosts_used(FAIL_OVER_JDBC_URL);
+ Assert.assertEquals(expect, actual);
}
- @Test(groups = {"IT", "CLUSTER"})
+ @Test(groups = {"IT", "MULTI_HOST"})
public void testAutoDiscovery()
throws SQLException {
- // try connect with three nodes 1000 times and count for each node
- int node8000 = 0;
- int node8002 = 0;
- int node8003 = 0;
- int unknown = 0;
- try (Connection connection = createConnection(AUTO_DISCOVERY_JDBC_URL)) {
- for (int i = 0; i < 30; i++) {
- DatabendStatement statement = (DatabendStatement) connection.createStatement();
- // remove the effect setup commands
- for (int j = 0; j < 3; j++) {
- statement.execute("select value from system.configs where name = 'http_handler_port';");
- ResultSet r = statement.getResultSet();
- r.next();
- if (r.getInt(1) == 8000) {
- node8000++;
- } else if (r.getInt(1) == 8002) {
- node8002++;
- } else if (r.getInt(1) == 8003) {
- node8003++;
- } else {
- unknown++;
- }
- }
- }
- }
+ HashMap expect = new HashMap<>();
+ expect.put(8001, 31);
+ expect.put(8002, 30);
+ expect.put(8003, 29);
- Assert.assertEquals(node8000, 31);
- Assert.assertEquals(node8002, 30);
- Assert.assertEquals(node8003, 29);
- Assert.assertEquals(unknown, 0);
- Assert.assertEquals(node8000 + node8002 + node8003, 90);
+ HashMap actual = get_hosts_used(AUTO_DISCOVERY_JDBC_URL);
+ Assert.assertEquals(expect, actual);
}
- @Test(groups = {"IT", "CLUSTER"})
+ @Test(groups = {"IT", "MULTI_HOST"})
public void testUnSupportedAutoDiscovery()
throws SQLException {
try (Connection connection = createConnection(UNSUPPORT_AUTO_DISCOVERY_JDBC_URL)) {
-
DatabendStatement statement = (DatabendStatement) connection.createStatement();
statement.execute("select value from system.configs where name = 'http_handler_port';");
ResultSet r = statement.getResultSet();
@@ -239,27 +130,41 @@ public void testUnSupportedAutoDiscovery()
DatabendConnection dbc = (DatabendConnection) connection;
// automatically
Assert.assertFalse(dbc.isAutoDiscovery());
- } catch (SQLException e) {
- // there should be no exception
- Assert.fail("Should not throw exception");
}
}
@Test(groups = {"UNIT"})
public void testAutoDiscoveryUriParsing() throws SQLException {
- DatabendDriverUri uri = DatabendDriverUri.create("jdbc:databend://localhost:8000?ssl=true", null);
- DatabendDriverUri uri2 = DatabendDriverUri.create("jdbc:databend://127.0.0.1:8000,127.0.0.1:8002,127.0.0.1:8003?ssl=true", null);
+ DatabendDriverUri uri = DatabendDriverUri.create("jdbc:databend://localhost:8001?ssl=true", null);
+ DatabendDriverUri uri2 = DatabendDriverUri.create("jdbc:databend://127.0.0.1:8001,127.0.0.1:8002,127.0.0.1:8003?ssl=true", null);
List uris2 = uri2.getNodes().getUris();
DatabendNodes nodes = uri.getNodes();
List discoveryNodes = new ArrayList<>();
- discoveryNodes.add(DiscoveryNode.create("127.0.0.1:8000"));
+ discoveryNodes.add(DiscoveryNode.create("127.0.0.1:8001"));
discoveryNodes.add(DiscoveryNode.create("127.0.0.1:8002"));
discoveryNodes.add(DiscoveryNode.create("127.0.0.1:8003"));
List uris = nodes.parseURI(discoveryNodes);
Assert.assertEquals(uris.size(), 3);
Assert.assertEquals(uris2.size(), 3);
Assert.assertEquals(uris2, uris);
+ }
+ private HashMap get_hosts_used(String dsn) throws SQLException {
+ HashMap ports = new HashMap<>();
+ try (Connection connection = createConnection(dsn)) {
+ for (int i = 0; i < 30; i++) {
+ DatabendStatement statement = (DatabendStatement) connection.createStatement();
+ // remove the effect of setup commands
+ for (int j = 0; j < 3; j++) {
+ statement.execute("select value from system.configs where name = 'http_handler_port';");
+ ResultSet r = statement.getResultSet();
+ r.next();
+ int p = r.getInt(1);
+ ports.merge(p, 1, Integer::sum);
+ }
+ }
+ }
+ return ports;
}
}
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/TestPrepareStatement.java b/databend-jdbc/src/test/java/com/databend/jdbc/TestPrepareStatement.java
index d1057f8b..983a11d1 100644
--- a/databend-jdbc/src/test/java/com/databend/jdbc/TestPrepareStatement.java
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/TestPrepareStatement.java
@@ -3,6 +3,7 @@
import com.databend.client.StageAttachment;
import org.testng.Assert;
import org.testng.annotations.BeforeTest;
+import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.sql.Connection;
@@ -17,6 +18,7 @@
import java.util.List;
import java.util.Properties;
+
public class TestPrepareStatement {
@BeforeTest
public void setUp()
@@ -24,78 +26,76 @@ public void setUp()
// create table
Connection c = Utils.createConnection();
System.out.println("-----------------");
- System.out.println("drop all existing test table");
+ c.createStatement().execute("create database if not exists test_prepare_statement");
+
c.createStatement().execute("drop table if exists test_prepare_statement");
- c.createStatement().execute("drop table if exists test_prepare_time");
- c.createStatement().execute("drop table if exists objects_test1");
- c.createStatement().execute("drop table if exists binary1");
- c.createStatement().execute("drop table if exists test_prepare_statement_null");
c.createStatement().execute("create table test_prepare_statement (a int, b string)");
- c.createStatement().execute("create table test_prepare_statement_null (a int, b string)");
- c.createStatement().execute("create table test_prepare_time(a DATE, b TIMESTAMP)");
- // json data
- c.createStatement().execute(
- "CREATE TABLE IF NOT EXISTS objects_test1(id TINYINT, obj VARIANT, d TIMESTAMP, s String, arr ARRAY(INT64)) Engine = Fuse");
- // Binary data
- c.createStatement().execute("create table IF NOT EXISTS binary1 (a binary);");
}
@Test(groups = "IT")
public void TestBatchInsert() throws SQLException {
Connection c = Utils.createConnection();
c.setAutoCommit(false);
+ Statement s = c.createStatement();
+ s.execute("use test_prepare_statement");
+ s.execute("create or replace table batch_insert (a int, b string)");
- PreparedStatement ps = c.prepareStatement("insert into test_prepare_statement values");
- ps.setInt(1, 1);
- ps.setString(2, "a");
- ps.addBatch();
- ps.setInt(1, 2);
- ps.setString(2, "b");
- ps.addBatch();
- System.out.println("execute batch insert");
+ int[] c1 = {1, 2};
+ String[] c2 = {"a", "b"};
+
+ PreparedStatement ps = c.prepareStatement("insert into batch_insert values");
+ for (int i = 0; i < c1.length; i++) {
+ ps.setInt(1, c1[i]);
+ ps.setString(2, c2[i]);
+ ps.addBatch();
+ }
int[] ans = ps.executeBatch();
- Assert.assertEquals(ans.length, 2);
- Assert.assertEquals(ans[0], 1);
- Assert.assertEquals(ans[1], 1);
- Statement statement = c.createStatement();
+ Assert.assertEquals(ans, new int[] {1, 1});
- System.out.println("execute select");
- statement.execute("SELECT * from test_prepare_statement");
- ResultSet r = statement.getResultSet();
+ s.execute("SELECT * from batch_insert");
+ ResultSet r = s.getResultSet();
- while (r.next()) {
- System.out.println(r.getInt(1));
- System.out.println(r.getString(2));
+ for (int i = 0; i < c1.length; i++) {
+ Assert.assertTrue(r.next());
+ Assert.assertEquals(r.getInt(1), c1[i]);
+ Assert.assertEquals(r.getString(2), c2[i]);
}
+ Assert.assertFalse(r.next());
}
@Test(groups = "IT")
public void TestBatchInsertWithNULL() throws SQLException {
Connection c = Utils.createConnection();
c.setAutoCommit(false);
+ Statement s = c.createStatement();
+ s.execute("use test_prepare_statement");
+ s.execute("create or replace table batch_insert_null (a int, b string)");
+
+
+ PreparedStatement ps = c.prepareStatement("insert into batch_insert_null values");
- PreparedStatement ps = c.prepareStatement("insert into test_prepare_statement_null values");
ps.setInt(1, 1);
ps.setNull(2, Types.NULL);
ps.addBatch();
+
ps.setInt(1, 2);
ps.setObject(2, null, Types.NULL);
ps.addBatch();
- System.out.println("execute batch insert");
+
int[] ans = ps.executeBatch();
- Assert.assertEquals(ans.length, 2);
- Assert.assertEquals(ans[0], 1);
- Assert.assertEquals(ans[1], 1);
- Statement statement = c.createStatement();
+ Assert.assertEquals(ans, new int[] {1, 1});
- System.out.println("execute select");
- statement.execute("SELECT * from test_prepare_statement_null");
+ Statement statement = c.createStatement();
+ statement.execute("SELECT * from batch_insert_null");
ResultSet r = statement.getResultSet();
- while (r.next()) {
- System.out.println(r.getInt(1));
- Assert.assertEquals(r.getObject(2), null);
+ int[] c1 = {1, 2};
+ for (int j : c1) {
+ Assert.assertTrue(r.next());
+ Assert.assertEquals(r.getInt(1), j);
+ Assert.assertNull(r.getString(2));
}
+ Assert.assertFalse(r.next());
}
@Test(groups = "IT")
@@ -126,154 +126,126 @@ public void TestConvertSQLWithBatchValues() throws SQLException {
@Test(groups = "IT")
public void TestBatchDelete() throws SQLException {
- Connection c = Utils.createConnection();
- c.setAutoCommit(false);
-
- PreparedStatement ps = c.prepareStatement("insert into test_prepare_statement values");
- ps.setInt(1, 1);
- ps.setString(2, "b");
- ps.addBatch();
- ps.setInt(1, 3);
- ps.setString(2, "b");
- ps.addBatch();
- System.out.println("execute batch insert");
- int[] ans = ps.executeBatch();
- Assert.assertEquals(ans.length, 2);
- Assert.assertEquals(ans[0], 1);
- Assert.assertEquals(ans[1], 1);
- Statement statement = c.createStatement();
-
- System.out.println("execute select");
- statement.execute("SELECT * from test_prepare_statement");
- ResultSet r = statement.getResultSet();
-
- while (r.next()) {
- System.out.println(r.getInt(1));
- System.out.println(r.getString(2));
- }
+ try ( Connection c = Utils.createConnection();
+ Statement statement = c.createStatement()
+ ) {
+ c.setAutoCommit(false);
+ c.createStatement().execute("create or replace table test_batch_delete(a int, b string)");
+
+ int[] c1 = {1, 3};
+ String[] c2 = {"b", "b"};
+
+ PreparedStatement ps = c.prepareStatement("insert into test_batch_delete values");
+ for (int i = 0; i < c1.length; i++) {
+ ps.setInt(1, c1[i]);
+ ps.setString(2, c2[i]);
+ ps.addBatch();
+ }
+ Assert.assertEquals(ps.executeBatch(), new int[] {1, 1});
- PreparedStatement deletePs = c.prepareStatement("delete from test_prepare_statement where a = ?");
- deletePs.setInt(1, 1);
- deletePs.addBatch();
- int[] ansDel = deletePs.executeBatch();
- System.out.println(ansDel);
+ statement.execute("SELECT * from test_batch_delete");
+ ResultSet r = statement.getResultSet();
- System.out.println("execute select");
- statement.execute("SELECT * from test_prepare_statement");
- ResultSet r1 = statement.getResultSet();
+ for (int i = 0; i < c1.length; i++) {
+ Assert.assertTrue(r.next());
+ Assert.assertEquals(r.getInt(1), c1[i]);
+ Assert.assertEquals(r.getString(2), c2[i]);
+ }
- int resultCount = 0;
- while (r1.next()) {
- resultCount += 1;
+ PreparedStatement deletePs = c.prepareStatement("delete from test_batch_delete where a = ?");
+ deletePs.setInt(1, 1);
+ deletePs.addBatch();
+ int[] ansDel = deletePs.executeBatch();
+ Assert.assertEquals(ansDel.length, 1);
+ // todo: fix this, currently == 0
+ // Assert.assertEquals(ansDel[0], 1);
+
+ System.out.println("execute select");
+ statement.execute("SELECT * from test_batch_delete");
+ ResultSet r1 = statement.getResultSet();
+
+ int resultCount = 0;
+ while (r1.next()) {
+ resultCount += 1;
+ }
+ Assert.assertEquals(resultCount, 1);
}
- Assert.assertEquals(resultCount, 1);
}
@Test(groups = "IT")
public void TestBatchInsertWithTime() throws SQLException {
Connection c = Utils.createConnection();
+ Statement s = c.createStatement();
+ s.execute("create or replace table test_prepare_time(a DATE, b TIMESTAMP)");
c.setAutoCommit(false);
- PreparedStatement ps = c.prepareStatement("insert into test_prepare_time values");
- ps.setDate(1, Date.valueOf("2020-01-10"));
- ps.setTimestamp(2, Timestamp.valueOf("1983-07-12 21:30:55.888"));
- ps.addBatch();
- ps.setDate(1, Date.valueOf("1970-01-01"));
- ps.setTimestamp(2, Timestamp.valueOf("1970-01-01 00:00:01"));
- ps.addBatch();
- ps.setDate(1, Date.valueOf("2021-01-01"));
- ps.setTimestamp(2, Timestamp.valueOf("1970-01-01 00:00:01.234"));
- int[] ans = ps.executeBatch();
- Statement statement = c.createStatement();
- System.out.println("execute select on time");
- statement.execute("SELECT * from test_prepare_time");
- ResultSet r = statement.getResultSet();
+ java.sql.Date[] c1 = {Date.valueOf("2020-01-10"), Date.valueOf("1970-01-01"), Date.valueOf("2021-01-01")};
+ Timestamp[] c2 = {Timestamp.valueOf("1983-07-12 21:30:55.888"), Timestamp.valueOf("1970-01-01 00:00:01"), Timestamp.valueOf("1970-01-01 00:00:01.234")};
- while (r.next()) {
- System.out.println(r.getDate(1).toString());
- System.out.println(r.getTimestamp(2).toString());
+ PreparedStatement ps = c.prepareStatement("insert into test_prepare_time values");
+ for (int i = 0; i < c1.length; i++) {
+ ps.setDate(1, c1[i]);
+ ps.setTimestamp(2, c2[i]);
+ ps.addBatch();
}
- }
+ Assert.assertEquals(ps.executeBatch(), new int[] {1, 1, 1});
- @Test(groups = "IT")
- public void TestBatchInsertWithComplexDataType() throws SQLException {
- Connection c = Utils.createConnection();
- c.setAutoCommit(false);
- PreparedStatement ps = c.prepareStatement("insert into objects_test1 values");
- ps.setInt(1, 1);
- ps.setString(2, "{\"a\": 1,\"b\": 2}");
- ps.setTimestamp(3, Timestamp.valueOf("1983-07-12 21:30:55.888"));
- ps.setString(4, "hello world, 你好");
- ps.setString(5, "[1,2,3,4,5]");
- ps.addBatch();
- int[] ans = ps.executeBatch();
- Statement statement = c.createStatement();
-
- System.out.println("execute select on object");
- statement.execute("SELECT * from objects_test1");
- ResultSet r = statement.getResultSet();
+ s.execute("SELECT * from test_prepare_time");
+ ResultSet r = s.getResultSet();
- while (r.next()) {
- System.out.println(r.getInt(1));
- System.out.println(r.getString(2));
- System.out.println(r.getTimestamp(3).toString());
- System.out.println(r.getString(4));
- System.out.println(r.getString(5));
+ for (int i = 0; i < c1.length; i++) {
+ Assert.assertTrue(r.next());
+ Assert.assertEquals(r.getDate(1), c1[i]);
+ Assert.assertEquals(r.getTimestamp(2), c2[i]);
}
+ Assert.assertFalse(r.next());
}
- @Test(groups = "IT")
- public void TestBatchInsertWithComplexDataTypeWithPresignAPI() throws SQLException {
- Connection c = Utils.createConnection();
- c.setAutoCommit(false);
- PreparedStatement ps = c.prepareStatement("insert into objects_test1 values");
- ps.setInt(1, 1);
- ps.setString(2, "{\"a\": 1,\"b\": 2}");
- ps.setTimestamp(3, Timestamp.valueOf("1983-07-12 21:30:55.888"));
- ps.setString(4, "hello world, 你好");
- ps.setString(5, "[1,2,3,4,5]");
- ps.addBatch();
- int[] ans = ps.executeBatch();
- Statement statement = c.createStatement();
-
- System.out.println("execute select on object");
- statement.execute("SELECT * from objects_test1");
- ResultSet r = statement.getResultSet();
-
- while (r.next()) {
- System.out.println(r.getInt(1));
- System.out.println(r.getString(2));
- System.out.println(r.getTimestamp(3).toString());
- System.out.println(r.getString(4));
- System.out.println(r.getString(5));
- }
+ @DataProvider(name = "complexDataType")
+ private Object[][] provideTestData() {
+ return new Object[][] {
+ {true, false},
+ {true, true},
+ {false, false},
+ };
}
- @Test(groups = "IT")
- public void TestBatchInsertWithComplexDataTypeWithPresignAPIPlaceHolder() throws SQLException {
- Connection c = Utils.createConnection();
- c.setAutoCommit(false);
- PreparedStatement ps = c.prepareStatement("insert into objects_test1 values(?,?,?,?,?)");
- for (int i = 0; i < 500000; i++) {
- ps.setInt(1, 2);
+ @Test(groups = "IT", dataProvider = "complexDataType")
+ public void TestBatchInsertWithComplexDataType(boolean presigned, boolean placeholder) throws SQLException {
+ String tableName = String.format("test_object_%s_%s", presigned, placeholder).toLowerCase();
+ try (Connection c = presigned ? Utils.createConnection() : Utils.createConnectionWithPresignedUrlDisable();
+ Statement s = c.createStatement()
+ ) {
+ c.setAutoCommit(false);
+ String createTableSQL = String.format(
+ "CREATE OR replace table test_prepare_statement.%s(id TINYINT, obj VARIANT, d TIMESTAMP, s String, arr ARRAY(INT64)) Engine = Fuse"
+ , tableName);
+ s.execute(createTableSQL);
+ String insertSQL = String.format("insert into test_prepare_statement.%s values %s", tableName, placeholder ? "(?,?,?,?,?)" : "");
+
+ PreparedStatement ps = c.prepareStatement(insertSQL);
+ ps.setInt(1, 1);
ps.setString(2, "{\"a\": 1,\"b\": 2}");
ps.setTimestamp(3, Timestamp.valueOf("1983-07-12 21:30:55.888"));
ps.setString(4, "hello world, 你好");
ps.setString(5, "[1,2,3,4,5]");
ps.addBatch();
- }
+ int[] ans = ps.executeBatch();
+ Assert.assertEquals(ans.length, 1);
+ Assert.assertEquals(ans[0], 1);
- int[] ans = ps.executeBatch();
- Statement statement = c.createStatement();
+ s.execute(String.format("SELECT * from test_prepare_statement.%s", tableName));
+ ResultSet r = s.getResultSet();
- System.out.println("execute select on object");
- statement.execute("SELECT * from objects_test1");
- ResultSet r = statement.getResultSet();
- int count = 0;
- while (r.next()) {
- count++;
+ Assert.assertTrue(r.next());
+ Assert.assertEquals(r.getInt(1), 1);
+ Assert.assertEquals(r.getString(2), "{\"a\":1,\"b\":2}");
+ Assert.assertEquals(Timestamp.valueOf(r.getString(3)), Timestamp.valueOf("1983-07-12 21:30:55.888"));
+ Assert.assertEquals(r.getString(4), "hello world, 你好");
+ Assert.assertEquals(r.getString(5), "[1,2,3,4,5]");
+
+ Assert.assertFalse(r.next());
}
- System.out.println(count);
}
@Test(groups = "IT")
@@ -312,7 +284,7 @@ public void TestBatchReplaceInto() throws SQLException {
c.createStatement().execute("truncate table test_prepare_statement");
}
- @Test
+ @Test(groups = "IT")
public void testPrepareStatementExecute() throws SQLException {
Connection conn = Utils.createConnection();
conn.createStatement().execute("delete from test_prepare_statement");
@@ -357,7 +329,7 @@ public void testPrepareStatementExecute() throws SQLException {
conn.createStatement().execute("truncate table test_prepare_statement");
}
- @Test
+ @Test(groups = "IT")
public void testUpdateSetNull() throws SQLException {
Connection conn = Utils.createConnection();
String sql = "insert into test_prepare_statement values (?,?)";
@@ -396,7 +368,7 @@ public void testUpdateSetNull() throws SQLException {
conn.createStatement().execute("truncate table test_prepare_statement");
}
- @Test
+ @Test(groups = "IT")
public void testUpdateStatement() throws SQLException {
Connection conn = Utils.createConnection();
String sql = "insert into test_prepare_statement values (?,?)";
@@ -430,7 +402,7 @@ public void testUpdateStatement() throws SQLException {
}
}
- @Test
+ @Test(groups = "IT")
public void testAllPreparedStatement() throws SQLException {
String sql = "insert into test_prepare_statement values (?,?)";
Connection conn = Utils.createConnection();
@@ -508,7 +480,7 @@ public void testAllPreparedStatement() throws SQLException {
conn.createStatement().execute("truncate table test_prepare_statement");
}
- @Test
+ @Test(groups = "IT")
public void shouldBuildStageAttachmentWithFileFormatOptions() throws SQLException {
Connection conn = Utils.createConnection();
Assert.assertEquals("", conn.unwrap(DatabendConnection.class).binaryFormat());
@@ -521,7 +493,7 @@ public void shouldBuildStageAttachmentWithFileFormatOptions() throws SQLExceptio
Assert.assertEquals("\\N", stageAttachment.getCopyOptions().get("NULL_DISPLAY"));
}
- @Test
+ @Test(groups = "IT")
public void testSelectWithClusterKey() throws SQLException {
Connection conn = Utils.createConnection();
conn.createStatement().execute("drop table if exists default.test_clusterkey");
@@ -551,7 +523,7 @@ public void testSelectWithClusterKey() throws SQLException {
}
}
- @Test
+ @Test(groups = "IT")
public void testEncodePass() throws SQLException {
Connection conn = Utils.createConnection();
conn.createStatement().execute("create user if not exists 'u01' identified by 'mS%aFRZW*GW';");
@@ -565,7 +537,7 @@ public void testEncodePass() throws SQLException {
conn.createStatement().execute("drop user if exists 'u01'");
}
- @Test
+ @Test(groups = "IT")
public void testExecuteUpdate() throws SQLException {
Connection conn = Utils.createConnection();
conn.createStatement().execute("delete from test_prepare_statement");
@@ -635,47 +607,50 @@ public void testExecuteUpdate() throws SQLException {
conn.createStatement().execute("delete from test_prepare_statement");
}
- @Test
-
+ @Test(groups = "IT")
public void testInsertWithSelect() throws SQLException {
+ if (Compatibility.skipDriverBugLowerThen("0.3.9")) {
+ return;
+ }
Connection conn = Utils.createConnection();
- conn.createStatement().execute("delete from test_prepare_statement");
-
- String insertSql = "insert into test_prepare_statement select a, b from test_prepare_statement where b = ?";
- try (PreparedStatement statement = conn.prepareStatement(insertSql)) {
- statement.setString(1, "a");
- int insertedRows = statement.executeUpdate();
+ Statement statement = conn.createStatement();
+ statement.execute("use test_prepare_statement");
+ statement.execute("create or replace table insert_with_select (a int, b string)");
+
+ String insertSql = "insert into insert_with_select select a, b from insert_with_select where b = ?";
+ try (PreparedStatement ps = conn.prepareStatement(insertSql)) {
+ ps.setString(1, "a");
+ int insertedRows = ps.executeUpdate();
Assert.assertEquals(0, insertedRows, "should not insert any rows as the table is empty");
}
// Insert some data
- String insertDataSql = "insert into test_prepare_statement values (?,?)";
- try (PreparedStatement statement = conn.prepareStatement(insertDataSql)) {
- statement.setInt(1, 1);
- statement.setString(2, "a");
- statement.executeUpdate();
+ String insertDataSql = "insert into insert_with_select values (?,?)";
+ try (PreparedStatement ps = conn.prepareStatement(insertDataSql)) {
+ ps.setInt(1, 1);
+ ps.setString(2, "a");
+ int insertedRows = ps.executeUpdate();
+ Assert.assertEquals(1, insertedRows, "should insert 1 rows");
- statement.setInt(1, 2);
- statement.setString(2, "b");
- statement.executeUpdate();
+ ps.setInt(1, 2);
+ ps.setString(2, "b");
+ insertedRows = ps.executeUpdate();
+ Assert.assertEquals(1, insertedRows, "should insert 1 rows");
}
// Now try to insert again with select
- try (PreparedStatement statement = conn.prepareStatement(insertSql)) {
- statement.setString(1, "a");
- int insertedRows = statement.executeUpdate();
- Assert.assertEquals(1, insertedRows, "should insert two rows from the select");
+ try (PreparedStatement ps = conn.prepareStatement(insertSql)) {
+ ps.setString(1, "a");
+ int insertedRows = ps.executeUpdate();
+ Assert.assertEquals(1, insertedRows, "should insert 1 row from the select");
}
- ResultSet rs = conn.createStatement().executeQuery("select * from test_prepare_statement order by a");
+ ResultSet rs = conn.createStatement().executeQuery("select * from insert_with_select order by a");
int count = 0;
while (rs.next()) {
count++;
}
- Assert.assertEquals(3, count, "should have four rows in the table after insert with select");
-
- // Clean up
- conn.createStatement().execute("delete from test_prepare_statement");
+ Assert.assertEquals(3, count, "should have 3 rows in the table after insert with select");
+ conn.close();
}
-
}
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/TestStageAttachment.java b/databend-jdbc/src/test/java/com/databend/jdbc/TestStageAttachment.java
index f7a3963a..e700ffff 100644
--- a/databend-jdbc/src/test/java/com/databend/jdbc/TestStageAttachment.java
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/TestStageAttachment.java
@@ -15,6 +15,5 @@ public void TestStageAttachment() {
StageAttachment attachment = new StageAttachment.Builder().setLocation(stagePath)
.build();
assertEquals("StageAttachment{location=@~/prefix/uuid/test, file_format_options={type=CSV}, copy_options=null}", attachment.toString());
-
}
}
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/TestStatementUtil.java b/databend-jdbc/src/test/java/com/databend/jdbc/TestStatementUtil.java
index 8ebb82dc..55a2afdc 100644
--- a/databend-jdbc/src/test/java/com/databend/jdbc/TestStatementUtil.java
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/TestStatementUtil.java
@@ -9,7 +9,7 @@
public class TestStatementUtil {
- @Test
+ @Test(groups = {"UNIT"})
public void testExtractColumnTypes() {
String sql = "insert into non_existing_table ('col2 String, col3 Int8, col1 VARIANT') values (?, ?, ?)";
Map columnTypes = StatementUtil.extractColumnTypes(sql);
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/TestTempTable.java b/databend-jdbc/src/test/java/com/databend/jdbc/TestTempTable.java
index 73f64ee3..e30d4e1c 100644
--- a/databend-jdbc/src/test/java/com/databend/jdbc/TestTempTable.java
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/TestTempTable.java
@@ -10,7 +10,7 @@
public class TestTempTable {
- @Test
+ @Test( groups = {"IT"})
public void testTempTable() throws SQLException {
try(Connection c1 = Utils.createConnection()) {
Statement statement= c1.createStatement();
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/TestTransaction.java b/databend-jdbc/src/test/java/com/databend/jdbc/TestTransaction.java
index 4de36170..b404d0e3 100644
--- a/databend-jdbc/src/test/java/com/databend/jdbc/TestTransaction.java
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/TestTransaction.java
@@ -28,7 +28,6 @@ public void testCommit() throws SQLException {
Connection c2 = Utils.createConnection();
Connection c3 = Utils.createConnection()
) {
-
c1.createStatement().execute("create or replace table test_txn.table1(i int)");
try (Statement statement = c1.createStatement()) {
@@ -111,9 +110,12 @@ public void testConflict() throws SQLException {
java.sql.SQLException.class,
() -> statement1.execute("commit")
);
- // e.g. Unresolvable conflict detected for table 2249
- Assert.assertTrue(exception.getMessage().toLowerCase().contains("conflict"));
+ // Bug: Transaction timeout: last_query_id 9b619dc70fd64d6b8de7490aaf486f5c not found on this server
+ if (!Compatibility.skipBugLowerThenOrEqualTo("1.2.790", "0.3.9")) {
+ // e.g. Unresolvable conflict detected for table 2249
+ Assert.assertTrue(exception.getMessage().toLowerCase().contains("conflict"), exception.getMessage());
+ }
statement2.execute("select j from test_txn.table3 where i = 1");
ResultSet rs = statement2.getResultSet();
diff --git a/databend-jdbc/src/test/java/com/databend/jdbc/Utils.java b/databend-jdbc/src/test/java/com/databend/jdbc/Utils.java
index f8d4d354..b15f322e 100644
--- a/databend-jdbc/src/test/java/com/databend/jdbc/Utils.java
+++ b/databend-jdbc/src/test/java/com/databend/jdbc/Utils.java
@@ -1,5 +1,7 @@
package com.databend.jdbc;
+import com.vdurmont.semver4j.Semver;
+
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
@@ -8,9 +10,9 @@
public class Utils {
static String port = System.getenv("DATABEND_TEST_CONN_PORT") != null ? System.getenv("DATABEND_TEST_CONN_PORT").trim() : "8000";
-
static String username = "databend";
static String password = "databend";
+
public static String baseURL() {
return "jdbc:databend://localhost:" + port;
}
@@ -18,6 +20,7 @@ public static String baseURL() {
public static String getUsername() {
return username;
}
+
public static String getPassword() {
return password;
}
@@ -44,3 +47,4 @@ public static Connection createConnectionWithPresignedUrlDisable() throws SQLExc
return DriverManager.getConnection(url, "databend", "databend");
}
}
+
diff --git a/pom.xml b/pom.xml
index 7efe4c1e..7e0ec217 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3,6 +3,7 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4.0.0
+
io.airlift
airbase
130
@@ -38,17 +39,19 @@
${project.basedir}
- true
- true
- true
- true
8
+
+ true
+
219
${dep.airlift.version}
+ 2.19.2
+ 32.0.1-jre
+ 1.7.6
+
+ 3.1.0
+
5.0.0-alpha.11
- 3.0.0
- 2.15.0
- 0.9.0
-missing
${maven.multiModuleProjectDirectory}/config/checkstyle/checkstyle.xml
@@ -65,34 +68,25 @@
databend-client
${project.version}
-
- io.airlift
- units
- 1.7
-
-
- io.airlift
- json
- ${dep.airlift.version}
-
+
net.jodah
failsafe
2.4.0
-
- com.squareup.okhttp3
- okhttp
- ${dep.okhttp.version}
-
-
com.squareup.okio
okio
${dep.okio.version}
+
+ com.squareup.okhttp3
+ okhttp
+ ${dep.okhttp.version}
+
+
com.squareup.okhttp3
okhttp-tls
@@ -108,13 +102,19 @@
com.google.errorprone
error_prone_annotations
- ${dep.errorprone.version}
+ 2.41.0
+
+
+
+ org.jetbrains.kotlin
+ kotlin-stdlib-common
+ 1.7.10
- com.github.zafarkhaja
- java-semver
- ${dep.semver.version}
+ org.jetbrains.kotlin
+ kotlin-stdlib
+ 1.7.10
@@ -226,7 +226,6 @@
3.3.0
${checkstyle.config.location}
- UTF-8
true
true
false
diff --git a/scripts/ci/nginx_hash.conf b/scripts/ci/nginx_hash.conf
deleted file mode 100644
index 1c7716c9..00000000
--- a/scripts/ci/nginx_hash.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-events {
- worker_connections 1024;
-}
-
-http {
- map $http_x_query_route $backend {
- default backend1;
- }
-
- upstream backend1 {
- hash $http_x_databend_route_hint consistent;
-
- server 127.0.0.1:8000;
- server 127.0.0.1:8002;
- server 127.0.0.1:8003;
- }
-
- server {
- listen 8010;
-
- location / {
- proxy_pass http://$backend;
- proxy_set_header X-Databend-Relative-Path $http_x_databend_relative_path;
- proxy_set_header X-Databend-Stage-Name $http_x_databend_stage_name;
- }
- }
-}
diff --git a/scripts/ci/nginx_rr.conf b/scripts/ci/nginx_rr.conf
deleted file mode 100644
index 93c0202d..00000000
--- a/scripts/ci/nginx_rr.conf
+++ /dev/null
@@ -1,22 +0,0 @@
-events {
- worker_connections 1024;
-}
-
-http {
- upstream backend {
- server 127.0.0.1:8000;
- server 127.0.0.1:8002;
- server 127.0.0.1:8003;
- }
-
- server {
- listen 8010;
-
- location / {
- proxy_pass http://backend;
- proxy_set_header X-Databend-Relative-Path $http_x_databend_relative_path;
- proxy_set_header X-Databend-Stage-Name $http_x_databend_stage_name;
- proxy_set_header X-Databend-Sticky-Node $http_x_databend_sticky_node;
- }
- }
-}
diff --git a/scripts/deploy/config/databend-query-node-1.toml b/scripts/deploy/config/databend-query-node-1.toml
deleted file mode 100644
index b66d3e49..00000000
--- a/scripts/deploy/config/databend-query-node-1.toml
+++ /dev/null
@@ -1,152 +0,0 @@
-# Usage:
-# databend-query -c databend_query_config_spec.toml
-
-[query]
-max_active_sessions = 256
-shutdown_wait_timeout_ms = 5000
-
-# For flight rpc.
-flight_api_address = "0.0.0.0:9091"
-
-# Databend Query http address.
-# For admin RESET API.
-admin_api_address = "0.0.0.0:8080"
-
-# Databend Query metrics RESET API.
-metric_api_address = "0.0.0.0:7070"
-discovery_address = "localhost:8000"
-# Databend Query MySQL Handler.
-mysql_handler_host = "0.0.0.0"
-mysql_handler_port = 3307
-
-# Databend Query ClickHouse Handler.
-clickhouse_http_handler_host = "0.0.0.0"
-clickhouse_http_handler_port = 8124
-
-# Databend Query HTTP Handler.
-http_handler_host = "0.0.0.0"
-http_handler_port = 8000
-
-# Databend Query FlightSQL Handler.
-flight_sql_handler_host = "0.0.0.0"
-flight_sql_handler_port = 8900
-
-tenant_id = "test_tenant"
-cluster_id = "test_cluster"
-
-table_engine_memory_enabled = true
-default_storage_format = 'parquet'
-default_compression = 'zstd'
-
-enable_udf_server = true
-udf_server_allow_list = ['http://0.0.0.0:8815']
-cloud_control_grpc_server_address = "http://0.0.0.0:50051"
-
-[[query.users]]
-name = "root"
-auth_type = "no_password"
-
-[[query.users]]
-name = "default"
-auth_type = "no_password"
-
-# [[query.users]]
-# name = "admin"
-# auth_type = "no_password"
-
- [[query.users]]
- name = "databend"
- auth_type = "double_sha1_password"
- # echo -n "databend" | sha1sum | cut -d' ' -f1 | xxd -r -p | sha1sum
- auth_string = "3081f32caef285c232d066033c89a78d88a6d8a5"
-
-# [[query.users]]
-# name = "datafuselabs"
-# auth_type = "sha256_password"
-# # echo -n "datafuselabs" | sha256sum
-# auth_string = "6db1a2f5da402b43c066fcadcbf78f04260b3236d9035e44dd463f21e29e6f3b"
-
-# This for test
-[[query.udfs]]
-name = "ping"
-definition = "CREATE FUNCTION ping(STRING) RETURNS STRING LANGUAGE python HANDLER = 'ping' ADDRESS = 'http://0.0.0.0:8815'"
-
-[query.settings]
-aggregate_spilling_memory_ratio = 60
-join_spilling_memory_ratio = 60
-
-[log]
-
-[log.file]
-level = "DEBUG"
-format = "text"
-dir = "./.databend/logs_1"
-
-[log.query]
-on = true
-
-[log.structlog]
-on = true
-dir = "./.databend/structlog_1"
-
-[meta]
-# It is a list of `grpc_api_advertise_host:` of databend-meta config
-endpoints = ["0.0.0.0:9191"]
-username = "root"
-password = "root"
-client_timeout_in_second = 60
-auto_sync_interval = 60
-
-# Storage config.
-[storage]
-# fs | s3 | azblob | obs | oss
-type = "fs"
-
-# Set a local folder to store your data.
-# Comment out this block if you're NOT using local file system as storage.
-[storage.fs]
-data_path = "./.databend/stateless_test_data"
-
-# To use S3-compatible object storage, uncomment this block and set your values.
-# [storage.s3]
-# bucket = ""
-# endpoint_url = ""
-# access_key_id = ""
-# secret_access_key = ""
-# enable_virtual_host_style = false
-
-# To use Azure Blob storage, uncomment this block and set your values.
-# [storage.azblob]
-# endpoint_url = "https://.blob.core.windows.net"
-# container = ""
-# account_name = ""
-# account_key = ""
-
-# To use OBS object storage, uncomment this block and set your values.
-# [storage.obs]
-# bucket = ""
-# endpoint_url = ""
-# access_key_id = ""
-# secret_access_key = ""
-
-# To use OSS object storage, uncomment this block and set your values.
-# [storage.oss]
-# bucket = ""
-# endpoint_url = ""
-# access_key_id = ""
-# access_key_secret = ""
-
-# Cache config.
-[cache]
-# Type of storage to keep the table data cache
-#
-# available options: [none|disk]
-# default is "none", which disable table data cache
-# use "disk" to enabled disk cache
-data_cache_storage = "none"
-
-[cache.disk]
-# cache path
-path = "./.databend/_cache"
-# max bytes of cached data 20G
-max_bytes = 21474836480
diff --git a/scripts/deploy/config/databend-query-node-2.toml b/scripts/deploy/config/databend-query-node-2.toml
deleted file mode 100644
index 5f0c00ca..00000000
--- a/scripts/deploy/config/databend-query-node-2.toml
+++ /dev/null
@@ -1,104 +0,0 @@
-# Usage:
-# databend-query -c databend_query_config_spec.toml
-
-[query]
-max_active_sessions = 256
-shutdown_wait_timeout_ms = 5000
-
-# For flight rpc.
-flight_api_address = "0.0.0.0:9092"
-
-# Databend Query http address.
-# For admin RESET API.
-admin_api_address = "0.0.0.0:8082"
-
-# Databend Query metrics RESET API.
-metric_api_address = "0.0.0.0:7072"
-
-discovery_address = "localhost:8002"
-
-# Databend Query MySQL Handler.
-mysql_handler_host = "0.0.0.0"
-mysql_handler_port = 3308
-
-# Databend Query ClickHouse Handler.
-clickhouse_http_handler_host = "0.0.0.0"
-clickhouse_http_handler_port = 8126
-
-# Databend Query HTTP Handler.
-http_handler_host = "0.0.0.0"
-http_handler_port = 8002
-
-# Databend Query FlightSQL Handler.
-flight_sql_handler_host = "0.0.0.0"
-flight_sql_handler_port = 8902
-
-tenant_id = "test_tenant"
-cluster_id = "test_cluster"
-
-table_engine_memory_enabled = true
-default_storage_format = 'parquet'
-default_compression = 'zstd'
-
-[[query.users]]
-name = "root"
-auth_type = "no_password"
-
-[[query.users]]
-name = "default"
-auth_type = "no_password"
-
-[[query.users]]
-name = "databend"
-auth_type = "double_sha1_password"
-# echo -n "databend" | sha1sum | cut -d' ' -f1 | xxd -r -p | sha1sum
-auth_string = "3081f32caef285c232d066033c89a78d88a6d8a5"
-
-# This for test
-[[query.udfs]]
-name = "ping"
-definition = "CREATE FUNCTION ping(STRING) RETURNS STRING LANGUAGE python HANDLER = 'ping' ADDRESS = 'http://0.0.0.0:8815'"
-
-[log]
-
-[log.file]
-level = "INFO"
-format = "text"
-dir = "./.databend/logs_2"
-
-[log.structlog]
-on = true
-dir = "./.databend/structlog_2"
-
-[meta]
-# It is a list of `grpc_api_advertise_host:` of databend-meta config
-endpoints = ["0.0.0.0:9191"]
-username = "root"
-password = "root"
-client_timeout_in_second = 60
-auto_sync_interval = 60
-
-# Storage config.
-[storage]
-# fs | s3 | azblob | obs | oss
-type = "fs"
-
-# Set a local folder to store your data.
-# Comment out this block if you're NOT using local file system as storage.
-[storage.fs]
-data_path = "./.databend/stateless_test_data"
-
-# Cache config.
-[cache]
-# Type of storage to keep the table data cache
-#
-# available options: [none|disk]
-# default is "none", which disable table data cache
-# use "disk" to enabled disk cache
-data_cache_storage = "none"
-
-[cache.disk]
-# cache path
-path = "./.databend/_cache"
-# max bytes of cached data 20G
-max_bytes = 21474836480
diff --git a/scripts/deploy/deploy_cluster.sh b/scripts/deploy/deploy_cluster.sh
deleted file mode 100755
index fb685b50..00000000
--- a/scripts/deploy/deploy_cluster.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/bin/bash
-# Copyright 2022 The Databend Authors.
-# SPDX-License-Identifier: Apache-2.0.
-
-set -e
-
-echo "*************************************"
-echo "* Setting STORAGE_TYPE to S3. *"
-echo "* *"
-echo "* Please make sure that S3 backend *"
-echo "* is ready, and configured properly.*"
-echo "*************************************"
-export STORAGE_TYPE=s3
-export STORAGE_S3_BUCKET=testbucket
-export STORAGE_S3_ROOT=admin
-export STORAGE_S3_ENDPOINT_URL=http://127.0.0.1:9900
-export STORAGE_S3_ACCESS_KEY_ID=minioadmin
-export STORAGE_S3_SECRET_ACCESS_KEY=minioadmin
-export STORAGE_ALLOW_INSECURE=true
-
-SCRIPT_PATH="$(cd "$(dirname "$0")" >/dev/null 2>&1 && pwd)"
-cd "$SCRIPT_PATH/../.." || exit
-BUILD_PROFILE=${BUILD_PROFILE:-debug}
-
-# Caveat: has to kill query first.
-# `query` tries to remove its liveness record from meta before shutting down.
-# If meta is stopped, `query` will receive an error that hangs graceful
-# shutdown.
-killall databend-query || true
-sleep 3
-
-killall databend-meta || true
-sleep 3
-
-for bin in databend-query databend-meta; do
- if test -n "$(pgrep $bin)"; then
- echo "The $bin is not killed. force killing."
- killall -9 $bin || true
- fi
-done
-
-# Wait for killed process to cleanup resources
-sleep 1
-
-echo 'Start Meta service ...'
-
-mkdir -p ./.databend/
-
-nohup ./databend/bin/databend-meta -c scripts/deploy/config/databend-meta-node-1.toml >./.databend/meta-1.out 2>&1 &
-python3 scripts/wait_tcp.py --timeout 30 --port 9191 || { echo "wait_tcp failed. Showing meta-1.out:"; cat ./.databend/meta-1.out; exit 1; }
-# wait for cluster formation to complete.
-sleep 1
-
-echo 'Start databend-query node-1'
-nohup env RUST_BACKTRACE=1 ./databend/bin/databend-query -c scripts/deploy/config/databend-query-node-1.toml --internal-enable-sandbox-tenant >./.databend/query-1.out 2>&1 &
-
-echo "Waiting on node-1..."
-python3 scripts/wait_tcp.py --timeout 30 --port 9091
-
-echo 'Start databend-query node-2'
-env "RUST_BACKTRACE=1" nohup ./databend/bin/databend-query -c scripts/deploy/config/databend-query-node-2.toml --internal-enable-sandbox-tenant >./.databend/query-2.out 2>&1 &
-
-echo "Waiting on node-2..."
-python3 scripts/wait_tcp.py --timeout 30 --port 9092
-
-echo 'Start databend-query node-3'
-env "RUST_BACKTRACE=1" nohup ./databend/bin/databend-query -c scripts/deploy/config/databend-query-node-3.toml --internal-enable-sandbox-tenant >./.databend/query-3.out 2>&1 &
-
-echo "Waiting on node-3..."
-python3 scripts/wait_tcp.py --timeout 30 --port 9093
-
-echo "All done..."
diff --git a/scripts/wait_tcp.py b/scripts/wait_tcp.py
deleted file mode 100755
index ead8c24c..00000000
--- a/scripts/wait_tcp.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python3
-# coding: utf-8
-
-import socket
-import argparse
-import time
-import sys
-
-
-def tcp_ping(port, timeout):
-
- now = time.time()
-
- while time.time() - now < timeout:
- try:
- with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
- sock.connect(("127.0.0.1", port))
- print("OK :{} is listening".format(port))
- sys.stdout.flush()
- return
- except Exception:
- print("... connecting to :{}".format(port))
- sys.stdout.flush()
- time.sleep(1)
-
- raise Exception("fail to connect to :{}".format(port))
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(
- description="block until successfully connecting to a local tcp port"
- )
- parser.add_argument("-p", "--port", type=int, help="local tcp port")
- parser.add_argument("-t", "--timeout", type=int, default=10, help="time to wait.")
-
- args = parser.parse_args()
-
- tcp_ping(args.port, args.timeout)
diff --git a/tests/Makefile b/tests/Makefile
new file mode 100644
index 00000000..de810550
--- /dev/null
+++ b/tests/Makefile
@@ -0,0 +1,18 @@
+DATABEND_META_VERSION ?= nightly
+DATABEND_QUERY_VERSION ?= nightly
+
+default: run
+
+run: test-core test-driver test-bendsql down
+
+prepare:
+ mkdir -p data/databend
+
+up: prepare
+ docker compose up --quiet-pull -d --wait
+ grep -q '127.0.0.1 minio' /etc/hosts || echo '127.0.0.1 minio' | sudo tee -a /etc/hosts > /dev/null
+ curl -u root: -XPOST "http://localhost:8000/v1/query" -H 'Content-Type: application/json' -d '{"sql": "select version()", "pagination": { "wait_time_secs": 10}}'
+
+down:
+ docker compose down
+
diff --git a/tests/compatibility/test_compatibility.sh b/tests/compatibility/test_compatibility.sh
new file mode 100644
index 00000000..d81d00ab
--- /dev/null
+++ b/tests/compatibility/test_compatibility.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+set -ex
+
+curl -sSLfo ./testng.jar https://repo.maven.apache.org/maven2/org/testng/testng/7.11.0/testng-7.11.0.jar
+curl -sSLfo ./semver4j.jar https://repo1.maven.org/maven2/com/vdurmont/semver4j/3.1.0/semver4j-3.1.0.jar
+curl -sSLfo ./jcommander.jar https://repo1.maven.org/maven2/org/jcommander/jcommander/1.83/jcommander-1.83.jar
+curl -sSLfo ./jts-core.jar https://repo1.maven.org/maven2/org/locationtech/jts/jts-core/1.19.0/jts-core-1.19.0.jar
+curl -sSLfo ./slf4j-api.jar https://repo1.maven.org/maven2/org/slf4j/slf4j-api/2.0.16/slf4j-api-2.0.16.jar
+
+
+
+original_dir=$(pwd)
+cd ../..
+# got 1 if not in java project
+CURRENT_VERSION=$(mvn help:evaluate -Dexpression=project.version -q -DforceStdout)
+cd "$original_dir"
+
+TEST_SIDE=${TEST_SIDE:-server}
+TEST_VER=${DATABEND_JDB_TEST_VERSION:-$CURRENT_VERSION}
+JDBC_VER=${DATABEND_JDBC_VERSION:-$CURRENT_VERSION}
+
+if [ "$TEST_SIDE" = "server" ]; then
+ curl -sSLfo ./databend-jdbc-tests.jar "https://github.com/databendlabs/databend-jdbc/releases/download/v${TEST_VER}/databend-jdbc-${TEST_VER}-tests.jar"
+else
+ cp ../../databend-jdbc/target/databend-jdbc-${TEST_VER}-tests.jar databend-jdbc-tests.jar
+fi
+
+if [ -z "DATABEND_JDBC_VERSION" ]; then
+ # only for dev
+ cp ../../databend-jdbc/target/databend-jdbc-${JDBC_VER}.jar databend-jdbc.jar
+else
+ curl -sSLfo "./databend-jdbc-${JDBC_VER}.jar" "https://github.com/databendlabs/databend-jdbc/releases/download/v${JDBC_VER}/databend-jdbc-${JDBC_VER}.jar"
+fi
+
+export DATABEND_JDBC_VERSION=$JDBC_VER
+java -Dlogback.logger.root=INFO -cp "testng.jar:slf4j-api.jar:databend-jdbc-${JDBC_VER}.jar:databend-jdbc-tests.jar:jcommander.jar:semver4j.jar" org.testng.TestNG testng.xml
diff --git a/tests/compatibility/testng.xml b/tests/compatibility/testng.xml
new file mode 100644
index 00000000..bf42904f
--- /dev/null
+++ b/tests/compatibility/testng.xml
@@ -0,0 +1,23 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/scripts/deploy/config/databend-meta-node-1.toml b/tests/config/databend-meta-node-1.toml
similarity index 85%
rename from scripts/deploy/config/databend-meta-node-1.toml
rename to tests/config/databend-meta-node-1.toml
index 8e7c3f1d..5e8776e4 100644
--- a/scripts/deploy/config/databend-meta-node-1.toml
+++ b/tests/config/databend-meta-node-1.toml
@@ -6,7 +6,7 @@ admin_api_address = "0.0.0.0:28101"
grpc_api_address = "0.0.0.0:9191"
# databend-query fetch this address to update its databend-meta endpoints list,
# in case databend-meta cluster changes.
-grpc_api_advertise_host = "127.0.0.1"
+grpc_api_advertise_host = "meta"
[raft_config]
id = 1
@@ -15,8 +15,8 @@ raft_api_port = 28103
# Assign raft_{listen|advertise}_host in test config.
# This allows you to catch a bug in unit tests when something goes wrong in raft meta nodes communication.
-raft_listen_host = "127.0.0.1"
-raft_advertise_host = "localhost"
+raft_listen_host = "0.0.0.0"
+raft_advertise_host = "meta"
# Start up mode: single node cluster
single = true
diff --git a/scripts/deploy/config/databend-query-node-3.toml b/tests/config/databend-query-node-1.toml
similarity index 63%
rename from scripts/deploy/config/databend-query-node-3.toml
rename to tests/config/databend-query-node-1.toml
index e44d58b8..83ec9364 100644
--- a/scripts/deploy/config/databend-query-node-3.toml
+++ b/tests/config/databend-query-node-1.toml
@@ -6,33 +6,30 @@ max_active_sessions = 256
shutdown_wait_timeout_ms = 5000
# For flight rpc.
-flight_api_address = "0.0.0.0:9093"
+flight_api_address = "0.0.0.0:9091"
# Databend Query http address.
# For admin RESET API.
-admin_api_address = "0.0.0.0:8083"
+admin_api_address = "0.0.0.0:8080"
# Databend Query metrics RESET API.
-metric_api_address = "0.0.0.0:7073"
-
-discovery_address = "localhost:8003"
+metric_api_address = "0.0.0.0:7070"
# Databend Query MySQL Handler.
mysql_handler_host = "0.0.0.0"
-mysql_handler_port = 3309
-
+mysql_handler_port = 3307
# Databend Query ClickHouse Handler.
clickhouse_http_handler_host = "0.0.0.0"
-clickhouse_http_handler_port = 8127
+clickhouse_http_handler_port = 8124
# Databend Query HTTP Handler.
http_handler_host = "0.0.0.0"
-http_handler_port = 8003
+http_handler_port = 8000
# Databend Query FlightSQL Handler.
flight_sql_handler_host = "0.0.0.0"
-flight_sql_handler_port = 8903
+flight_sql_handler_port = 8900
tenant_id = "test_tenant"
cluster_id = "test_cluster"
@@ -41,12 +38,14 @@ table_engine_memory_enabled = true
default_storage_format = 'parquet'
default_compression = 'zstd'
-[[query.users]]
-name = "root"
-auth_type = "no_password"
+enable_udf_server = true
+udf_server_allow_list = ['http://0.0.0.0:8815']
+udf_server_allow_insecure = true
+
+cloud_control_grpc_server_address = "http://0.0.0.0:50051"
[[query.users]]
-name = "default"
+name = "root"
auth_type = "no_password"
[[query.users]]
@@ -60,20 +59,20 @@ auth_string = "3081f32caef285c232d066033c89a78d88a6d8a5"
name = "ping"
definition = "CREATE FUNCTION ping(STRING) RETURNS STRING LANGUAGE python HANDLER = 'ping' ADDRESS = 'http://0.0.0.0:8815'"
+[query.settings]
+aggregate_spilling_memory_ratio = 60
+join_spilling_memory_ratio = 60
+
[log]
+level = "INFO"
[log.file]
-level = "INFO"
format = "text"
-dir = "./.databend/logs_3"
-
-[log.structlog]
-on = true
-dir = "./.databend/structlog_3"
+dir = "./.databend/logs_1"
[meta]
# It is a list of `grpc_api_advertise_host:` of databend-meta config
-endpoints = ["0.0.0.0:9191"]
+endpoints = ["meta:9191"]
username = "root"
password = "root"
client_timeout_in_second = 60
@@ -81,21 +80,18 @@ auto_sync_interval = 60
# Storage config.
[storage]
-# fs | s3 | azblob | obs | oss
-type = "fs"
+type = "s3"
-# Set a local folder to store your data.
-# Comment out this block if you're NOT using local file system as storage.
-[storage.fs]
-data_path = "./.databend/stateless_test_data"
+[storage.s3]
+bucket = "databend"
+endpoint_url = "http://minio:9000"
+#endpoint_url = "http://localhost:9000"
+access_key_id = "minioadmin"
+secret_access_key = "minioadmin"
+enable_virtual_host_style = false
# Cache config.
[cache]
-# Type of storage to keep the table data cache
-#
-# available options: [none|disk]
-# default is "none", which disable table data cache
-# use "disk" to enabled disk cache
data_cache_storage = "none"
[cache.disk]
@@ -103,3 +99,11 @@ data_cache_storage = "none"
path = "./.databend/_cache"
# max bytes of cached data 20G
max_bytes = 21474836480
+
+[spill.storage]
+type = "fs"
+
+[spill.storage.fs]
+data_path = "/fast-ssd/spill"
+reserved_space_percentage = 25.0
+max_bytes = 10737418240 # 10GB
diff --git a/tests/config/nginx.conf b/tests/config/nginx.conf
new file mode 100644
index 00000000..74ee7e99
--- /dev/null
+++ b/tests/config/nginx.conf
@@ -0,0 +1,19 @@
+events {
+ worker_connections 1024;
+}
+
+http {
+ upstream backend {
+ server query-node-1:8001;
+ server query-node-2:8002;
+ server query-node-3:8003;
+ }
+
+ server {
+ listen 8000;
+
+ location / {
+ proxy_pass http://backend;
+ }
+ }
+}
diff --git a/tests/docker-compose.yaml b/tests/docker-compose.yaml
new file mode 100644
index 00000000..f54f9759
--- /dev/null
+++ b/tests/docker-compose.yaml
@@ -0,0 +1,74 @@
+x-query-base: &query-base
+ image: docker.io/datafuselabs/databend-query:${DATABEND_QUERY_VERSION:-nightly}
+ volumes:
+ - ./config/databend-query-node-1.toml:/conf.toml:ro
+ command: -c /conf.toml
+ environment:
+ - QUERY_DATABEND_ENTERPRISE_LICENSE
+ depends_on:
+ minio:
+ condition: service_started
+ meta:
+ condition: service_healthy
+ healthcheck:
+ test: "curl -f localhost:8080/v1/health || exit 1"
+ interval: 2s
+ retries: 10
+ start_period: 2s
+ timeout: 1s
+
+services:
+ minio:
+ image: docker.io/minio/minio
+ command: server /data
+ ports:
+ - "9000:9000"
+ volumes:
+ - ./data:/data
+ meta:
+ image: docker.io/datafuselabs/databend-meta:${DATABEND_META_VERSION:-nightly}
+ volumes:
+ - ./config/databend-meta-node-1.toml:/conf.toml:ro
+ command: -c /conf.toml
+ ports:
+ - "28101:28101"
+ healthcheck:
+ test: "databend-metactl status || exit 1"
+ interval: 2s
+ retries: 10
+ start_period: 2s
+ timeout: 1s
+ query-node-1:
+ <<: *query-base
+ environment:
+ - QUERY_HTTP_HANDLER_PORT=8001
+ - QUERY_DISCOVERY_ADDRESS=localhost:8001
+ ports:
+ - "8001:8001"
+ query-node-2:
+ <<: *query-base
+ environment:
+ - QUERY_HTTP_HANDLER_PORT=8002
+ - QUERY_DISCOVERY_ADDRESS=localhost:8002
+ ports:
+ - "8002:8002"
+ query-node-3:
+ <<: *query-base
+ environment:
+ - QUERY_HTTP_HANDLER_PORT=8003
+ - QUERY_DISCOVERY_ADDRESS=localhost:8003
+ ports:
+ - "8003:8003"
+ nginx-lb:
+ image: docker.io/nginx
+ volumes:
+ - ./config/nginx.conf:/etc/nginx/nginx.conf:ro
+ ports:
+ - "8000:8000"
+ depends_on:
+ query-node-1:
+ condition: service_healthy
+ query-node-2:
+ condition: service_healthy
+ query-node-3:
+ condition: service_healthy