diff --git a/common/src/main/java/org/apache/drill/common/KerberosUtil.java b/common/src/main/java/org/apache/drill/common/KerberosUtil.java
index 44403e92f9a..cb7d511eda9 100644
--- a/common/src/main/java/org/apache/drill/common/KerberosUtil.java
+++ b/common/src/main/java/org/apache/drill/common/KerberosUtil.java
@@ -17,6 +17,9 @@
*/
package org.apache.drill.common;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
@@ -24,7 +27,7 @@
import static com.google.common.base.Preconditions.checkState;
public final class KerberosUtil {
- private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(KerberosUtil.class);
+ private static final Logger logger = LoggerFactory.getLogger(KerberosUtil.class);
// Per this link http://docs.oracle.com/javase/jndi/tutorial/ldap/security/gssapi.html
// "... GSS-API SASL mechanism was retrofitted to mean only Kerberos v5 ..."
diff --git a/common/src/main/java/org/apache/drill/common/exceptions/UserExceptionUtils.java b/common/src/main/java/org/apache/drill/common/exceptions/UserExceptionUtils.java
index 1d9fbff6f3b..15b6ec5f8a6 100644
--- a/common/src/main/java/org/apache/drill/common/exceptions/UserExceptionUtils.java
+++ b/common/src/main/java/org/apache/drill/common/exceptions/UserExceptionUtils.java
@@ -33,7 +33,8 @@ private static String decorateHint(final String text) {
return String.format("[Hint: %s]", text);
}
public static String getUserHint(final Throwable ex) {
- if (ex.getMessage().startsWith("Error getting user info for current user")) {
+ final String message = ex.getMessage();
+ if (message != null && message.startsWith("Error getting user info for current user")) {
//User does not exist hint
return decorateHint(USER_DOES_NOT_EXIST);
} else {
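
Note on the UserExceptionUtils change (illustrative, not part of the patch): Throwable.getMessage() returns null for exceptions constructed without a message, which is exactly the case the new null check guards against. A minimal, self-contained sketch of the behaviour, with an invented class name:

    public class NullMessageExample {
      public static void main(String[] args) {
        Throwable ex = new RuntimeException();   // built without a message -> getMessage() is null
        String message = ex.getMessage();
        // The previous code called message.startsWith(...) unconditionally and threw a
        // NullPointerException here instead of falling back to the generic hint.
        if (message != null && message.startsWith("Error getting user info for current user")) {
          System.out.println("user-does-not-exist hint");
        } else {
          System.out.println("default hint");
        }
      }
    }
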
diff --git a/contrib/storage-hive/hive-exec-shade/pom.xml b/contrib/storage-hive/hive-exec-shade/pom.xml
index aa6032b1065..3ab44d1946a 100644
--- a/contrib/storage-hive/hive-exec-shade/pom.xml
+++ b/contrib/storage-hive/hive-exec-shade/pom.xml
@@ -32,7 +32,7 @@
Drill : Contrib : Storage : Hive : Exec Shaded
- 1.15.1
+ 1.15.2
diff --git a/contrib/storage-phoenix/pom.xml b/contrib/storage-phoenix/pom.xml
index aad0fac4763..f483dc7ff34 100644
--- a/contrib/storage-phoenix/pom.xml
+++ b/contrib/storage-phoenix/pom.xml
@@ -1,37 +1,40 @@
-
+
4.0.0
+
org.apache.drill.contrib
drill-contrib-parent
1.23.0-SNAPSHOT
+
drill-storage-phoenix
Drill : Contrib : Storage : Phoenix
- 5.1.3
-
- 2.4.17
+ 5.2.1
+ 2.6.3
+ 4.1.5
false
@@ -48,20 +51,25 @@
${project.version}
test
+
org.apache.drill
drill-common
- tests
${project.version}
+ tests
test
+
+
org.apache.phoenix
phoenix-core
${phoenix.version}
- test
- tests
+
+ com.google.protobuf
+ protobuf-java
+
org.slf4j
*
@@ -70,209 +78,94 @@
log4j
log4j
-
- commons-logging
- commons-logging
-
-
- javax.servlet
- *
-
-
- org.ow2.asm
- asm
-
-
- org.ow2.asm
- asm-all
-
-
- commons-configuration
- commons-configuration
-
-
- org.apache.commons
- commons-csv
-
-
- org.apache.hbase
- hbase-endpoint
-
-
- com.salesforce.i18n
- i18n-util
-
-
- jline
- jline
-
-
- com.codahale.metrics
- metrics-core
-
-
- com.codahale.metrics
- metrics-graphite
-
-
- org.jboss.netty
- netty
-
-
- org.apache.phoenix
- phoenix-hbase-compat-2.4.1
- ${phoenix.version}
-
+
org.apache.phoenix
phoenix-core
${phoenix.version}
+ tests
+ test
- org.slf4j
- *
-
-
- log4j
- log4j
-
-
- commons-logging
- commons-logging
-
-
- javax.servlet
- *
-
-
- org.ow2.asm
- asm
-
-
- org.ow2.asm
- asm-all
-
-
- commons-configuration
- commons-configuration
-
-
- org.apache.hbase
- hbase-testing-util
-
-
- org.apache.htrace
- htrace-core
-
-
- org.apache.commons
- commons-csv
-
-
- org.apache.hbase
- hbase-endpoint
-
-
- jline
- jline
-
-
- com.salesforce.i18n
- i18n-util
-
-
- com.codahale.metrics
- metrics-core
-
-
- com.codahale.metrics
- metrics-graphite
-
-
- org.jboss.netty
- netty
+ com.google.protobuf
+ protobuf-java
+
+
org.apache.phoenix
- phoenix-hbase-compat-2.4.0
+ phoenix-hbase-compat-2.6.0
${phoenix.version}
test
+
+
+ com.google.protobuf
+ protobuf-java
+
+
-
- org.apache.kerby
- kerb-core
- ${kerby.version}
- test
-
+
+
org.apache.hbase
- hbase-it
- ${hbase.minicluster.version}
- tests
+ hbase-testing-util
+ ${hbase.version}
test
- org.apache.hbase
- hbase-endpoint
+ org.apache.hadoop
+ hadoop-minicluster
+
+
+ org.apache.hadoop
+ hadoop-minikdc
+
+
+ org.apache.hadoop
+ hadoop-yarn-server-tests
+
org.apache.hbase
- hbase-common
- ${hbase.minicluster.version}
- tests
+ hbase-server
+ ${hbase.version}
test
org.apache.hbase
hbase-client
- ${hbase.minicluster.version}
+ ${hbase.version}
test
org.apache.hbase
- hbase-server
- ${hbase.minicluster.version}
- test
+ hbase-common
+ ${hbase.version}
org.apache.hbase
- hbase-protocol-shaded
- ${hbase.minicluster.version}
- test
+ hbase-zookeeper
+ ${hbase.version}
+
+
- org.apache.hbase
- hbase-hadoop-compat
- ${hbase.minicluster.version}
- test
+ org.apache.zookeeper
+ zookeeper
+ ${zookeeper.version}
- org.apache.hbase
- hbase-asyncfs
- tests
- ${hbase.minicluster.version}
- test
-
-
- commons-logging
- commons-logging
-
-
- log4j
- log4j
-
-
- org.slf4j
- *
-
-
+ org.apache.zookeeper
+ zookeeper-jute
+ ${zookeeper.version}
+
org.apache.hadoop
hadoop-hdfs-client
@@ -284,13 +177,14 @@
test
-
+
org.apache.hadoop
hadoop-minikdc
${hadoop.version}
test
+
commons-logging
@@ -300,102 +194,123 @@
log4j
log4j
+
- org.slf4j
- *
-
-
-
-
- org.apache.hbase
- hbase-testing-util
- ${hbase.minicluster.version}
- test
-
-
- commons-logging
- commons-logging
+ org.apache.kerby
+ kerb-simplekdc
- log4j
- log4j
+ org.apache.kerby
+ kerb-client
- org.slf4j
- *
+ org.apache.kerby
+ kerb-common
- javax.servlet
- servlet-api
+ org.apache.kerby
+ kerb-core
- io.netty
- netty
+ org.apache.kerby
+ kerb-crypto
- com.zaxxer
- HikariCP-java7
-
-
- org.apache.commons
- commons-csv
+ org.apache.kerby
+ kerb-util
+
+
+
+ org.apache.kerby
+ kerb-simplekdc
+ ${kerby.version}
+ test
+
+
+ org.apache.kerby
+ kerb-client
+ ${kerby.version}
+ test
+
+
+ org.apache.kerby
+ kerb-common
+ ${kerby.version}
+ test
+
+
+ org.apache.kerby
+ kerb-core
+ ${kerby.version}
+ test
+
+
+ org.apache.kerby
+ kerb-crypto
+ ${kerby.version}
+ test
+
+
+ org.apache.kerby
+ kerb-util
+ ${kerby.version}
+ test
+
+
+ org.apache.kerby
+ kerby-config
+ ${kerby.version}
+ test
+
+
+ org.apache.kerby
+ kerby-asn1
+ ${kerby.version}
+ test
+
+
+ org.apache.kerby
+ kerby-pkix
+ ${kerby.version}
+ test
+
+
+ org.apache.kerby
+ kerby-util
+ ${kerby.version}
+ test
+
+
+
+ org.bouncycastle
+ bcprov-jdk18on
+ 1.78.1
+ test
+
+
-
- maven-resources-plugin
-
-
- copy-java-sources
- process-sources
-
- copy-resources
-
-
- ${basedir}/target/classes/org/apache/drill/exec/store/phoenix
-
-
- src/main/java/org/apache/drill/exec/store/phoenix
- true
-
-
-
-
-
-
org.apache.maven.plugins
maven-surefire-plugin
${skipTests}
- 1
+ 1
false
-
- **/PhoenixTestSuite.class
- **/SecuredPhoenixTestSuite.class
-
-
- **/*Test.java
-
- -Xms2048m -Xmx2048m
+
+ -Xms2048m -Xmx2048m
+ --add-opens=java.base/java.lang=ALL-UNNAMED
+ --add-opens=java.base/java.util=ALL-UNNAMED
+ -Djava.net.preferIPv4Stack=true
+ -Dsun.security.krb5.debug=true
+ -Dsun.security.krb5.allowUdp=false
+
-
-
-
- jdk14+
-
- [14,)
-
-
- true
-
-
-
diff --git a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePlugin.java b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePlugin.java
index daf4e480fa7..de0b8514759 100644
--- a/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePlugin.java
+++ b/contrib/storage-phoenix/src/main/java/org/apache/drill/exec/store/phoenix/PhoenixStoragePlugin.java
@@ -43,12 +43,12 @@
import org.apache.drill.exec.store.phoenix.rules.PhoenixConvention;
import com.fasterxml.jackson.core.type.TypeReference;
-import org.apache.drill.exec.util.ImpersonationUtil;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
+import com.google.common.collect.ImmutableSet;
+import org.apache.drill.exec.util.ImpersonationUtil;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.tephra.shaded.com.google.common.collect.ImmutableSet;
public class PhoenixStoragePlugin extends AbstractStoragePlugin {
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBaseTest.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBaseTest.java
index 53b7adc7dd0..e04f39ab462 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBaseTest.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBaseTest.java
@@ -17,7 +17,18 @@
*/
package org.apache.drill.exec.store.phoenix;
-import static org.junit.Assert.assertFalse;
+import com.google.common.collect.Maps;
+import com.univocity.parsers.csv.CsvParser;
+import com.univocity.parsers.csv.CsvParserSettings;
+import org.apache.drill.exec.store.StoragePluginRegistry;
+import org.apache.drill.test.ClusterFixture;
+import org.apache.drill.test.ClusterFixtureBuilder;
+import org.apache.drill.test.ClusterTest;
+import org.apache.hadoop.fs.Path;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.FileInputStream;
import java.io.InputStreamReader;
@@ -37,22 +48,11 @@
import java.util.UUID;
import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.drill.exec.store.StoragePluginRegistry;
-import com.google.common.collect.Maps;
-import org.apache.drill.test.ClusterFixture;
-import org.apache.drill.test.ClusterFixtureBuilder;
-import org.apache.drill.test.ClusterTest;
-import org.apache.hadoop.fs.Path;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.slf4j.LoggerFactory;
-
-import com.univocity.parsers.csv.CsvParser;
-import com.univocity.parsers.csv.CsvParserSettings;
+import static org.junit.Assert.assertFalse;
public class PhoenixBaseTest extends ClusterTest {
- private static final org.slf4j.Logger logger = LoggerFactory.getLogger(PhoenixBaseTest.class);
+ private static final Logger logger = LoggerFactory.getLogger(PhoenixBaseTest.class);
public final static String U_U_I_D = UUID.randomUUID().toString();
private final static AtomicInteger initCount = new AtomicInteger(0);
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBasicsIT.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBasicsIT.java
index bb61e01632f..012c5c65e8e 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBasicsIT.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixBasicsIT.java
@@ -7,80 +7,92 @@
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
*/
+
package org.apache.drill.exec.store.phoenix;
-import static org.apache.hadoop.hbase.HConstants.HBASE_DIR;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_CAT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
-import java.util.Optional;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.LocalHBaseCluster;
-import org.apache.phoenix.util.PhoenixRuntime;
-import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.hbase.HConstants.HBASE_DIR;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
-/**
- * This is a copy of {@code org.apache.phoenix.end2end.QueryServerBasicsIT} until
- * PHOENIX-6613 is fixed
- */
public class PhoenixBasicsIT {
private static final HBaseTestingUtility util = new HBaseTestingUtility();
-
- private static final org.slf4j.Logger logger = LoggerFactory.getLogger(PhoenixBasicsIT.class);
+ private static final Logger logger = LoggerFactory.getLogger(PhoenixBasicsIT.class);
protected static String CONN_STRING;
- static LocalHBaseCluster hbaseCluster;
public static synchronized void doSetup() throws Exception {
Configuration conf = util.getConfiguration();
- // Start ZK by hand
- util.startMiniZKCluster();
+
+ // Keep it embedded & filesystem-only (no HDFS)
+ conf.set("hbase.cluster.distributed", "false");
+ conf.setBoolean("hbase.unsafe.stream.capability.enforce", false);
+ conf.setInt("hbase.master.wait.on.regionservers.mintostart", 1);
+
+ // Randomize service ports, disable HTTP/Jetty info servers to avoid Netty/servlet deps
+ conf.setInt("hbase.master.port", 0);
+ conf.setInt("hbase.master.info.port", -1);
+ conf.setInt("hbase.regionserver.port", 0);
+ conf.setInt("hbase.regionserver.info.port", -1);
+ conf.unset("hbase.http.filter.initializers"); // make sure no web filters get bootstrapped
+
+ // Force loopback to dodge IPv6/hostname hiccups
+ conf.set("hbase.zookeeper.quorum", "127.0.0.1");
+ conf.set("hbase.master.hostname", "127.0.0.1");
+ conf.set("hbase.regionserver.hostname", "127.0.0.1");
+
+ // Root dir on local FS (file:///), so HTU won't start MiniDFS
Path rootdir = util.getDataTestDirOnTestFS(PhoenixBasicsIT.class.getSimpleName());
- // There is no setRootdir method that is available in all supported HBase versions.
- conf.set(HBASE_DIR, rootdir.toString());
- hbaseCluster = new LocalHBaseCluster(conf, 1);
- hbaseCluster.startup();
+ conf.set(HBASE_DIR, rootdir.toUri().toString()); // keep URI form
- CONN_STRING = PhoenixRuntime.JDBC_PROTOCOL + ":localhost:" + getZookeeperPort();
- logger.info("JDBC connection string is " + CONN_STRING);
- }
+ // Start ZK + 1 Master + 1 RegionServer WITHOUT HDFS
+ util.startMiniZKCluster();
+ util.startMiniHBaseCluster(1, 1);
- public static int getZookeeperPort() {
- return util.getConfiguration().getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181);
+ int zkPort = util.getZkCluster().getClientPort();
+ CONN_STRING = PhoenixRuntime.JDBC_PROTOCOL + ":localhost:" + zkPort;
+ logger.info("JDBC connection string is {}", CONN_STRING);
}
public static void testCatalogs() throws Exception {
- try (final Connection connection = DriverManager.getConnection(CONN_STRING)) {
+ try (Connection connection = DriverManager.getConnection(CONN_STRING)) {
assertFalse(connection.isClosed());
- try (final ResultSet resultSet = connection.getMetaData().getCatalogs()) {
- final ResultSetMetaData metaData = resultSet.getMetaData();
- assertFalse("unexpected populated resultSet", resultSet.next());
- assertEquals(1, metaData.getColumnCount());
- assertEquals(TABLE_CAT, metaData.getColumnName(1));
+ try (ResultSet rs = connection.getMetaData().getCatalogs()) {
+ ResultSetMetaData md = rs.getMetaData();
+ String col = md.getColumnLabel(1); // label is safer than name
+ if (!"TABLE_CAT".equals(col) && !"TENANT_ID".equals(col)) {
+ // fall back to name just in case some drivers differ
+ col = md.getColumnName(1);
+ }
+ assertTrue("Unexpected first column: " + col,
+ "TABLE_CAT".equals(col) || "TENANT_ID".equals(col));
}
}
}
public static synchronized void afterClass() throws IOException {
- Optional.of(hbaseCluster).ifPresent(LocalHBaseCluster::shutdown);
- util.shutdownMiniCluster();
+ util.shutdownMiniHBaseCluster(); // stops RS & Master
+ util.shutdownMiniZKCluster(); // stops ZK
}
}
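
Note on PhoenixBasicsIT (a sketch under stated assumptions, not part of the patch): the hunk does not show which class drives doSetup()/testCatalogs()/afterClass(); the module's own suite/base classes wire them up, but one plausible JUnit 4 lifecycle, with an invented harness name, would be:

    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    // Hypothetical harness; it only illustrates the intended call order of the
    // static lifecycle methods shown in the diff above.
    public class PhoenixBasicsHarness {

      @BeforeClass
      public static void setUp() throws Exception {
        PhoenixBasicsIT.doSetup();        // mini ZK + embedded HBase on the local FS, builds CONN_STRING
      }

      @Test
      public void catalogs() throws Exception {
        PhoenixBasicsIT.testCatalogs();   // opens a Phoenix JDBC connection against the mini cluster
      }

      @AfterClass
      public static void tearDown() throws Exception {
        PhoenixBasicsIT.afterClass();     // stops the HBase mini cluster, then ZK
      }
    }
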
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixTestSuite.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixTestSuite.java
index 0e430904c22..3be298e2b76 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixTestSuite.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/PhoenixTestSuite.java
@@ -17,9 +17,6 @@
*/
package org.apache.drill.exec.store.phoenix;
-import java.util.TimeZone;
-import java.util.concurrent.atomic.AtomicInteger;
-
import org.apache.drill.categories.SlowTest;
import org.apache.drill.test.BaseTest;
import org.junit.AfterClass;
@@ -28,8 +25,12 @@
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
import org.junit.runners.Suite.SuiteClasses;
+import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.TimeZone;
+import java.util.concurrent.atomic.AtomicInteger;
+
@RunWith(Suite.class)
@SuiteClasses ({
@@ -40,7 +41,7 @@
@Category({ SlowTest.class })
public class PhoenixTestSuite extends BaseTest {
- private static final org.slf4j.Logger logger = LoggerFactory.getLogger(PhoenixTestSuite.class);
+ private static final Logger logger = LoggerFactory.getLogger(PhoenixTestSuite.class);
private static volatile boolean runningSuite = false;
private static final AtomicInteger initCount = new AtomicInteger(0);
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/PhoenixEnvironment.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/PhoenixEnvironment.java
index 88b3d607857..acc662cc81d 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/PhoenixEnvironment.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/PhoenixEnvironment.java
@@ -17,20 +17,6 @@
*/
package org.apache.drill.exec.store.phoenix.secured;
-import static org.apache.hadoop.hbase.HConstants.HBASE_DIR;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-import java.io.File;
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.net.InetAddress;
-import java.util.AbstractMap;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -38,8 +24,11 @@
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LocalHBaseCluster;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.security.HBaseKerberosUtils;
import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.access.AccessController;
+import org.apache.hadoop.hbase.security.token.TokenProvider;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.minikdc.MiniKdc;
@@ -48,6 +37,23 @@
import org.apache.phoenix.query.ConfigurationFactory;
import org.apache.phoenix.util.InstanceResolver;
import org.apache.phoenix.util.PhoenixRuntime;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.net.InetAddress;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+
+import static org.apache.hadoop.hbase.HConstants.HBASE_DIR;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
/**
* This is a copy of class from `org.apache.phoenix:phoenix-queryserver-it`,
@@ -97,6 +103,7 @@ public class PhoenixEnvironment {
private int numCreatedUsers;
private final String phoenixUrl;
+ private static final Logger logger = LoggerFactory.getLogger(PhoenixEnvironment.class);
private static Configuration conf() {
Configuration configuration = HBaseConfiguration.create();
@@ -195,8 +202,7 @@ private static void ensureIsEmptyDirectory(File f) throws IOException {
/**
* Setup and start kerberosed, hbase
*/
- public PhoenixEnvironment(final Configuration confIn, int numberOfUsers, boolean tls)
- throws Exception {
+ public PhoenixEnvironment(final Configuration confIn, int numberOfUsers, boolean tls) throws Exception {
Configuration conf = util.getConfiguration();
conf.addResource(confIn);
@@ -204,30 +210,98 @@ public PhoenixEnvironment(final Configuration confIn, int numberOfUsers, boolean
ensureIsEmptyDirectory(tempDir);
ensureIsEmptyDirectory(keytabDir);
keytab = new File(keytabDir, "test.keytab");
+
// Start a MiniKDC
- kdc = util.setupMiniKdc(keytab);
- // Create a service principal and spnego principal in one keytab
- // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
- // use separate identies for HBase and HDFS results in a GSS initiate error. The quick
- // solution is to just use a single "service" principal instead of "hbase" and "hdfs"
- // (or "dn" and "nn") per usual.
- kdc.createPrincipal(keytab, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
- // Start ZK by hand
+ File kdcWorkDir = new File(new File(getTempDir()), "kdc-" + System.currentTimeMillis());
+ ensureIsEmptyDirectory(kdcWorkDir);
+
+ Properties kdcConf = org.apache.hadoop.minikdc.MiniKdc.createConf();
+ kdcConf.setProperty(org.apache.hadoop.minikdc.MiniKdc.KDC_BIND_ADDRESS, "127.0.0.1");
+ kdcConf.setProperty("kdc.tcp.port", "0");
+ kdcConf.setProperty("kdc.allow_udp", "false");
+ kdcConf.setProperty("kdc.encryption.types", "aes128-cts-hmac-sha1-96");
+ kdcConf.setProperty("kdc.fast.enabled", "false");
+ kdcConf.setProperty("kdc.preauth.required", "true");
+ kdcConf.setProperty("kdc.allowable.clockskew", "300000"); // 5m
+ kdcConf.setProperty(org.apache.hadoop.minikdc.MiniKdc.DEBUG, "true");
+
+ kdc = new org.apache.hadoop.minikdc.MiniKdc(kdcConf, kdcWorkDir);
+ kdc.start();
+
+ // Write krb5.conf that disables referrals/canonicalization
+ File krb5File = new File(kdcWorkDir, "krb5.conf");
+ writeKrb5Conf(krb5File.toPath(), kdc.getRealm(), "127.0.0.1", kdc.getPort());
+ System.setProperty("java.security.krb5.conf", krb5File.getAbsolutePath());
+ System.setProperty("sun.security.krb5.allowUdp", "false");
+ System.setProperty("sun.security.krb5.disableReferrals", "true");
+ System.setProperty("java.net.preferIPv4Stack", "true");
+ System.setProperty("sun.security.krb5.debug", "true");
+ System.clearProperty("java.security.krb5.realm"); // avoid env overrides
+ System.clearProperty("java.security.krb5.kdc");
+
+ // Fresh keytab every run; create principals in one shot
+ if (keytab.exists() && !keytab.delete()) {
+ throw new IOException("Couldn't delete old keytab: " + keytab);
+ }
+ keytab.getParentFile().mkdirs();
+
+ // Use a conventional service principal to avoid canonicalization surprises
+ final String SERVICE_PRINCIPAL_LOCAL = "hbase/localhost";
+ final String SPNEGO_PRINCIPAL_LOCAL = "HTTP/localhost";
+ final String PQS_PRINCIPAL_LOCAL = "phoenixqs/localhost";
+
+ kdc.createPrincipal(
+ keytab,
+ SPNEGO_PRINCIPAL_LOCAL,
+ PQS_PRINCIPAL_LOCAL,
+ SERVICE_PRINCIPAL_LOCAL
+ );
+ // --- End explicit MiniKDC setup ---
+
+ // Start ZK by hand
util.startMiniZKCluster();
// Create a number of unprivileged users
createUsers(numberOfUsers);
- // Set configuration for HBase
- HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + kdc.getRealm());
+ // HBase ↔ Kerberos wiring: set creds BEFORE setSecuredConfiguration
+ final String servicePrincipal = "hbase/localhost@" + kdc.getRealm();
+
+ conf.set("hadoop.security.authentication", "kerberos");
+ conf.set("hbase.security.authentication", "kerberos");
+
+ conf.set("hbase.master.keytab.file", keytab.getAbsolutePath());
+ conf.set("hbase.regionserver.keytab.file", keytab.getAbsolutePath());
+ conf.set("hbase.master.kerberos.principal", servicePrincipal);
+ conf.set("hbase.regionserver.kerberos.principal", servicePrincipal);
+
+ // Make HBase copy its secured defaults *after* we have principals/keytab in conf
+ HBaseKerberosUtils.setPrincipalForTesting(servicePrincipal);
+ HBaseKerberosUtils.setKeytabFileForTesting(keytab.getAbsolutePath());
HBaseKerberosUtils.setSecuredConfiguration(conf);
+
+ // HDFS side
setHdfsSecuredConfiguration(conf);
+
+ // UGI must see kerberos
+ UserGroupInformation.setConfiguration(conf);
+
+ // Preflight: prove the keytab/KDC works *before* we start HBase
+ UserGroupInformation.loginUserFromKeytab(servicePrincipal, keytab.getAbsolutePath());
+ logger.info("UGI login OK for {}", servicePrincipal);
+
UserGroupInformation.setConfiguration(conf);
+
conf.setInt(HConstants.MASTER_PORT, 0);
conf.setInt(HConstants.MASTER_INFO_PORT, 0);
conf.setInt(HConstants.REGIONSERVER_PORT, 0);
conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
+ // Register the security coprocessors (AccessController, TokenProvider) required for a kerberized HBase
+ conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
+ conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
+ conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName(), TokenProvider.class.getName());
+
// Clear the cached singletons so we can inject our own.
InstanceResolver.clearSingletons();
// Make sure the ConnectionInfo doesn't try to pull a default Configuration
@@ -258,10 +332,47 @@ public Configuration getConfiguration(Configuration confToClone) {
phoenixUrl = PhoenixRuntime.JDBC_PROTOCOL + ":localhost:" + getZookeeperPort();
}
+ private static void writeKrb5Conf(java.nio.file.Path path, String realm, String host, int port) throws Exception {
+ String cfg =
+ "[libdefaults]\n" +
+ " default_realm = " + realm + "\n" +
+ " dns_lookup_kdc = false\n" +
+ " dns_lookup_realm = false\n" +
+ " dns_canonicalize_hostname = false\n" +
+ " rdns = false\n" +
+ " udp_preference_limit = 1\n" +
+ " default_tkt_enctypes = aes128-cts-hmac-sha1-96\n" +
+ " default_tgs_enctypes = aes128-cts-hmac-sha1-96\n" +
+ " permitted_enctypes = aes128-cts-hmac-sha1-96\n" +
+ "\n" +
+ "[realms]\n" +
+ " " + realm + " = {\n" +
+ " kdc = " + host + ":" + port + "\n" +
+ " admin_server = " + host + ":" + port + "\n" +
+ " }\n";
+ java.nio.file.Files.createDirectories(path.getParent());
+ java.nio.file.Files.write(path, cfg.getBytes(java.nio.charset.StandardCharsets.UTF_8));
+ }
+
+
public int getZookeeperPort() {
return util.getConfiguration().getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181);
}
+ private static void createPrincipalIfAbsent(MiniKdc kdc, File keytab, String principal) throws Exception {
+ try {
+ kdc.createPrincipal(keytab, principal);
+ } catch (org.apache.kerby.kerberos.kerb.KrbException e) {
+ String msg = e.getMessage();
+ if (msg != null && msg.contains("already exists")) {
+ // Principal is already in the KDC; fine to proceed.
+ // (Keys were generated when it was first created.)
+ return;
+ }
+ throw e;
+ }
+ }
+
public void stop() throws Exception {
// Remove our custom ConfigurationFactory for future tests
InstanceResolver.clearSingletons();
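
Note on the MiniKDC setup in PhoenixEnvironment (a sketch with assumed names, not part of the patch): the constructor above interleaves KDC start-up with HBase/HDFS configuration; reduced to the Kerberos-only steps, a standalone preflight that proves the keytab authenticates could look like this:

    import java.io.File;
    import java.util.Properties;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.minikdc.MiniKdc;
    import org.apache.hadoop.security.UserGroupInformation;

    // Hypothetical standalone check: start a MiniKdc, create one service principal,
    // and verify a UGI keytab login succeeds before any HBase/HDFS daemons are started.
    public class MiniKdcPreflight {
      public static void main(String[] args) throws Exception {
        File workDir = new File("target/kdc-preflight");
        workDir.mkdirs();

        Properties kdcConf = MiniKdc.createConf();
        kdcConf.setProperty(MiniKdc.KDC_BIND_ADDRESS, "127.0.0.1");
        MiniKdc kdc = new MiniKdc(kdcConf, workDir);
        kdc.start();                                            // also writes a krb5.conf into workDir

        File keytab = new File(workDir, "preflight.keytab");
        kdc.createPrincipal(keytab, "hbase/localhost");

        System.setProperty("java.security.krb5.conf", kdc.getKrb5conf().getAbsolutePath());

        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos"); // UGI skips keytab logins otherwise
        UserGroupInformation.setConfiguration(conf);

        UserGroupInformation.loginUserFromKeytab(
            "hbase/localhost@" + kdc.getRealm(), keytab.getAbsolutePath());
        System.out.println("Logged in as " + UserGroupInformation.getLoginUser());

        kdc.stop();
      }
    }
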
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixBaseTest.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixBaseTest.java
index 193b7ba9544..fdbe85fa3bd 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixBaseTest.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixBaseTest.java
@@ -18,6 +18,7 @@
package org.apache.drill.exec.store.phoenix.secured;
import ch.qos.logback.classic.Level;
+import com.google.common.collect.Lists;
import com.sun.security.auth.module.Krb5LoginModule;
import org.apache.drill.common.config.DrillProperties;
import org.apache.drill.common.exceptions.UserRemoteException;
@@ -29,7 +30,6 @@
import org.apache.drill.exec.store.phoenix.PhoenixDataSource;
import org.apache.drill.exec.store.phoenix.PhoenixStoragePluginConfig;
import org.apache.drill.exec.util.ImpersonationUtil;
-import com.google.common.collect.Lists;
import org.apache.drill.test.ClusterFixture;
import org.apache.drill.test.ClusterFixtureBuilder;
import org.apache.drill.test.ClusterTest;
@@ -69,6 +69,8 @@ public abstract class SecuredPhoenixBaseTest extends ClusterTest {
private final static AtomicInteger initCount = new AtomicInteger(0);
+
+
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
@@ -92,6 +94,22 @@ private static void startSecuredDrillCluster() throws Exception {
Map.Entry user3 = environment.getUser(3);
dirTestWatcher.start(SecuredPhoenixTestSuite.class); // until DirTestWatcher ClassRule is implemented for JUnit5
+
+ // Create a UDF directory with proper permissions in the test directory
+ File udfDir = dirTestWatcher.makeSubDir(Paths.get("udf"));
+ // Pre-create all subdirectories that Drill will need with proper permissions
+ File drillDir = new File(udfDir, "drill");
+ File happyDir = new File(drillDir, "happy");
+ File udfSubDir = new File(happyDir, "udf");
+ File registryDir = new File(udfSubDir, "registry");
+ File stagingDir = new File(udfSubDir, "staging");
+ File tmpDir = new File(udfSubDir, "tmp");
+ // Create all directories and set permissions
+ registryDir.mkdirs();
+ stagingDir.mkdirs();
+ tmpDir.mkdirs();
+ setDirectoryPermissions(udfDir);
+
ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher)
.configProperty(ExecConstants.USER_AUTHENTICATION_ENABLED, true)
.configProperty(ExecConstants.USER_AUTHENTICATOR_IMPL, UserAuthenticatorTestImpl.TYPE)
@@ -99,12 +117,23 @@ private static void startSecuredDrillCluster() throws Exception {
.configProperty(ExecConstants.IMPERSONATION_ENABLED, true)
.configProperty(ExecConstants.BIT_AUTHENTICATION_ENABLED, true)
.configProperty(ExecConstants.BIT_AUTHENTICATION_MECHANISM, "kerberos")
+ .configProperty(ExecConstants.USE_LOGIN_PRINCIPAL, true)
.configProperty(ExecConstants.SERVICE_PRINCIPAL, HBaseKerberosUtils.getPrincipalForTesting())
.configProperty(ExecConstants.SERVICE_KEYTAB_LOCATION, environment.getServiceKeytab().getAbsolutePath())
+ // Set UDF directory to a location we control with proper permissions
+ .configProperty(ExecConstants.UDF_DIRECTORY_ROOT, udfDir.getAbsolutePath())
+ .configProperty(ExecConstants.UDF_DIRECTORY_FS, "file:///" + udfDir.getAbsolutePath().replace("\\", "/"))
+ // Disable dynamic UDF support for this test to avoid filesystem issues
+ .configProperty(ExecConstants.UDF_DISABLE_DYNAMIC, true)
.configClientProperty(DrillProperties.SERVICE_PRINCIPAL, HBaseKerberosUtils.getPrincipalForTesting())
.configClientProperty(DrillProperties.USER, user1.getKey())
.configClientProperty(DrillProperties.KEYTAB, user1.getValue().getAbsolutePath());
startCluster(builder);
+
+ // After cluster starts, Drill creates subdirectories in the UDF area
+ // Set permissions recursively on all created subdirectories
+ setDirectoryPermissions(udfDir);
+
Properties user2ClientProperties = new Properties();
user2ClientProperties.setProperty(DrillProperties.SERVICE_PRINCIPAL, HBaseKerberosUtils.getPrincipalForTesting());
user2ClientProperties.setProperty(DrillProperties.USER, user2.getKey());
@@ -124,6 +153,29 @@ private static void startSecuredDrillCluster() throws Exception {
registry.put(PhoenixStoragePluginConfig.NAME + "123", config);
}
+ /**
+ * Sets read, write and execute permissions on a directory and, recursively, on every
+ * subdirectory created by Drill underneath it.
+ */
+ private static void setDirectoryPermissions(File dir) {
+ if (dir != null && dir.exists()) {
+ // Set permissions on the directory itself
+ dir.setWritable(true, false); // writable by all
+ dir.setExecutable(true, false); // executable by all
+ dir.setReadable(true, false); // readable by all
+ // Recursively set permissions on subdirectories
+ if (dir.isDirectory()) {
+ File[] children = dir.listFiles();
+ if (children != null) {
+ for (File child : children) {
+ if (child.isDirectory()) {
+ setDirectoryPermissions(child);
+ }
+ }
+ }
+ }
+ }
+ }
/**
* Initialize HBase via Phoenix
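
Note on setDirectoryPermissions in SecuredPhoenixBaseTest (an alternative sketch, not part of the patch): the helper above uses java.io.File permission setters; on POSIX filesystems the same effect can be expressed with NIO, assuming an invented helper name:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.attribute.PosixFilePermissions;
    import java.util.stream.Stream;

    // Hypothetical POSIX-only equivalent: walk the tree and mark every directory rwxrwxrwx.
    public final class PosixPermissionsExample {
      static void makeWorldAccessible(Path root) throws IOException {
        try (Stream<Path> paths = Files.walk(root)) {
          for (Path p : (Iterable<Path>) paths::iterator) {
            if (Files.isDirectory(p)) {
              Files.setPosixFilePermissions(p, PosixFilePermissions.fromString("rwxrwxrwx"));
            }
          }
        }
      }
    }
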
diff --git a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixTestSuite.java b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixTestSuite.java
index 89f1a999044..7631ea7dcb2 100644
--- a/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixTestSuite.java
+++ b/contrib/storage-phoenix/src/test/java/org/apache/drill/exec/store/phoenix/secured/SecuredPhoenixTestSuite.java
@@ -26,6 +26,7 @@
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Suite;
+import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.TimeZone;
@@ -41,7 +42,7 @@
@Category({ SlowTest.class, RowSetTest.class })
public class SecuredPhoenixTestSuite extends BaseTest {
- private static final org.slf4j.Logger logger = LoggerFactory.getLogger(SecuredPhoenixTestSuite.class);
+ private static final Logger logger = LoggerFactory.getLogger(SecuredPhoenixTestSuite.class);
private static volatile boolean runningSuite = false;
private static final AtomicInteger initCount = new AtomicInteger(0);
diff --git a/pom.xml b/pom.xml
index e8ece2041d3..60f7ec19604 100644
--- a/pom.xml
+++ b/pom.xml
@@ -71,7 +71,7 @@
1.10.0
1.7
5.5.0
- 10.14.2.0
+ 10.17.1.0
3072
apache/drill
@@ -107,7 +107,7 @@
1.8.2
5.7.2
- 1.0.1
+ 2.0.3
0.18.1
true
2.23.1
@@ -128,7 +128,7 @@
2.0.65.Final
4.1.118.Final
2.11.0
- 1.15.1
+ 1.15.2
1750144553
3.25.5
${project.basedir}/src/main/protobuf/
@@ -148,7 +148,7 @@
2.23.2
2.12.2
7.31.0
- 3.5.10
+ 3.9.3
diff --git a/tools/fmpp/pom.xml b/tools/fmpp/pom.xml
index eb2b3bb5cb9..6b44979b4bb 100644
--- a/tools/fmpp/pom.xml
+++ b/tools/fmpp/pom.xml
@@ -70,6 +70,18 @@
bsh
org.beanshell
+
+ maven-plugin-api
+ org.apache.maven
+
+
+ maven-compat
+ org.apache.maven
+
+
+ maven-model
+ org.apache.maven
+