From 04e4df89c342697d17bdfce2cee602bc679dcab4 Mon Sep 17 00:00:00 2001
From: openshift-ci-robot
Date: Mon, 14 Jun 2021 23:03:18 +0000
Subject: [PATCH 1/2] Updating .ci-operator.yaml `build_root_image` from openshift/release

This is an autogenerated PR that updates the `.ci-operator.yaml` to reference the `build_root_image` found in the [ci-operator-config](https://github.com/openshift/release/tree/master/ci-operator/config) in the [openshift/release](https://github.com/openshift/release) repository.

This is done in preparation for enabling reading the `build_root` from your repository rather than from the central config in [openshift/release](https://github.com/openshift/release). This allows the `build_root` to be updated in lockstep with code changes. For details, please refer to the [docs](https://docs.ci.openshift.org/docs/architecture/ci-operator/#build-root-image).

Note that enabling this feature is mandatory for all OCP components that have an ART build config. A second autogenerated PR to the [openshift/release repository](https://github.com/openshift/release) will enable reading the `build_root` from your repository once this PR is merged.

If you have any questions, please feel free to reach out in the #forum-testplatform channel in the CoreOS Slack.
---
 .ci-operator.yaml | 4 ++++
 1 file changed, 4 insertions(+)
 create mode 100644 .ci-operator.yaml

diff --git a/.ci-operator.yaml b/.ci-operator.yaml
new file mode 100644
index 000000000000..4ff85adc3ac3
--- /dev/null
+++ b/.ci-operator.yaml
@@ -0,0 +1,4 @@
+build_root_image:
+  name: release
+  namespace: openshift
+  tag: rhel-8-release-openshift-4.6

From 1b60d09f2580c86cfe0d44a76a80259ac45c2bbb Mon Sep 17 00:00:00 2001
From: Brett Tofel
Date: Wed, 2 Feb 2022 22:33:59 +0000
Subject: [PATCH 2/2] Remove accumulo plugin

---
 Dockerfile | 1 -
 Dockerfile.okd | 1 -
 pom.xml | 1 -
 presto-accumulo/pom.xml | 363 -------
 .../plugin/accumulo/AccumuloClient.java | 948 ------------------
 .../plugin/accumulo/AccumuloConnector.java | 145 ---
 .../accumulo/AccumuloConnectorFactory.java | 64 --
 .../plugin/accumulo/AccumuloErrorCode.java | 56 --
 .../accumulo/AccumuloHandleResolver.java | 65 --
 .../plugin/accumulo/AccumuloMetadata.java | 429 --------
 .../accumulo/AccumuloMetadataFactory.java | 34 -
 .../plugin/accumulo/AccumuloModule.java | 158 ---
 .../plugin/accumulo/AccumuloPlugin.java | 28 -
 .../plugin/accumulo/AccumuloSplitManager.java | 124 ---
 .../plugin/accumulo/AccumuloTableManager.java | 160 ---
 .../accumulo/AccumuloTransactionHandle.java | 72 --
 .../io/prestosql/plugin/accumulo/Types.java | 75 --
 .../plugin/accumulo/conf/AccumuloConfig.java | 143 ---
 .../conf/AccumuloSessionProperties.java | 174 ----
 .../conf/AccumuloTableProperties.java | 250 -----
 .../index/ColumnCardinalityCache.java | 410 --------
 .../plugin/accumulo/index/IndexLookup.java | 401 --------
 .../plugin/accumulo/index/Indexer.java | 559 -----------
 .../plugin/accumulo/io/AccumuloPageSink.java | 304 ------
 .../accumulo/io/AccumuloPageSinkProvider.java | 66 --
 .../accumulo/io/AccumuloRecordCursor.java | 299 ------
 .../plugin/accumulo/io/AccumuloRecordSet.java | 143 ---
 .../io/AccumuloRecordSetProvider.java | 69 --
 .../iterators/MaxByteArrayCombiner.java | 47 -
 .../iterators/MinByteArrayCombiner.java | 47 -
 .../accumulo/metadata/AccumuloTable.java | 215 ----
 .../accumulo/metadata/AccumuloView.java | 103 --
 .../metadata/ZooKeeperMetadataManager.java | 333 ------
 .../model/AccumuloColumnConstraint.java | 115 ---
 .../accumulo/model/AccumuloColumnHandle.java | 160 ---
 .../plugin/accumulo/model/AccumuloSplit.java | 100 --
 .../accumulo/model/AccumuloTableHandle.java | 179 ----
 .../plugin/accumulo/model/Field.java | 546 ----------
 .../prestosql/plugin/accumulo/model/Row.java | 237 -----
 .../plugin/accumulo/model/RowSchema.java | 121 ---
 .../accumulo/model/TabletSplitMetadata.java | 84 --
 .../plugin/accumulo/model/WrappedRange.java | 57 --
 .../serializers/AccumuloRowSerializer.java | 625 ------------
 .../serializers/BooleanLexicoder.java | 38 -
 .../serializers/LexicoderRowSerializer.java | 422 --------
 .../accumulo/serializers/MapLexicoder.java | 73 --
 .../serializers/StringRowSerializer.java | 408 --------
 .../java/org/apache/log4j/JulAppender.java | 104 --
 .../plugin/accumulo/AccumuloQueryRunner.java | 237 -----
 .../accumulo/MiniAccumuloConfigUtil.java | 52 -
 .../plugin/accumulo/TestAccumuloClient.java | 83 --
 .../TestAccumuloDistributedQueries.java | 347 -------
 .../TestAccumuloIntegrationSmokeTest.java | 66 --
 .../plugin/accumulo/index/TestIndexer.java | 333 ------
 .../accumulo/model/TestAccumuloSplit.java | 60 --
 .../plugin/accumulo/model/TestField.java | 268 -----
 .../plugin/accumulo/model/TestRow.java | 114 ---
 .../accumulo/model/TestWrappedRange.java | 35 -
 .../AbstractTestAccumuloRowSerializer.java | 316 ------
 .../TestLexicoderRowSerializer.java | 23 -
 .../serializers/TestStringRowSerializer.java | 35 -
 presto-server/src/main/provisio/presto.xml | 6 -
 62 files changed, 11531 deletions(-)
 delete mode 100644 presto-accumulo/pom.xml
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloClient.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloConnector.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloConnectorFactory.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloErrorCode.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloHandleResolver.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloMetadata.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloMetadataFactory.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloModule.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloPlugin.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloSplitManager.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloTableManager.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloTransactionHandle.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/Types.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloConfig.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloSessionProperties.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloTableProperties.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/ColumnCardinalityCache.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/IndexLookup.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/Indexer.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloPageSink.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloPageSinkProvider.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordCursor.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordSet.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordSetProvider.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/iterators/MaxByteArrayCombiner.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/iterators/MinByteArrayCombiner.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/AccumuloTable.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/AccumuloView.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/ZooKeeperMetadataManager.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloColumnConstraint.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloColumnHandle.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloSplit.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloTableHandle.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/Field.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/Row.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/RowSchema.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/TabletSplitMetadata.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/WrappedRange.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/AccumuloRowSerializer.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/BooleanLexicoder.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/LexicoderRowSerializer.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/MapLexicoder.java
 delete mode 100644 presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/StringRowSerializer.java
 delete mode 100644 presto-accumulo/src/main/java/org/apache/log4j/JulAppender.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/AccumuloQueryRunner.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/MiniAccumuloConfigUtil.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloClient.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloDistributedQueries.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloIntegrationSmokeTest.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/index/TestIndexer.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestAccumuloSplit.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestField.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestRow.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestWrappedRange.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/AbstractTestAccumuloRowSerializer.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/TestLexicoderRowSerializer.java
 delete mode 100644 presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/TestStringRowSerializer.java

diff --git a/Dockerfile b/Dockerfile
index 4bc9be6b77d6..906d4c3994e8 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -50,7 +50,6 @@ COPY presto-cli /build/presto-cli
 COPY presto-hive /build/presto-hive
 COPY presto-matching /build/presto-matching
 COPY presto-elasticsearch /build/presto-elasticsearch
-COPY presto-accumulo /build/presto-accumulo
 COPY presto-tests /build/presto-tests
 COPY presto-thrift /build/presto-thrift
 COPY presto-geospatial /build/presto-geospatial
diff --git a/Dockerfile.okd b/Dockerfile.okd
index aa2fcc5730ad..1df5b1ec03f1 100644
--- a/Dockerfile.okd
+++ b/Dockerfile.okd
@@ -49,7 +49,6 @@ COPY presto-cli /build/presto-cli
 COPY presto-hive /build/presto-hive
 COPY presto-matching /build/presto-matching
 COPY presto-elasticsearch /build/presto-elasticsearch
-COPY presto-accumulo /build/presto-accumulo
 COPY presto-tests /build/presto-tests
 COPY presto-thrift /build/presto-thrift
 COPY presto-geospatial /build/presto-geospatial
diff --git a/pom.xml b/pom.xml
index efabec24a502..9dfb2ccc13dc 100644
--- a/pom.xml
+++ b/pom.xml
@@ -88,7 +88,6 @@
         <module>presto-kafka</module>
         <module>presto-kinesis</module>
         <module>presto-redis</module>
-        <module>presto-accumulo</module>
         <module>presto-cassandra</module>
         <module>presto-blackhole</module>
         <module>presto-memory</module>
diff --git a/presto-accumulo/pom.xml b/presto-accumulo/pom.xml
deleted file mode 100644
index dbf8416e0e9e..000000000000
--- a/presto-accumulo/pom.xml
+++ /dev/null
@@ -1,363 +0,0 @@
- - 4.0.0 - - - io.prestosql - presto-root - 328 - - - presto-accumulo - Presto - Accumulo Connector - presto-plugin - - - ${project.parent.basedir} - 1.7.4 - 2.12.0 - 1.2.17 - - - - - org.apache.accumulo - accumulo-core - ${dep.accumulo.version} - - - org.apache.hadoop - hadoop-client - - - org.apache.htrace - htrace-core - - - com.beust - jcommander - - - commons-beanutils - commons-beanutils-core - - - org.codehaus.plexus - plexus-utils - - - org.codehaus.plexus - plexus-utils - - - com.google.protobuf - protobuf-java - - - commons-logging - commons-logging - - - jline - jline - - - commons-cli - commons-cli - - - org.apache.maven.scm - maven-scm-api - - - org.apache.maven.scm - maven-scm-provider-svnexe - - - log4j - log4j - - - - - - org.apache.accumulo - accumulo-minicluster - ${dep.accumulo.version} - test - - - org.apache.hadoop - hadoop-client - - - org.apache.hadoop - hadoop-minicluster - - - org.apache.htrace - htrace-core - - - com.beust - jcommander - - - commons-beanutils - commons-beanutils-core - - - org.codehaus.plexus - plexus-utils - - - org.codehaus.plexus - plexus-utils - - - org.apache.accumulo - accumulo-monitor - - - org.slf4j - slf4j-log4j12 - - - com.sun.xml.bind - jaxb-impl - - - commons-logging - commons-logging - - - log4j - log4j - - - jline - jline - - - org.apache.maven.scm - maven-scm-api - - - org.apache.maven.scm - maven-scm-provider-svnexe - - - - - - org.apache.curator - curator-framework - ${dep.curator.version} - - - - org.apache.curator - curator-client - ${dep.curator.version} - - - - io.prestosql.hadoop - hadoop-apache - 2.7.7-1 - - - - io.airlift - bootstrap - - - -
io.airlift - json - - - - io.airlift - log - - - - io.airlift - configuration - - - - io.airlift - concurrent - - - - io.airlift - units - - - - javax.validation - validation-api - - - - javax.annotation - javax.annotation-api - - - - com.google.guava - guava - - - - com.google.inject - guice - - - - com.google.code.findbugs - jsr305 - true - - - - commons-lang - commons-lang - 2.4 - - - - org.apache.commons - commons-lang3 - 3.4 - - - - commons-io - commons-io - 2.4 - - - - joda-time - joda-time - - - - org.apache.zookeeper - zookeeper - - - - javax.inject - javax.inject - - - - com.fasterxml.jackson.core - jackson-databind - - - - log4j - log4j - ${dep.log4j.version} - - - - - io.airlift - log-manager - runtime - - - org.slf4j - log4j-over-slf4j - - - - - - - io.prestosql - presto-spi - provided - - - - com.fasterxml.jackson.core - jackson-annotations - provided - - - - io.airlift - slice - provided - - - - org.openjdk.jol - jol-core - provided - - - - - io.prestosql - presto-main - test - - - - io.prestosql - presto-testing - test - - - - org.jetbrains - annotations - 13.0 - test - - - - io.prestosql - presto-tpch - test - - - - io.airlift.tpch - tpch - test - - - - org.testng - testng - test - - - - - - skip-accumulo-tests - - - - org.apache.maven.plugins - maven-surefire-plugin - - true - - - - - - - diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloClient.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloClient.java deleted file mode 100644 index 3b43e568ffb3..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloClient.java +++ /dev/null @@ -1,948 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo; - -import com.google.common.base.Splitter; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Iterables; -import io.airlift.log.Logger; -import io.prestosql.plugin.accumulo.conf.AccumuloConfig; -import io.prestosql.plugin.accumulo.conf.AccumuloSessionProperties; -import io.prestosql.plugin.accumulo.conf.AccumuloTableProperties; -import io.prestosql.plugin.accumulo.index.IndexLookup; -import io.prestosql.plugin.accumulo.index.Indexer; -import io.prestosql.plugin.accumulo.io.AccumuloPageSink; -import io.prestosql.plugin.accumulo.metadata.AccumuloTable; -import io.prestosql.plugin.accumulo.metadata.AccumuloView; -import io.prestosql.plugin.accumulo.metadata.ZooKeeperMetadataManager; -import io.prestosql.plugin.accumulo.model.AccumuloColumnConstraint; -import io.prestosql.plugin.accumulo.model.AccumuloColumnHandle; -import io.prestosql.plugin.accumulo.model.TabletSplitMetadata; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.connector.ColumnMetadata; -import io.prestosql.spi.connector.ConnectorSession; -import io.prestosql.spi.connector.ConnectorTableMetadata; -import io.prestosql.spi.connector.SchemaTableName; -import io.prestosql.spi.connector.TableNotFoundException; -import io.prestosql.spi.predicate.Domain; -import io.prestosql.spi.predicate.Marker.Bound; -import org.apache.accumulo.core.client.AccumuloException; -import org.apache.accumulo.core.client.AccumuloSecurityException; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.IteratorSetting; -import org.apache.accumulo.core.client.Scanner; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.PartialKey; -import org.apache.accumulo.core.data.Range; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.security.Authorizations; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.io.Text; - -import javax.inject.Inject; - -import java.security.InvalidParameterException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Optional; -import java.util.Set; -import java.util.stream.Collectors; - -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.ACCUMULO_TABLE_DNE; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.ACCUMULO_TABLE_EXISTS; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.UNEXPECTED_ACCUMULO_ERROR; -import static io.prestosql.spi.StandardErrorCode.ALREADY_EXISTS; -import static io.prestosql.spi.StandardErrorCode.FUNCTION_IMPLEMENTATION_ERROR; -import static io.prestosql.spi.StandardErrorCode.INVALID_TABLE_PROPERTY; -import static io.prestosql.spi.StandardErrorCode.NOT_FOUND; -import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED; -import static java.lang.String.format; -import static java.util.Objects.requireNonNull; - -/** - * This class is the main access point for the Presto connector to interact with Accumulo. - * It is responsible for creating tables, dropping tables, retrieving table metadata, and getting the ConnectorSplits from a table. 
- */ -public class AccumuloClient -{ - private static final Logger LOG = Logger.get(AccumuloClient.class); - private static final Splitter COMMA_SPLITTER = Splitter.on(',').omitEmptyStrings().trimResults(); - - private final ZooKeeperMetadataManager metaManager; - private final Authorizations auths; - private final AccumuloTableManager tableManager; - private final Connector connector; - private final IndexLookup indexLookup; - private final String username; - - @Inject - public AccumuloClient( - Connector connector, - AccumuloConfig config, - ZooKeeperMetadataManager metaManager, - AccumuloTableManager tableManager, - IndexLookup indexLookup) - throws AccumuloException, AccumuloSecurityException - { - this.connector = requireNonNull(connector, "connector is null"); - this.username = requireNonNull(config, "config is null").getUsername(); - this.metaManager = requireNonNull(metaManager, "metaManager is null"); - this.tableManager = requireNonNull(tableManager, "tableManager is null"); - this.indexLookup = requireNonNull(indexLookup, "indexLookup is null"); - - this.auths = connector.securityOperations().getUserAuthorizations(username); - } - - public AccumuloTable createTable(ConnectorTableMetadata meta) - { - // Validate the DDL is something we can handle - validateCreateTable(meta); - - Map tableProperties = meta.getProperties(); - String rowIdColumn = getRowIdColumn(meta); - - // Get the list of column handles - List columns = getColumnHandles(meta, rowIdColumn); - - // Create the AccumuloTable object - AccumuloTable table = new AccumuloTable( - meta.getTable().getSchemaName(), - meta.getTable().getTableName(), - columns, - rowIdColumn, - AccumuloTableProperties.isExternal(tableProperties), - AccumuloTableProperties.getSerializerClass(tableProperties), - AccumuloTableProperties.getScanAuthorizations(tableProperties)); - - // First, create the metadata - metaManager.createTableMetadata(table); - - // Make sure the namespace exists - tableManager.ensureNamespace(table.getSchema()); - - // Create the Accumulo table if it does not exist (for 'external' table) - if (!tableManager.exists(table.getFullTableName())) { - tableManager.createAccumuloTable(table.getFullTableName()); - } - - // Set any locality groups on the data table - setLocalityGroups(tableProperties, table); - - // Create index tables, if appropriate - createIndexTables(table); - - return table; - } - - /** - * Validates the given metadata for a series of conditions to ensure the table is well-formed. 
- * - * @param meta Table metadata - */ - private void validateCreateTable(ConnectorTableMetadata meta) - { - validateColumns(meta); - validateLocalityGroups(meta); - if (!AccumuloTableProperties.isExternal(meta.getProperties())) { - validateInternalTable(meta); - } - } - - private static void validateColumns(ConnectorTableMetadata meta) - { - // Check all the column types, and throw an exception if the types of a map are complex - // While it is a rare case, this is not supported by the Accumulo connector - ImmutableSet.Builder columnNameBuilder = ImmutableSet.builder(); - for (ColumnMetadata column : meta.getColumns()) { - if (Types.isMapType(column.getType())) { - if (Types.isMapType(Types.getKeyType(column.getType())) - || Types.isMapType(Types.getValueType(column.getType())) - || Types.isArrayType(Types.getKeyType(column.getType())) - || Types.isArrayType(Types.getValueType(column.getType()))) { - throw new PrestoException(INVALID_TABLE_PROPERTY, "Key/value types of a MAP column must be plain types"); - } - } - - columnNameBuilder.add(column.getName().toLowerCase(Locale.ENGLISH)); - } - - // Validate the columns are distinct - if (columnNameBuilder.build().size() != meta.getColumns().size()) { - throw new PrestoException(INVALID_TABLE_PROPERTY, "Duplicate column names are not supported"); - } - - Optional>> columnMapping = AccumuloTableProperties.getColumnMapping(meta.getProperties()); - if (columnMapping.isPresent()) { - // Validate there are no duplicates in the column mapping - long distinctMappings = columnMapping.get().values().stream().distinct().count(); - if (distinctMappings != columnMapping.get().size()) { - throw new PrestoException(INVALID_TABLE_PROPERTY, "Duplicate column family/qualifier pair detected in column mapping, check the value of " + AccumuloTableProperties.COLUMN_MAPPING); - } - - // Validate no column is mapped to the reserved entry - String reservedRowIdColumn = AccumuloPageSink.ROW_ID_COLUMN.toString(); - if (columnMapping.get().values().stream() - .filter(pair -> pair.getKey().equals(reservedRowIdColumn) && pair.getValue().equals(reservedRowIdColumn)) - .count() > 0) { - throw new PrestoException(INVALID_TABLE_PROPERTY, format("Column familiy/qualifier mapping of %s:%s is reserved", reservedRowIdColumn, reservedRowIdColumn)); - } - } - else if (AccumuloTableProperties.isExternal(meta.getProperties())) { - // Column mapping is not defined (i.e. 
use column generation) and table is external - // But column generation is for internal tables only - throw new PrestoException(INVALID_TABLE_PROPERTY, "Column generation for external tables is not supported, must specify " + AccumuloTableProperties.COLUMN_MAPPING); - } - } - - private static void validateLocalityGroups(ConnectorTableMetadata meta) - { - // Validate any configured locality groups - Optional>> groups = AccumuloTableProperties.getLocalityGroups(meta.getProperties()); - if (!groups.isPresent()) { - return; - } - - String rowIdColumn = getRowIdColumn(meta); - - // For each locality group - for (Map.Entry> g : groups.get().entrySet()) { - if (g.getValue().contains(rowIdColumn)) { - throw new PrestoException(INVALID_TABLE_PROPERTY, "Row ID column cannot be in a locality group"); - } - - // Validate the specified column names exist in the table definition, - // incrementing a counter for each matching column - int matchingColumns = 0; - for (ColumnMetadata column : meta.getColumns()) { - if (g.getValue().contains(column.getName().toLowerCase(Locale.ENGLISH))) { - ++matchingColumns; - - // Break out early if all columns are found - if (matchingColumns == g.getValue().size()) { - break; - } - } - } - - // If the number of matched columns does not equal the defined size, - // then a column was specified that does not exist - // (or there is a duplicate column in the table DDL, which is also an issue but has been checked before in validateColumns). - if (matchingColumns != g.getValue().size()) { - throw new PrestoException(INVALID_TABLE_PROPERTY, "Unknown Presto column defined for locality group " + g.getKey()); - } - } - } - - private void validateInternalTable(ConnectorTableMetadata meta) - { - String table = AccumuloTable.getFullTableName(meta.getTable()); - String indexTable = Indexer.getIndexTableName(meta.getTable()); - String metricsTable = Indexer.getMetricsTableName(meta.getTable()); - - if (tableManager.exists(table)) { - throw new PrestoException(ACCUMULO_TABLE_EXISTS, "Cannot create internal table when an Accumulo table already exists"); - } - - if (AccumuloTableProperties.getIndexColumns(meta.getProperties()).isPresent()) { - if (tableManager.exists(indexTable) || tableManager.exists(metricsTable)) { - throw new PrestoException(ACCUMULO_TABLE_EXISTS, "Internal table is indexed, but the index table and/or index metrics table(s) already exist"); - } - } - } - - /** - * Gets the row ID based on a table properties or the first column name. 
- * - * @param meta ConnectorTableMetadata - * @return Lowercase Presto column name mapped to the Accumulo row ID - */ - private static String getRowIdColumn(ConnectorTableMetadata meta) - { - Optional rowIdColumn = AccumuloTableProperties.getRowId(meta.getProperties()); - return rowIdColumn.orElse(meta.getColumns().get(0).getName()).toLowerCase(Locale.ENGLISH); - } - - private static List getColumnHandles(ConnectorTableMetadata meta, String rowIdColumn) - { - // Get the column mappings from the table property or auto-generate columns if not defined - Map> mapping = AccumuloTableProperties.getColumnMapping(meta.getProperties()).orElse(autoGenerateMapping(meta.getColumns(), AccumuloTableProperties.getLocalityGroups(meta.getProperties()))); - - // The list of indexed columns - Optional> indexedColumns = AccumuloTableProperties.getIndexColumns(meta.getProperties()); - - // And now we parse the configured columns and create handles for the metadata manager - ImmutableList.Builder cBuilder = ImmutableList.builder(); - for (int ordinal = 0; ordinal < meta.getColumns().size(); ++ordinal) { - ColumnMetadata cm = meta.getColumns().get(ordinal); - - // Special case if this column is the row ID - if (cm.getName().equalsIgnoreCase(rowIdColumn)) { - cBuilder.add( - new AccumuloColumnHandle( - rowIdColumn, - Optional.empty(), - Optional.empty(), - cm.getType(), - ordinal, - "Accumulo row ID", - false)); - } - else { - if (!mapping.containsKey(cm.getName())) { - throw new InvalidParameterException(format("Misconfigured mapping for presto column %s", cm.getName())); - } - - // Get the mapping for this column - Pair famqual = mapping.get(cm.getName()); - boolean indexed = indexedColumns.isPresent() && indexedColumns.get().contains(cm.getName().toLowerCase(Locale.ENGLISH)); - String comment = format("Accumulo column %s:%s. Indexed: %b", famqual.getLeft(), famqual.getRight(), indexed); - - // Create a new AccumuloColumnHandle object - cBuilder.add( - new AccumuloColumnHandle( - cm.getName(), - Optional.of(famqual.getLeft()), - Optional.of(famqual.getRight()), - cm.getType(), - ordinal, - comment, - indexed)); - } - } - - return cBuilder.build(); - } - - private void setLocalityGroups(Map tableProperties, AccumuloTable table) - { - Optional>> groups = AccumuloTableProperties.getLocalityGroups(tableProperties); - if (!groups.isPresent()) { - LOG.debug("No locality groups to set"); - return; - } - - ImmutableMap.Builder> localityGroupsBuilder = ImmutableMap.builder(); - for (Map.Entry> g : groups.get().entrySet()) { - ImmutableSet.Builder familyBuilder = ImmutableSet.builder(); - // For each configured column for this locality group - for (String col : g.getValue()) { - // Locate the column family mapping via the Handle - // We already validated this earlier, so it'll exist - AccumuloColumnHandle handle = table.getColumns() - .stream() - .filter(x -> x.getName().equals(col)) - .collect(Collectors.toList()) - .get(0); - familyBuilder.add(new Text(handle.getFamily().get())); - } - - localityGroupsBuilder.put(g.getKey(), familyBuilder.build()); - } - - Map> localityGroups = localityGroupsBuilder.build(); - LOG.debug("Setting locality groups: {}", localityGroups); - tableManager.setLocalityGroups(table.getFullTableName(), localityGroups); - } - - /** - * Creates the index tables from the given Accumulo table. No op if - * {@link AccumuloTable#isIndexed()} is false. 
- * - * @param table Table to create index tables - */ - private void createIndexTables(AccumuloTable table) - { - // Early-out if table is not indexed - if (!table.isIndexed()) { - return; - } - - // Create index table if it does not exist (for 'external' table) - if (!tableManager.exists(table.getIndexTableName())) { - tableManager.createAccumuloTable(table.getIndexTableName()); - } - - // Create index metrics table if it does not exist - if (!tableManager.exists(table.getMetricsTableName())) { - tableManager.createAccumuloTable(table.getMetricsTableName()); - } - - // Set locality groups on index and metrics table - Map> indexGroups = Indexer.getLocalityGroups(table); - tableManager.setLocalityGroups(table.getIndexTableName(), indexGroups); - tableManager.setLocalityGroups(table.getMetricsTableName(), indexGroups); - - // Attach iterators to metrics table - for (IteratorSetting setting : Indexer.getMetricIterators(table)) { - tableManager.setIterator(table.getMetricsTableName(), setting); - } - } - - /** - * Auto-generates the mapping of Presto column name to Accumulo family/qualifier, respecting the locality groups (if any). - * - * @param columns Presto columns for the table - * @param groups Mapping of locality groups to a set of Presto columns, or null if none - * @return Column mappings - */ - private static Map> autoGenerateMapping(List columns, Optional>> groups) - { - Map> mapping = new HashMap<>(); - for (ColumnMetadata column : columns) { - Optional family = getColumnLocalityGroup(column.getName(), groups); - mapping.put(column.getName(), Pair.of(family.orElse(column.getName()), column.getName())); - } - return mapping; - } - - /** - * Searches through the given locality groups to find if this column has a locality group. - * - * @param columnName Column name to get the locality group of - * @param groups Optional locality group configuration - * @return Optional string containing the name of the locality group, if present - */ - private static Optional getColumnLocalityGroup(String columnName, Optional>> groups) - { - if (groups.isPresent()) { - for (Map.Entry> group : groups.get().entrySet()) { - if (group.getValue().contains(columnName.toLowerCase(Locale.ENGLISH))) { - return Optional.of(group.getKey()); - } - } - } - - return Optional.empty(); - } - - public void dropTable(AccumuloTable table) - { - SchemaTableName tableName = new SchemaTableName(table.getSchema(), table.getTable()); - - // Remove the table metadata from Presto - if (metaManager.getTable(tableName) != null) { - metaManager.deleteTableMetadata(tableName); - } - - if (!table.isExternal()) { - // delete the table and index tables - String fullTableName = table.getFullTableName(); - if (tableManager.exists(fullTableName)) { - tableManager.deleteAccumuloTable(fullTableName); - } - - if (table.isIndexed()) { - String indexTableName = Indexer.getIndexTableName(tableName); - if (tableManager.exists(indexTableName)) { - tableManager.deleteAccumuloTable(indexTableName); - } - - String metricsTableName = Indexer.getMetricsTableName(tableName); - if (tableManager.exists(metricsTableName)) { - tableManager.deleteAccumuloTable(metricsTableName); - } - } - } - } - - public void renameTable(SchemaTableName oldName, SchemaTableName newName) - { - if (!oldName.getSchemaName().equals(newName.getSchemaName())) { - throw new PrestoException(NOT_SUPPORTED, "Accumulo does not support renaming tables to different namespaces (schemas)"); - } - - AccumuloTable oldTable = getTable(oldName); - if (oldTable == null) { - throw new 
TableNotFoundException(oldName); - } - - AccumuloTable newTable = new AccumuloTable( - oldTable.getSchema(), - newName.getTableName(), - oldTable.getColumns(), - oldTable.getRowId(), - oldTable.isExternal(), - oldTable.getSerializerClassName(), - oldTable.getScanAuthorizations()); - - // Validate table existence - if (!tableManager.exists(oldTable.getFullTableName())) { - throw new PrestoException(ACCUMULO_TABLE_DNE, format("Table %s does not exist", oldTable.getFullTableName())); - } - - if (tableManager.exists(newTable.getFullTableName())) { - throw new PrestoException(ACCUMULO_TABLE_EXISTS, format("Table %s already exists", newTable.getFullTableName())); - } - - // Rename index tables (which will also validate table existence) - renameIndexTables(oldTable, newTable); - - // Rename the Accumulo table - tableManager.renameAccumuloTable(oldTable.getFullTableName(), newTable.getFullTableName()); - - // We'll then create the metadata - metaManager.deleteTableMetadata(oldTable.getSchemaTableName()); - metaManager.createTableMetadata(newTable); - } - - /** - * Renames the index tables (if applicable) for the old table to the new table. - * - * @param oldTable Old AccumuloTable - * @param newTable New AccumuloTable - */ - private void renameIndexTables(AccumuloTable oldTable, AccumuloTable newTable) - { - if (!oldTable.isIndexed()) { - return; - } - - if (!tableManager.exists(oldTable.getIndexTableName())) { - throw new PrestoException(ACCUMULO_TABLE_DNE, format("Table %s does not exist", oldTable.getIndexTableName())); - } - - if (tableManager.exists(newTable.getIndexTableName())) { - throw new PrestoException(ACCUMULO_TABLE_EXISTS, format("Table %s already exists", newTable.getIndexTableName())); - } - - if (!tableManager.exists(oldTable.getMetricsTableName())) { - throw new PrestoException(ACCUMULO_TABLE_DNE, format("Table %s does not exist", oldTable.getMetricsTableName())); - } - - if (tableManager.exists(newTable.getMetricsTableName())) { - throw new PrestoException(ACCUMULO_TABLE_EXISTS, format("Table %s already exists", newTable.getMetricsTableName())); - } - - tableManager.renameAccumuloTable(oldTable.getIndexTableName(), newTable.getIndexTableName()); - tableManager.renameAccumuloTable(oldTable.getMetricsTableName(), newTable.getMetricsTableName()); - } - - public void createView(SchemaTableName viewName, String viewData) - { - if (getSchemaNames().contains(viewName.getSchemaName())) { - if (getViewNames(viewName.getSchemaName()).contains(viewName.getTableName())) { - throw new PrestoException(ALREADY_EXISTS, "View already exists"); - } - - if (getTableNames(viewName.getSchemaName()).contains(viewName.getTableName())) { - throw new PrestoException(ALREADY_EXISTS, "View already exists as data table"); - } - } - - metaManager.createViewMetadata(new AccumuloView(viewName.getSchemaName(), viewName.getTableName(), viewData)); - } - - public void createOrReplaceView(SchemaTableName viewName, String viewData) - { - if (getView(viewName) != null) { - metaManager.deleteViewMetadata(viewName); - } - - metaManager.createViewMetadata(new AccumuloView(viewName.getSchemaName(), viewName.getTableName(), viewData)); - } - - public void dropView(SchemaTableName viewName) - { - metaManager.deleteViewMetadata(viewName); - } - - public void renameColumn(AccumuloTable table, String source, String target) - { - if (!table.getColumns().stream().anyMatch(columnHandle -> columnHandle.getName().equalsIgnoreCase(source))) { - throw new PrestoException(NOT_FOUND, format("Failed to find source column %s to rename 
to %s", source, target)); - } - - // Copy existing column list, replacing the old column name with the new - ImmutableList.Builder newColumnList = ImmutableList.builder(); - for (AccumuloColumnHandle columnHandle : table.getColumns()) { - if (columnHandle.getName().equalsIgnoreCase(source)) { - newColumnList.add(new AccumuloColumnHandle( - target, - columnHandle.getFamily(), - columnHandle.getQualifier(), - columnHandle.getType(), - columnHandle.getOrdinal(), - columnHandle.getComment(), - columnHandle.isIndexed())); - } - else { - newColumnList.add(columnHandle); - } - } - - // Create new table metadata - AccumuloTable newTable = new AccumuloTable( - table.getSchema(), - table.getTable(), - newColumnList.build(), - table.getRowId().equalsIgnoreCase(source) ? target : table.getRowId(), - table.isExternal(), - table.getSerializerClassName(), - table.getScanAuthorizations()); - - // Replace the table metadata - metaManager.deleteTableMetadata(new SchemaTableName(table.getSchema(), table.getTable())); - metaManager.createTableMetadata(newTable); - } - - public Set getSchemaNames() - { - return metaManager.getSchemaNames(); - } - - public Set getTableNames(String schema) - { - requireNonNull(schema, "schema is null"); - return metaManager.getTableNames(schema); - } - - public AccumuloTable getTable(SchemaTableName table) - { - requireNonNull(table, "schema table name is null"); - return metaManager.getTable(table); - } - - public Set getViewNames(String schema) - { - requireNonNull(schema, "schema is null"); - return metaManager.getViewNames(schema); - } - - public AccumuloView getView(SchemaTableName viewName) - { - requireNonNull(viewName, "schema table name is null"); - return metaManager.getView(viewName); - } - - /** - * Fetches the TabletSplitMetadata for a query against an Accumulo table. - *

- * Does a whole bunch of fun stuff! Splitting on row ID ranges, applying secondary indexes, column pruning, - * all sorts of sweet optimizations. What you have here is an important method. - * - * @param session Current session - * @param schema Schema name - * @param table Table Name - * @param rowIdDomain Domain for the row ID - * @param constraints Column constraints for the query - * @param serializer Instance of a row serializer - * @return List of TabletSplitMetadata objects for Presto - */ - public List getTabletSplits( - ConnectorSession session, - String schema, - String table, - Optional rowIdDomain, - List constraints, - AccumuloRowSerializer serializer) - { - try { - String tableName = AccumuloTable.getFullTableName(schema, table); - LOG.debug("Getting tablet splits for table %s", tableName); - - // Get the initial Range based on the row ID domain - Collection rowIdRanges = getRangesFromDomain(rowIdDomain, serializer); - List tabletSplits = new ArrayList<>(); - - // Use the secondary index, if enabled - if (AccumuloSessionProperties.isOptimizeIndexEnabled(session)) { - // Get the scan authorizations to query the index - Authorizations auths = getScanAuthorizations(session, schema, table); - - // Check the secondary index based on the column constraints - // If this returns true, return the tablet splits to Presto - if (indexLookup.applyIndex(schema, table, session, constraints, rowIdRanges, tabletSplits, serializer, auths)) { - return tabletSplits; - } - } - - // If we can't (or shouldn't) use the secondary index, we will just use the Range from the row ID domain - - // Split the ranges on tablet boundaries, if enabled - Collection splitRanges; - if (AccumuloSessionProperties.isOptimizeSplitRangesEnabled(session)) { - splitRanges = splitByTabletBoundaries(tableName, rowIdRanges); - } - else { - // if not enabled, just use the same collection - splitRanges = rowIdRanges; - } - - // Create TabletSplitMetadata objects for each range - boolean fetchTabletLocations = AccumuloSessionProperties.isOptimizeLocalityEnabled(session); - - LOG.debug("Fetching tablet locations: %s", fetchTabletLocations); - - for (Range range : splitRanges) { - // If locality is enabled, then fetch tablet location - if (fetchTabletLocations) { - tabletSplits.add(new TabletSplitMetadata(getTabletLocation(tableName, range.getStartKey()), ImmutableList.of(range))); - } - else { - // else, just use the default location - tabletSplits.add(new TabletSplitMetadata(Optional.empty(), ImmutableList.of(range))); - } - } - - // Log some fun stuff and return the tablet splits - LOG.debug("Number of splits for table %s is %d with %d ranges", tableName, tabletSplits.size(), splitRanges.size()); - return tabletSplits; - } - catch (Exception e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to get splits from Accumulo", e); - } - } - - /** - * Gets the scan authorizations to use for scanning tables. - *

- * In order of priority: session username authorizations, then table property, then the default connector auths. - * - * @param session Current session - * @param schema Schema name - * @param table Table Name - * @return Scan authorizations - * @throws AccumuloException If a generic Accumulo error occurs - * @throws AccumuloSecurityException If a security exception occurs - */ - private Authorizations getScanAuthorizations(ConnectorSession session, String schema, - String table) - throws AccumuloException, AccumuloSecurityException - { - String sessionScanUser = AccumuloSessionProperties.getScanUsername(session); - if (sessionScanUser != null) { - Authorizations scanAuths = connector.securityOperations().getUserAuthorizations(sessionScanUser); - LOG.debug("Using session scan auths for user %s: %s", sessionScanUser, scanAuths); - return scanAuths; - } - - AccumuloTable accumuloTable = this.getTable(new SchemaTableName(schema, table)); - if (accumuloTable == null) { - throw new TableNotFoundException(new SchemaTableName(schema, table)); - } - - Optional strAuths = accumuloTable.getScanAuthorizations(); - if (strAuths.isPresent()) { - Authorizations scanAuths = new Authorizations(Iterables.toArray(COMMA_SPLITTER.split(strAuths.get()), String.class)); - LOG.debug("scan_auths table property set, using: %s", scanAuths); - return scanAuths; - } - - LOG.debug("scan_auths table property not set, using connector auths: %s", this.auths); - return this.auths; - } - - private Collection splitByTabletBoundaries(String tableName, Collection ranges) - throws org.apache.accumulo.core.client.TableNotFoundException, AccumuloException, AccumuloSecurityException - { - ImmutableSet.Builder rangeBuilder = ImmutableSet.builder(); - for (Range range : ranges) { - // if start and end key are equivalent, no need to split the range - if (range.getStartKey() != null && range.getEndKey() != null && range.getStartKey().equals(range.getEndKey())) { - rangeBuilder.add(range); - } - else { - // Call out to Accumulo to split the range on tablets - rangeBuilder.addAll(connector.tableOperations().splitRangeByTablets(tableName, range, Integer.MAX_VALUE)); - } - } - return rangeBuilder.build(); - } - - /** - * Gets the TabletServer hostname for where the given key is located in the given table - * - * @param table Fully-qualified table name - * @param key Key to locate - * @return The tablet location, or DUMMY_LOCATION if an error occurs - */ - private Optional getTabletLocation(String table, Key key) - { - try { - // Get the Accumulo table ID so we can scan some fun stuff - String tableId = connector.tableOperations().tableIdMap().get(table); - - // Create our scanner against the metadata table, fetching 'loc' family - Scanner scanner = connector.createScanner("accumulo.metadata", auths); - scanner.fetchColumnFamily(new Text("loc")); - - // Set the scan range to just this table, from the table ID to the default tablet - // row, which is the last listed tablet - Key defaultTabletRow = new Key(tableId + '<'); - Key start = new Key(tableId); - Key end = defaultTabletRow.followingKey(PartialKey.ROW); - scanner.setRange(new Range(start, end)); - - Optional location = Optional.empty(); - if (key == null) { - // if the key is null, then it is -inf, so get first tablet location - Iterator> iter = scanner.iterator(); - if (iter.hasNext()) { - location = Optional.of(iter.next().getValue().toString()); - } - } - else { - // Else, we will need to scan through the tablet location data and find the location - - // Create some text objects 
to do comparison for what we are looking for - Text splitCompareKey = new Text(); - key.getRow(splitCompareKey); - Text scannedCompareKey = new Text(); - - // Scan the table! - for (Entry entry : scanner) { - // Get the bytes of the key - byte[] keyBytes = entry.getKey().getRow().copyBytes(); - - // If the last byte is <, then we have hit the default tablet, so use this location - if (keyBytes[keyBytes.length - 1] == '<') { - location = Optional.of(entry.getValue().toString()); - break; - } - else { - // Chop off some magic nonsense - scannedCompareKey.set(keyBytes, 3, keyBytes.length - 3); - - // Compare the keys, moving along the tablets until the location is found - if (scannedCompareKey.getLength() > 0) { - int compareTo = splitCompareKey.compareTo(scannedCompareKey); - if (compareTo <= 0) { - location = Optional.of(entry.getValue().toString()); - } - else { - // all future tablets will be greater than this key - break; - } - } - } - } - scanner.close(); - } - - // If we were unable to find the location for some reason, return the default tablet - // location - return location.isPresent() ? location : getDefaultTabletLocation(table); - } - catch (Exception e) { - // Swallow this exception so the query does not fail due to being unable - // to locate the tablet server for the provided Key. - // This is purely an optimization, but we will want to log the error. - LOG.error("Failed to get tablet location, returning dummy location", e); - return Optional.empty(); - } - } - - private Optional getDefaultTabletLocation(String fulltable) - { - try { - String tableId = connector.tableOperations().tableIdMap().get(fulltable); - - // Create a scanner over the metadata table, fetching the 'loc' column of the default tablet row - Scanner scan = connector.createScanner("accumulo.metadata", connector.securityOperations().getUserAuthorizations(username)); - scan.fetchColumnFamily(new Text("loc")); - scan.setRange(new Range(tableId + '<')); - - // scan the entry - Optional location = Optional.empty(); - for (Entry entry : scan) { - if (location.isPresent()) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Scan for default tablet returned more than one entry"); - } - - location = Optional.of(entry.getValue().toString()); - } - - scan.close(); - return location; - } - catch (Exception e) { - // Swallow this exception so the query does not fail due to being unable to locate the tablet server for the default tablet. - // This is purely an optimization, but we will want to log the error. - LOG.error("Failed to get tablet location, returning dummy location", e); - return Optional.empty(); - } - } - - /** - * Gets a collection of Accumulo Range objects from the given Presto domain. - * This maps the column constraints of the given Domain to an Accumulo Range scan. 
- * - * @param domain Domain, can be null (returns (-inf, +inf) Range) - * @param serializer Instance of an {@link AccumuloRowSerializer} - * @return A collection of Accumulo Range objects - * @throws TableNotFoundException If the Accumulo table is not found - */ - public static Collection getRangesFromDomain(Optional domain, AccumuloRowSerializer serializer) - throws TableNotFoundException - { - // if we have no predicate pushdown, use the full range - if (!domain.isPresent()) { - return ImmutableSet.of(new Range()); - } - - ImmutableSet.Builder rangeBuilder = ImmutableSet.builder(); - for (io.prestosql.spi.predicate.Range range : domain.get().getValues().getRanges().getOrderedRanges()) { - rangeBuilder.add(getRangeFromPrestoRange(range, serializer)); - } - - return rangeBuilder.build(); - } - - private static Range getRangeFromPrestoRange(io.prestosql.spi.predicate.Range prestoRange, AccumuloRowSerializer serializer) - throws TableNotFoundException - { - Range accumuloRange; - if (prestoRange.isAll()) { - accumuloRange = new Range(); - } - else if (prestoRange.isSingleValue()) { - Text split = new Text(serializer.encode(prestoRange.getType(), prestoRange.getSingleValue())); - accumuloRange = new Range(split); - } - else { - if (prestoRange.getLow().isLowerUnbounded()) { - // If low is unbounded, then create a range from (-inf, value), checking inclusivity - boolean inclusive = prestoRange.getHigh().getBound() == Bound.EXACTLY; - Text split = new Text(serializer.encode(prestoRange.getType(), prestoRange.getHigh().getValue())); - accumuloRange = new Range(null, false, split, inclusive); - } - else if (prestoRange.getHigh().isUpperUnbounded()) { - // If high is unbounded, then create a range from (value, +inf), checking inclusivity - boolean inclusive = prestoRange.getLow().getBound() == Bound.EXACTLY; - Text split = new Text(serializer.encode(prestoRange.getType(), prestoRange.getLow().getValue())); - accumuloRange = new Range(split, inclusive, null, false); - } - else { - // If high is unbounded, then create a range from low to high, checking inclusivity - boolean startKeyInclusive = prestoRange.getLow().getBound() == Bound.EXACTLY; - Text startSplit = new Text(serializer.encode(prestoRange.getType(), prestoRange.getLow().getValue())); - - boolean endKeyInclusive = prestoRange.getHigh().getBound() == Bound.EXACTLY; - Text endSplit = new Text(serializer.encode(prestoRange.getType(), prestoRange.getHigh().getValue())); - accumuloRange = new Range(startSplit, startKeyInclusive, endSplit, endKeyInclusive); - } - } - - return accumuloRange; - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloConnector.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloConnector.java deleted file mode 100644 index 74bd9c957921..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloConnector.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo; - -import io.airlift.bootstrap.LifeCycleManager; -import io.airlift.log.Logger; -import io.prestosql.plugin.accumulo.conf.AccumuloSessionProperties; -import io.prestosql.plugin.accumulo.conf.AccumuloTableProperties; -import io.prestosql.plugin.accumulo.io.AccumuloPageSinkProvider; -import io.prestosql.plugin.accumulo.io.AccumuloRecordSetProvider; -import io.prestosql.spi.connector.Connector; -import io.prestosql.spi.connector.ConnectorMetadata; -import io.prestosql.spi.connector.ConnectorPageSinkProvider; -import io.prestosql.spi.connector.ConnectorRecordSetProvider; -import io.prestosql.spi.connector.ConnectorSplitManager; -import io.prestosql.spi.connector.ConnectorTransactionHandle; -import io.prestosql.spi.session.PropertyMetadata; -import io.prestosql.spi.transaction.IsolationLevel; - -import javax.inject.Inject; - -import java.util.List; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; - -import static com.google.common.base.Preconditions.checkArgument; -import static io.prestosql.spi.transaction.IsolationLevel.READ_UNCOMMITTED; -import static io.prestosql.spi.transaction.IsolationLevel.checkConnectorSupports; -import static java.util.Objects.requireNonNull; - -/** - * Presto Connector for Accumulo. - * Defines several high-level classes for properties, metadata, retrieving splits, providing I/O operations, etc. - */ -public class AccumuloConnector - implements Connector -{ - private static final Logger LOG = Logger.get(AccumuloConnector.class); - - private final LifeCycleManager lifeCycleManager; - private final AccumuloMetadataFactory metadataFactory; - private final AccumuloSplitManager splitManager; - private final AccumuloRecordSetProvider recordSetProvider; - private final AccumuloPageSinkProvider pageSinkProvider; - private final AccumuloSessionProperties sessionProperties; - private final AccumuloTableProperties tableProperties; - private final ConcurrentMap transactions = new ConcurrentHashMap<>(); - - @Inject - public AccumuloConnector( - LifeCycleManager lifeCycleManager, - AccumuloMetadataFactory metadataFactory, - AccumuloSplitManager splitManager, - AccumuloRecordSetProvider recordSetProvider, - AccumuloPageSinkProvider pageSinkProvider, - AccumuloSessionProperties sessionProperties, - AccumuloTableProperties tableProperties) - { - this.lifeCycleManager = requireNonNull(lifeCycleManager, "lifeCycleManager is null"); - this.metadataFactory = requireNonNull(metadataFactory, "metadata is null"); - this.splitManager = requireNonNull(splitManager, "splitManager is null"); - this.recordSetProvider = requireNonNull(recordSetProvider, "recordSetProvider is null"); - this.pageSinkProvider = requireNonNull(pageSinkProvider, "pageSinkProvider is null"); - this.sessionProperties = requireNonNull(sessionProperties, "sessionProperties is null"); - this.tableProperties = requireNonNull(tableProperties, "tableProperties is null"); - } - - @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) - { - ConnectorMetadata metadata = transactions.get(transactionHandle); - checkArgument(metadata != null, "no such transaction: %s", transactionHandle); - return metadata; - } - - @Override - public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel, boolean readOnly) - { - checkConnectorSupports(READ_UNCOMMITTED, isolationLevel); - ConnectorTransactionHandle transaction = new AccumuloTransactionHandle(); - transactions.put(transaction, 
metadataFactory.create()); - return transaction; - } - - @Override - public void commit(ConnectorTransactionHandle transactionHandle) - { - checkArgument(transactions.remove(transactionHandle) != null, "no such transaction: %s", transactionHandle); - } - - @Override - public void rollback(ConnectorTransactionHandle transactionHandle) - { - AccumuloMetadata metadata = transactions.remove(transactionHandle); - checkArgument(metadata != null, "no such transaction: %s", transactionHandle); - metadata.rollback(); - } - - @Override - public ConnectorSplitManager getSplitManager() - { - return splitManager; - } - - @Override - public ConnectorRecordSetProvider getRecordSetProvider() - { - return recordSetProvider; - } - - @Override - public ConnectorPageSinkProvider getPageSinkProvider() - { - return pageSinkProvider; - } - - @Override - public List> getTableProperties() - { - return tableProperties.getTableProperties(); - } - - @Override - public List> getSessionProperties() - { - return sessionProperties.getSessionProperties(); - } - - @Override - public final void shutdown() - { - lifeCycleManager.stop(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloConnectorFactory.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloConnectorFactory.java deleted file mode 100644 index 624daec9a183..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloConnectorFactory.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo; - -import com.google.inject.Injector; -import io.airlift.bootstrap.Bootstrap; -import io.airlift.json.JsonModule; -import io.prestosql.spi.connector.Connector; -import io.prestosql.spi.connector.ConnectorContext; -import io.prestosql.spi.connector.ConnectorFactory; -import io.prestosql.spi.connector.ConnectorHandleResolver; - -import java.util.Map; - -import static java.util.Objects.requireNonNull; - -public class AccumuloConnectorFactory - implements ConnectorFactory -{ - public static final String CONNECTOR_NAME = "accumulo"; - - @Override - public String getName() - { - return CONNECTOR_NAME; - } - - @Override - public Connector create(String catalogName, Map config, ConnectorContext context) - { - requireNonNull(catalogName, "catalogName is null"); - requireNonNull(config, "requiredConfig is null"); - requireNonNull(context, "context is null"); - - Bootstrap app = new Bootstrap( - new JsonModule(), - new AccumuloModule(context.getTypeManager())); - - Injector injector = app - .strictConfig() - .doNotInitializeLogging() - .setRequiredConfigurationProperties(config) - .initialize(); - - return injector.getInstance(AccumuloConnector.class); - } - - @Override - public ConnectorHandleResolver getHandleResolver() - { - return new AccumuloHandleResolver(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloErrorCode.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloErrorCode.java deleted file mode 100644 index 45b0329196ad..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloErrorCode.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
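[editor's note] The deleted AccumuloConnectorFactory above receives its config map (originally Map<String, String>) from a catalog properties file, keyed by the CONNECTOR_NAME "accumulo". Assuming the property names defined later in this patch by AccumuloConfig, a catalog file for this connector would have looked roughly like the sketch below; the file path, host names, and credentials are placeholders.

    # etc/catalog/accumulo.properties
    connector.name=accumulo
    accumulo.instance=default
    accumulo.zookeepers=zookeeper1:2181,zookeeper2:2181
    accumulo.username=root
    accumulo.password=secret

All four accumulo.* keys are annotated @NotNull in AccumuloConfig, so omitting any of them fails Bootstrap's configuration validation at catalog load.
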
- */ -package io.prestosql.plugin.accumulo; - -import io.prestosql.spi.ErrorCode; -import io.prestosql.spi.ErrorCodeSupplier; -import io.prestosql.spi.ErrorType; - -import static io.prestosql.spi.ErrorType.EXTERNAL; - -public enum AccumuloErrorCode - implements ErrorCodeSupplier -{ - // Thrown when an Accumulo error is caught that we were not expecting, - // such as when a create table operation fails (even though we know it will succeed due to our validation steps) - UNEXPECTED_ACCUMULO_ERROR(1, EXTERNAL), - - // Thrown when a ZooKeeper error is caught due to a failed operation - ZOOKEEPER_ERROR(2, EXTERNAL), - - // Thrown when a serialization error occurs when reading/writing data from/to Accumulo - IO_ERROR(3, EXTERNAL), - - // Thrown when a table that is expected to exist does not exist - ACCUMULO_TABLE_DNE(4, EXTERNAL), - - // Thrown when a table that is *not* expected to exist, does exist - ACCUMULO_TABLE_EXISTS(5, EXTERNAL), - - // Thrown when an attempt to start/stop MiniAccumuloCluster fails (testing only) - MINI_ACCUMULO(6, EXTERNAL); - - private final ErrorCode errorCode; - - AccumuloErrorCode(int code, ErrorType type) - { - errorCode = new ErrorCode(code + 0x0103_0000, name(), type); - } - - @Override - public ErrorCode toErrorCode() - { - return errorCode; - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloHandleResolver.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloHandleResolver.java deleted file mode 100644 index 5479eb84742e..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloHandleResolver.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
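[editor's note] Worth spelling out the arithmetic in the deleted AccumuloErrorCode above: each connector claims its own block of error codes by adding a fixed base, here 0x0103_0000, to the small per-enum code. A minimal sketch of the computation (class name invented):

    public class ErrorCodeBaseDemo
    {
        // base claimed by the Accumulo connector, per the deleted enum
        private static final int ACCUMULO_ERROR_BASE = 0x0103_0000;

        public static void main(String[] args)
        {
            // UNEXPECTED_ACCUMULO_ERROR was declared with code 1
            int unexpected = 1 + ACCUMULO_ERROR_BASE;
            System.out.printf("UNEXPECTED_ACCUMULO_ERROR = 0x%08X (%d)%n", unexpected, unexpected);
            // prints: UNEXPECTED_ACCUMULO_ERROR = 0x01030001 (16973825)
        }
    }
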
- */ -package io.prestosql.plugin.accumulo; - -import io.prestosql.plugin.accumulo.model.AccumuloColumnHandle; -import io.prestosql.plugin.accumulo.model.AccumuloSplit; -import io.prestosql.plugin.accumulo.model.AccumuloTableHandle; -import io.prestosql.spi.connector.ColumnHandle; -import io.prestosql.spi.connector.ConnectorHandleResolver; -import io.prestosql.spi.connector.ConnectorInsertTableHandle; -import io.prestosql.spi.connector.ConnectorOutputTableHandle; -import io.prestosql.spi.connector.ConnectorSplit; -import io.prestosql.spi.connector.ConnectorTableHandle; -import io.prestosql.spi.connector.ConnectorTransactionHandle; - -public class AccumuloHandleResolver - implements ConnectorHandleResolver -{ - @Override - public Class getTableHandleClass() - { - return AccumuloTableHandle.class; - } - - @Override - public Class getInsertTableHandleClass() - { - return AccumuloTableHandle.class; - } - - @Override - public Class getOutputTableHandleClass() - { - return AccumuloTableHandle.class; - } - - @Override - public Class getColumnHandleClass() - { - return AccumuloColumnHandle.class; - } - - @Override - public Class getSplitClass() - { - return AccumuloSplit.class; - } - - @Override - public Class getTransactionHandleClass() - { - return AccumuloTransactionHandle.class; - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloMetadata.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloMetadata.java deleted file mode 100644 index 3311f0868690..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloMetadata.java +++ /dev/null @@ -1,429 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
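[editor's note] The deleted AccumuloHandleResolver above exists so the engine can map the opaque SPI handle interfaces back to concrete classes when deserializing them from JSON. A self-contained sketch of that round trip, using airlift's JsonCodec and an invented DemoHandle in place of the real handle classes:

    import com.fasterxml.jackson.annotation.JsonCreator;
    import com.fasterxml.jackson.annotation.JsonProperty;
    import io.airlift.json.JsonCodec;

    public class HandleRoundTripDemo
    {
        public static class DemoHandle
        {
            private final String table;

            @JsonCreator
            public DemoHandle(@JsonProperty("table") String table)
            {
                this.table = table;
            }

            @JsonProperty
            public String getTable()
            {
                return table;
            }
        }

        public static void main(String[] args)
        {
            // the engine serializes handles to JSON and needs the concrete
            // class (supplied by the resolver) to turn them back into objects
            JsonCodec<DemoHandle> codec = JsonCodec.jsonCodec(DemoHandle.class);
            String json = codec.toJson(new DemoHandle("schema.table"));
            DemoHandle copy = codec.fromJson(json);
            System.out.println(json + " -> " + copy.getTable());
        }
    }
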
- */ -package io.prestosql.plugin.accumulo; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import io.airlift.json.JsonCodec; -import io.airlift.json.JsonCodecFactory; -import io.airlift.json.ObjectMapperProvider; -import io.airlift.slice.Slice; -import io.prestosql.plugin.accumulo.metadata.AccumuloTable; -import io.prestosql.plugin.accumulo.model.AccumuloColumnHandle; -import io.prestosql.plugin.accumulo.model.AccumuloTableHandle; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.connector.ColumnHandle; -import io.prestosql.spi.connector.ColumnMetadata; -import io.prestosql.spi.connector.ConnectorInsertTableHandle; -import io.prestosql.spi.connector.ConnectorMetadata; -import io.prestosql.spi.connector.ConnectorNewTableLayout; -import io.prestosql.spi.connector.ConnectorOutputMetadata; -import io.prestosql.spi.connector.ConnectorOutputTableHandle; -import io.prestosql.spi.connector.ConnectorSession; -import io.prestosql.spi.connector.ConnectorTableHandle; -import io.prestosql.spi.connector.ConnectorTableMetadata; -import io.prestosql.spi.connector.ConnectorTableProperties; -import io.prestosql.spi.connector.ConnectorViewDefinition; -import io.prestosql.spi.connector.Constraint; -import io.prestosql.spi.connector.ConstraintApplicationResult; -import io.prestosql.spi.connector.SchemaTableName; -import io.prestosql.spi.connector.SchemaTablePrefix; -import io.prestosql.spi.connector.TableNotFoundException; -import io.prestosql.spi.predicate.TupleDomain; -import io.prestosql.spi.statistics.ComputedStatistics; - -import javax.inject.Inject; - -import java.util.Collection; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.atomic.AtomicReference; - -import static com.google.common.base.Preconditions.checkState; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.ACCUMULO_TABLE_EXISTS; -import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED; -import static java.lang.String.format; -import static java.util.Objects.requireNonNull; - -/** - * Presto metadata provider for Accumulo. - * Responsible for creating/dropping/listing tables, schemas, columns, all sorts of goodness. Heavily leverages {@link AccumuloClient}. 
- */ -public class AccumuloMetadata - implements ConnectorMetadata -{ - private static final JsonCodec VIEW_CODEC = - new JsonCodecFactory(new ObjectMapperProvider()).jsonCodec(ConnectorViewDefinition.class); - - private final AccumuloClient client; - private final AtomicReference rollbackAction = new AtomicReference<>(); - - @Inject - public AccumuloMetadata(AccumuloClient client) - { - this.client = requireNonNull(client, "client is null"); - } - - @Override - public ConnectorOutputTableHandle beginCreateTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, Optional layout) - { - checkNoRollback(); - - SchemaTableName tableName = tableMetadata.getTable(); - AccumuloTable table = client.createTable(tableMetadata); - - AccumuloTableHandle handle = new AccumuloTableHandle( - tableName.getSchemaName(), - tableName.getTableName(), - table.getRowId(), - table.isExternal(), - table.getSerializerClassName(), - table.getScanAuthorizations()); - - setRollback(() -> rollbackCreateTable(table)); - - return handle; - } - - @Override - public Optional finishCreateTable(ConnectorSession session, ConnectorOutputTableHandle tableHandle, Collection fragments, Collection computedStatistics) - { - clearRollback(); - return Optional.empty(); - } - - private void rollbackCreateTable(AccumuloTable table) - { - client.dropTable(table); - } - - @Override - public void createTable(ConnectorSession session, ConnectorTableMetadata tableMetadata, boolean ignoreExisting) - { - client.createTable(tableMetadata); - } - - @Override - public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle) - { - AccumuloTableHandle handle = (AccumuloTableHandle) tableHandle; - AccumuloTable table = client.getTable(handle.toSchemaTableName()); - if (table != null) { - client.dropTable(table); - } - } - - @Override - public void renameTable(ConnectorSession session, ConnectorTableHandle tableHandle, - SchemaTableName newTableName) - { - if (client.getTable(newTableName) != null) { - throw new PrestoException(ACCUMULO_TABLE_EXISTS, "Table " + newTableName + " already exists"); - } - - AccumuloTableHandle handle = (AccumuloTableHandle) tableHandle; - client.renameTable(handle.toSchemaTableName(), newTableName); - } - - @Override - public void createView(ConnectorSession session, SchemaTableName viewName, ConnectorViewDefinition definition, boolean replace) - { - String viewData = VIEW_CODEC.toJson(definition); - if (replace) { - client.createOrReplaceView(viewName, viewData); - } - else { - client.createView(viewName, viewData); - } - } - - @Override - public void dropView(ConnectorSession session, SchemaTableName viewName) - { - client.dropView(viewName); - } - - @Override - public Optional getView(ConnectorSession session, SchemaTableName viewName) - { - return Optional.ofNullable(client.getView(viewName)) - .map(view -> VIEW_CODEC.fromJson(view.getData())); - } - - @Override - public List listViews(ConnectorSession session, Optional schemaName) - { - return listViews(schemaName); - } - - /** - * Gets all views in the given schema, or all schemas if null. 
- * - * @param filterSchema Schema to filter the views, or absent to list all schemas - * @return List of views - */ - private List listViews(Optional filterSchema) - { - ImmutableList.Builder builder = ImmutableList.builder(); - if (filterSchema.isPresent()) { - for (String view : client.getViewNames(filterSchema.get())) { - builder.add(new SchemaTableName(filterSchema.get(), view)); - } - } - else { - for (String schemaName : client.getSchemaNames()) { - for (String view : client.getViewNames(schemaName)) { - builder.add(new SchemaTableName(schemaName, view)); - } - } - } - - return builder.build(); - } - - @Override - public ConnectorInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle) - { - checkNoRollback(); - AccumuloTableHandle handle = (AccumuloTableHandle) tableHandle; - setRollback(() -> rollbackInsert(handle)); - return handle; - } - - @Override - public Optional finishInsert(ConnectorSession session, ConnectorInsertTableHandle insertHandle, Collection fragments, Collection computedStatistics) - { - clearRollback(); - return Optional.empty(); - } - - private static void rollbackInsert(ConnectorInsertTableHandle insertHandle) - { - // Rollbacks for inserts are off the table when it comes to data in Accumulo. - // When a batch of Mutations fails to be inserted, the general strategy - // is to run the insert operation again until it is successful - // Any mutations that were successfully written will be overwritten - // with the same values, so that isn't a problem. - AccumuloTableHandle handle = (AccumuloTableHandle) insertHandle; - throw new PrestoException(NOT_SUPPORTED, format("Unable to rollback insert for table %s.%s. Some rows may have been written. Please run your insert again.", handle.getSchema(), handle.getTable())); - } - - @Override - public ConnectorTableHandle getTableHandle(ConnectorSession session, SchemaTableName tableName) - { - if (!listSchemaNames(session).contains(tableName.getSchemaName().toLowerCase(Locale.ENGLISH))) { - return null; - } - - // Need to validate that SchemaTableName is a table - if (!this.listViews(session, Optional.of(tableName.getSchemaName())).contains(tableName)) { - AccumuloTable table = client.getTable(tableName); - if (table == null) { - return null; - } - - return new AccumuloTableHandle( - table.getSchema(), - table.getTable(), - table.getRowId(), - table.isExternal(), - table.getSerializerClassName(), - table.getScanAuthorizations()); - } - - return null; - } - - @Override - public ConnectorTableMetadata getTableMetadata(ConnectorSession session, ConnectorTableHandle table) - { - AccumuloTableHandle handle = (AccumuloTableHandle) table; - SchemaTableName tableName = new SchemaTableName(handle.getSchema(), handle.getTable()); - ConnectorTableMetadata metadata = getTableMetadata(tableName); - if (metadata == null) { - throw new TableNotFoundException(tableName); - } - return metadata; - } - - @Override - public Map getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle) - { - AccumuloTableHandle handle = (AccumuloTableHandle) tableHandle; - - AccumuloTable table = client.getTable(handle.toSchemaTableName()); - if (table == null) { - throw new TableNotFoundException(handle.toSchemaTableName()); - } - - ImmutableMap.Builder columnHandles = ImmutableMap.builder(); - for (AccumuloColumnHandle column : table.getColumns()) { - columnHandles.put(column.getName(), column); - } - return columnHandles.build(); - } - - @Override - public ColumnMetadata getColumnMetadata(ConnectorSession 
session, ConnectorTableHandle tableHandle, ColumnHandle columnHandle) - { - return ((AccumuloColumnHandle) columnHandle).getColumnMetadata(); - } - - @Override - public void renameColumn(ConnectorSession session, ConnectorTableHandle tableHandle, ColumnHandle source, String target) - { - AccumuloTableHandle handle = (AccumuloTableHandle) tableHandle; - AccumuloColumnHandle columnHandle = (AccumuloColumnHandle) source; - AccumuloTable table = client.getTable(handle.toSchemaTableName()); - if (table == null) { - throw new TableNotFoundException(new SchemaTableName(handle.getSchema(), handle.getTable())); - } - - client.renameColumn(table, columnHandle.getName(), target); - } - - @Override - public List listSchemaNames(ConnectorSession session) - { - return ImmutableList.copyOf(client.getSchemaNames()); - } - - @Override - public List listTables(ConnectorSession session, Optional filterSchema) - { - Set schemaNames = filterSchema.>map(ImmutableSet::of) - .orElseGet(client::getSchemaNames); - - ImmutableList.Builder builder = ImmutableList.builder(); - for (String schemaName : schemaNames) { - for (String tableName : client.getTableNames(schemaName)) { - builder.add(new SchemaTableName(schemaName, tableName)); - } - } - return builder.build(); - } - - @Override - public Map> listTableColumns(ConnectorSession session, SchemaTablePrefix prefix) - { - requireNonNull(prefix, "prefix is null"); - ImmutableMap.Builder> columns = ImmutableMap.builder(); - for (SchemaTableName tableName : listTables(session, prefix)) { - ConnectorTableMetadata tableMetadata = getTableMetadata(tableName); - // table can disappear during listing operation - if (tableMetadata != null) { - columns.put(tableName, tableMetadata.getColumns()); - } - } - return columns.build(); - } - - @Override - public boolean usesLegacyTableLayouts() - { - return false; - } - - @Override - public Optional> applyFilter(ConnectorSession session, ConnectorTableHandle table, Constraint constraint) - { - AccumuloTableHandle handle = (AccumuloTableHandle) table; - - TupleDomain oldDomain = handle.getConstraint(); - TupleDomain newDomain = oldDomain.intersect(constraint.getSummary()); - if (oldDomain.equals(newDomain)) { - return Optional.empty(); - } - - handle = new AccumuloTableHandle( - handle.getSchema(), - handle.getTable(), - handle.getRowId(), - newDomain, - handle.isExternal(), - handle.getSerializerClassName(), - handle.getScanAuthorizations()); - - return Optional.of(new ConstraintApplicationResult<>(handle, constraint.getSummary())); - } - - @Override - public ConnectorTableProperties getTableProperties(ConnectorSession session, ConnectorTableHandle handle) - { - return new ConnectorTableProperties(); - } - - private void checkNoRollback() - { - checkState(rollbackAction.get() == null, "Cannot begin a new write while in an existing one"); - } - - private void setRollback(Runnable action) - { - checkState(rollbackAction.compareAndSet(null, action), "Should not have to override existing rollback action"); - } - - private void clearRollback() - { - rollbackAction.set(null); - } - - public void rollback() - { - Runnable rollbackAction = this.rollbackAction.getAndSet(null); - if (rollbackAction != null) { - rollbackAction.run(); - } - } - - private ConnectorTableMetadata getTableMetadata(SchemaTableName tableName) - { - if (!client.getSchemaNames().contains(tableName.getSchemaName())) { - return null; - } - - // Need to validate that SchemaTableName is a table - if 
(!this.listViews(Optional.ofNullable(tableName.getSchemaName())).contains(tableName)) { - AccumuloTable table = client.getTable(tableName); - if (table == null) { - return null; - } - - return new ConnectorTableMetadata(tableName, table.getColumnsMetadata()); - } - - return null; - } - - private List listTables(ConnectorSession session, SchemaTablePrefix prefix) - { - // List all tables if schema or table is null - if (!prefix.getTable().isPresent()) { - return listTables(session, prefix.getSchema()); - } - - // Make sure requested table exists, returning the single table of it does - SchemaTableName table = prefix.toSchemaTableName(); - if (getTableHandle(session, table) != null) { - return ImmutableList.of(table); - } - - // Else, return empty list - return ImmutableList.of(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloMetadataFactory.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloMetadataFactory.java deleted file mode 100644 index d5f8bf27ba0d..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloMetadataFactory.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo; - -import javax.inject.Inject; - -import static java.util.Objects.requireNonNull; - -public class AccumuloMetadataFactory -{ - private final AccumuloClient client; - - @Inject - public AccumuloMetadataFactory(AccumuloClient client) - { - this.client = requireNonNull(client, "client is null"); - } - - public AccumuloMetadata create() - { - return new AccumuloMetadata(client); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloModule.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloModule.java deleted file mode 100644 index a341ef934a35..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloModule.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
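[editor's note] The deleted AccumuloMetadata above coordinates DDL rollback through a single AtomicReference slot: beginCreateTable/beginInsert park an undo action, the finish* methods clear it, and rollback() runs whatever is still parked. A minimal JDK-only sketch of that idiom (class and method names are mine, the logic is the class's):

    import java.util.concurrent.atomic.AtomicReference;

    public class RollbackSlotDemo
    {
        // at most one write operation may be in flight at a time
        private final AtomicReference<Runnable> rollbackAction = new AtomicReference<>();

        public void beginWrite(Runnable undo)
        {
            if (!rollbackAction.compareAndSet(null, undo)) {
                throw new IllegalStateException("Cannot begin a new write while in an existing one");
            }
        }

        public void commitWrite()
        {
            // success: the undo action is no longer needed
            rollbackAction.set(null);
        }

        public void rollback()
        {
            Runnable undo = rollbackAction.getAndSet(null);
            if (undo != null) {
                undo.run();
            }
        }

        public static void main(String[] args)
        {
            RollbackSlotDemo demo = new RollbackSlotDemo();
            demo.beginWrite(() -> System.out.println("dropping half-created table"));
            demo.rollback(); // prints the undo message
        }
    }
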
- */ -package io.prestosql.plugin.accumulo; - -import com.fasterxml.jackson.databind.DeserializationContext; -import com.fasterxml.jackson.databind.deser.std.FromStringDeserializer; -import com.google.inject.Binder; -import com.google.inject.Module; -import com.google.inject.Scopes; -import io.airlift.json.JsonCodec; -import io.airlift.log.Logger; -import io.prestosql.plugin.accumulo.conf.AccumuloConfig; -import io.prestosql.plugin.accumulo.conf.AccumuloSessionProperties; -import io.prestosql.plugin.accumulo.conf.AccumuloTableProperties; -import io.prestosql.plugin.accumulo.index.ColumnCardinalityCache; -import io.prestosql.plugin.accumulo.index.IndexLookup; -import io.prestosql.plugin.accumulo.io.AccumuloPageSinkProvider; -import io.prestosql.plugin.accumulo.io.AccumuloRecordSetProvider; -import io.prestosql.plugin.accumulo.metadata.AccumuloTable; -import io.prestosql.plugin.accumulo.metadata.ZooKeeperMetadataManager; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.type.Type; -import io.prestosql.spi.type.TypeId; -import io.prestosql.spi.type.TypeManager; -import org.apache.accumulo.core.client.AccumuloException; -import org.apache.accumulo.core.client.AccumuloSecurityException; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.Instance; -import org.apache.accumulo.core.client.ZooKeeperInstance; -import org.apache.accumulo.core.client.security.tokens.PasswordToken; -import org.apache.log4j.JulAppender; -import org.apache.log4j.Level; -import org.apache.log4j.PatternLayout; - -import javax.inject.Inject; -import javax.inject.Provider; - -import static io.airlift.configuration.ConfigBinder.configBinder; -import static io.airlift.json.JsonBinder.jsonBinder; -import static io.airlift.json.JsonCodecBinder.jsonCodecBinder; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.UNEXPECTED_ACCUMULO_ERROR; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.Objects.requireNonNull; - -/** - * Presto module to do all kinds of run Guice injection stuff! - *

<p>
- * WARNING: Contains black magick - */ -public class AccumuloModule - implements Module -{ - private final TypeManager typeManager; - - public AccumuloModule(TypeManager typeManager) - { - this.typeManager = requireNonNull(typeManager, "typeManager is null"); - } - - @Override - public void configure(Binder binder) - { - // Add appender to Log4J root logger - JulAppender appender = new JulAppender(); //create appender - appender.setLayout(new PatternLayout("%d %-5p %c - %m%n")); - appender.setThreshold(Level.INFO); - appender.activateOptions(); - org.apache.log4j.Logger.getRootLogger().addAppender(appender); - - binder.bind(TypeManager.class).toInstance(typeManager); - - binder.bind(AccumuloConnector.class).in(Scopes.SINGLETON); - binder.bind(AccumuloMetadata.class).in(Scopes.SINGLETON); - binder.bind(AccumuloMetadataFactory.class).in(Scopes.SINGLETON); - binder.bind(AccumuloClient.class).in(Scopes.SINGLETON); - binder.bind(AccumuloSplitManager.class).in(Scopes.SINGLETON); - binder.bind(AccumuloRecordSetProvider.class).in(Scopes.SINGLETON); - binder.bind(AccumuloPageSinkProvider.class).in(Scopes.SINGLETON); - binder.bind(AccumuloHandleResolver.class).in(Scopes.SINGLETON); - binder.bind(AccumuloSessionProperties.class).in(Scopes.SINGLETON); - binder.bind(AccumuloTableProperties.class).in(Scopes.SINGLETON); - binder.bind(ZooKeeperMetadataManager.class).in(Scopes.SINGLETON); - binder.bind(AccumuloTableManager.class).in(Scopes.SINGLETON); - binder.bind(IndexLookup.class).in(Scopes.SINGLETON); - binder.bind(ColumnCardinalityCache.class).in(Scopes.SINGLETON); - binder.bind(Connector.class).toProvider(ConnectorProvider.class); - - configBinder(binder).bindConfig(AccumuloConfig.class); - - jsonBinder(binder).addDeserializerBinding(Type.class).to(TypeDeserializer.class); - jsonCodecBinder(binder).bindMapJsonCodec(String.class, JsonCodec.listJsonCodec(AccumuloTable.class)); - } - - public static final class TypeDeserializer - extends FromStringDeserializer - { - private final TypeManager typeManager; - - @Inject - public TypeDeserializer(TypeManager typeManager) - { - super(Type.class); - this.typeManager = requireNonNull(typeManager, "typeManager is null"); - } - - @Override - protected Type _deserialize(String value, DeserializationContext context) - { - return typeManager.getType(TypeId.of(value)); - } - } - - private static class ConnectorProvider - implements Provider - { - private static final Logger LOG = Logger.get(ConnectorProvider.class); - - private final String instance; - private final String zooKeepers; - private final String username; - private final String password; - - @Inject - public ConnectorProvider(AccumuloConfig config) - { - requireNonNull(config, "config is null"); - this.instance = config.getInstance(); - this.zooKeepers = config.getZooKeepers(); - this.username = config.getUsername(); - this.password = config.getPassword(); - } - - @Override - public Connector get() - { - try { - Instance inst = new ZooKeeperInstance(instance, zooKeepers); - Connector connector = inst.getConnector(username, new PasswordToken(password.getBytes(UTF_8))); - LOG.info("Connection to instance %s at %s established, user %s", instance, zooKeepers, username); - return connector; - } - catch (AccumuloException | AccumuloSecurityException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to get connector to Accumulo", e); - } - } - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloPlugin.java 
b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloPlugin.java deleted file mode 100644 index 888ab9cb7f34..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloPlugin.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo; - -import com.google.common.collect.ImmutableList; -import io.prestosql.spi.Plugin; -import io.prestosql.spi.connector.ConnectorFactory; - -public class AccumuloPlugin - implements Plugin -{ - @Override - public Iterable getConnectorFactories() - { - return ImmutableList.of(new AccumuloConnectorFactory()); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloSplitManager.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloSplitManager.java deleted file mode 100644 index 196ce3745236..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloSplitManager.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
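[editor's note] The ConnectorProvider inside the deleted AccumuloModule above is the only place the plugin actually dials Accumulo. Stripped of Guice, the bootstrap reduces to the sketch below; the instance name, ZooKeeper quorum, and credentials are placeholders, it targets the pre-2.0 Accumulo client API this module uses, and it only runs against a live cluster.

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Instance;
    import org.apache.accumulo.core.client.ZooKeeperInstance;
    import org.apache.accumulo.core.client.security.tokens.PasswordToken;

    public class AccumuloConnectDemo
    {
        public static void main(String[] args) throws Exception
        {
            // resolve the instance through its ZooKeeper quorum, then authenticate
            Instance instance = new ZooKeeperInstance("default", "zookeeper1:2181");
            Connector connector = instance.getConnector("root", new PasswordToken("secret"));
            System.out.println("connected as " + connector.whoami());
        }
    }
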
- */ -package io.prestosql.plugin.accumulo; - -import com.google.common.collect.ImmutableList; -import io.prestosql.plugin.accumulo.model.AccumuloColumnConstraint; -import io.prestosql.plugin.accumulo.model.AccumuloColumnHandle; -import io.prestosql.plugin.accumulo.model.AccumuloSplit; -import io.prestosql.plugin.accumulo.model.AccumuloTableHandle; -import io.prestosql.plugin.accumulo.model.TabletSplitMetadata; -import io.prestosql.plugin.accumulo.model.WrappedRange; -import io.prestosql.spi.connector.ColumnHandle; -import io.prestosql.spi.connector.ConnectorSession; -import io.prestosql.spi.connector.ConnectorSplit; -import io.prestosql.spi.connector.ConnectorSplitManager; -import io.prestosql.spi.connector.ConnectorSplitSource; -import io.prestosql.spi.connector.ConnectorTableHandle; -import io.prestosql.spi.connector.ConnectorTransactionHandle; -import io.prestosql.spi.connector.FixedSplitSource; -import io.prestosql.spi.predicate.Domain; -import io.prestosql.spi.predicate.TupleDomain; -import io.prestosql.spi.predicate.TupleDomain.ColumnDomain; - -import javax.inject.Inject; - -import java.util.List; -import java.util.Optional; -import java.util.stream.Collectors; - -import static java.util.Objects.requireNonNull; - -public class AccumuloSplitManager - implements ConnectorSplitManager -{ - private final AccumuloClient client; - - @Inject - public AccumuloSplitManager(AccumuloClient client) - { - this.client = requireNonNull(client, "client is null"); - } - - @Override - public ConnectorSplitSource getSplits(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorTableHandle tableHandle, SplitSchedulingStrategy splitSchedulingStrategy) - { - AccumuloTableHandle handle = (AccumuloTableHandle) tableHandle; - - String schemaName = handle.getSchema(); - String tableName = handle.getTable(); - String rowIdName = handle.getRowId(); - - // Get non-row ID column constraints - List constraints = getColumnConstraints(rowIdName, handle.getConstraint()); - - // Get the row domain column range - Optional rDom = getRangeDomain(rowIdName, handle.getConstraint()); - - // Call out to our client to retrieve all tablet split metadata using the row ID domain and the secondary index - List tabletSplits = client.getTabletSplits(session, schemaName, tableName, rDom, constraints, handle.getSerializerInstance()); - - // Pack the tablet split metadata into a connector split - ImmutableList.Builder cSplits = ImmutableList.builder(); - for (TabletSplitMetadata splitMetadata : tabletSplits) { - AccumuloSplit split = new AccumuloSplit( - splitMetadata.getRanges().stream().map(WrappedRange::new).collect(Collectors.toList()), - splitMetadata.getHostPort()); - cSplits.add(split); - } - - return new FixedSplitSource(cSplits.build()); - } - - private static Optional getRangeDomain(String rowIdName, TupleDomain constraint) - { - if (constraint.getColumnDomains().isPresent()) { - for (ColumnDomain cd : constraint.getColumnDomains().get()) { - AccumuloColumnHandle col = (AccumuloColumnHandle) cd.getColumn(); - if (col.getName().equals(rowIdName)) { - return Optional.of(cd.getDomain()); - } - } - } - - return Optional.empty(); - } - - /** - * Gets a list of {@link AccumuloColumnConstraint} based on the given constraint ID, excluding the row ID column - * - * @param rowIdName Presto column name mapping to the Accumulo row ID - * @param constraint Set of query constraints - * @return List of all column constraints - */ - private static List getColumnConstraints(String rowIdName, TupleDomain constraint) 
- { - ImmutableList.Builder constraintBuilder = ImmutableList.builder(); - for (ColumnDomain columnDomain : constraint.getColumnDomains().get()) { - AccumuloColumnHandle columnHandle = (AccumuloColumnHandle) columnDomain.getColumn(); - - if (!columnHandle.getName().equals(rowIdName)) { - // Family and qualifier will exist for non-row ID columns - constraintBuilder.add(new AccumuloColumnConstraint( - columnHandle.getName(), - columnHandle.getFamily().get(), - columnHandle.getQualifier().get(), - Optional.of(columnDomain.getDomain()), - columnHandle.isIndexed())); - } - } - - return constraintBuilder.build(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloTableManager.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloTableManager.java deleted file mode 100644 index 7b2f2f792d02..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloTableManager.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo; - -import io.airlift.log.Logger; -import io.prestosql.spi.PrestoException; -import org.apache.accumulo.core.client.AccumuloException; -import org.apache.accumulo.core.client.AccumuloSecurityException; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.IteratorSetting; -import org.apache.accumulo.core.client.NamespaceExistsException; -import org.apache.accumulo.core.client.TableExistsException; -import org.apache.accumulo.core.client.TableNotFoundException; -import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope; -import org.apache.hadoop.io.Text; - -import javax.inject.Inject; - -import java.util.EnumSet; -import java.util.Map; -import java.util.Set; - -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.ACCUMULO_TABLE_DNE; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.ACCUMULO_TABLE_EXISTS; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.UNEXPECTED_ACCUMULO_ERROR; -import static java.util.Objects.requireNonNull; - -/** - * This class is a light wrapper for Accumulo's Connector object. - * It will perform the given operation, or throw an exception if an Accumulo- or ZooKeeper-based error occurs. 
- */ -public class AccumuloTableManager -{ - private static final Logger LOG = Logger.get(AccumuloTableManager.class); - private static final String DEFAULT = "default"; - private final Connector connector; - - @Inject - public AccumuloTableManager(Connector connector) - { - this.connector = requireNonNull(connector, "connector is null"); - } - - /** - * Ensures the given Accumulo namespace exist, creating it if necessary - * - * @param schema Presto schema (Accumulo namespace) - */ - public void ensureNamespace(String schema) - { - try { - // If the table schema is not "default" and the namespace does not exist, create it - if (!schema.equals(DEFAULT) && !connector.namespaceOperations().exists(schema)) { - connector.namespaceOperations().create(schema); - } - } - catch (AccumuloException | AccumuloSecurityException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to check for existence or create Accumulo namespace", e); - } - catch (NamespaceExistsException e) { - // Suppress race condition between test for existence and creation - LOG.warn("NamespaceExistsException suppressed when creating " + schema); - } - } - - public boolean exists(String table) - { - return connector.tableOperations().exists(table); - } - - public void createAccumuloTable(String table) - { - try { - connector.tableOperations().create(table); - } - catch (AccumuloException | AccumuloSecurityException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to create Accumulo table", e); - } - catch (TableExistsException e) { - throw new PrestoException(ACCUMULO_TABLE_EXISTS, "Accumulo table already exists", e); - } - } - - public void setLocalityGroups(String tableName, Map> groups) - { - if (groups.isEmpty()) { - return; - } - - try { - connector.tableOperations().setLocalityGroups(tableName, groups); - LOG.debug("Set locality groups for %s to %s", tableName, groups); - } - catch (AccumuloException | AccumuloSecurityException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to set locality groups", e); - } - catch (TableNotFoundException e) { - throw new PrestoException(ACCUMULO_TABLE_DNE, "Failed to set locality groups, table does not exist", e); - } - } - - public void setIterator(String table, IteratorSetting setting) - { - try { - // Remove any existing iterator settings of the same name, if applicable - Map> iterators = connector.tableOperations().listIterators(table); - if (iterators.containsKey(setting.getName())) { - connector.tableOperations().removeIterator(table, setting.getName(), iterators.get(setting.getName())); - } - - connector.tableOperations().attachIterator(table, setting); - } - catch (AccumuloSecurityException | AccumuloException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to set iterator on table " + table, e); - } - catch (TableNotFoundException e) { - throw new PrestoException(ACCUMULO_TABLE_DNE, "Failed to set iterator, table does not exist", e); - } - } - - public void deleteAccumuloTable(String tableName) - { - try { - connector.tableOperations().delete(tableName); - } - catch (AccumuloException | AccumuloSecurityException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to delete Accumulo table", e); - } - catch (TableNotFoundException e) { - throw new PrestoException(ACCUMULO_TABLE_DNE, "Failed to delete Accumulo table, does not exist", e); - } - } - - public void renameAccumuloTable(String oldName, String newName) - { - try { - connector.tableOperations().rename(oldName, newName); - } - catch 
(AccumuloSecurityException | AccumuloException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to rename table", e); - } - catch (TableNotFoundException e) { - throw new PrestoException(ACCUMULO_TABLE_DNE, "Failed to rename table, old table does not exist", e); - } - catch (TableExistsException e) { - throw new PrestoException(ACCUMULO_TABLE_EXISTS, "Failed to rename table, new table already exists", e); - } - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloTransactionHandle.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloTransactionHandle.java deleted file mode 100644 index 317bcdcc0116..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/AccumuloTransactionHandle.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import io.prestosql.spi.connector.ConnectorTransactionHandle; - -import java.util.Objects; -import java.util.UUID; - -import static com.google.common.base.MoreObjects.toStringHelper; -import static java.util.Objects.requireNonNull; - -public class AccumuloTransactionHandle - implements ConnectorTransactionHandle -{ - private final UUID uuid; - - public AccumuloTransactionHandle() - { - this(UUID.randomUUID()); - } - - @JsonCreator - public AccumuloTransactionHandle(@JsonProperty("uuid") UUID uuid) - { - this.uuid = requireNonNull(uuid, "uuid is null"); - } - - @JsonProperty - public UUID getUuid() - { - return uuid; - } - - @Override - public boolean equals(Object obj) - { - if (this == obj) { - return true; - } - if ((obj == null) || (getClass() != obj.getClass())) { - return false; - } - - return Objects.equals(uuid, ((AccumuloTransactionHandle) obj).uuid); - } - - @Override - public int hashCode() - { - return Objects.hash(uuid); - } - - @Override - public String toString() - { - return toStringHelper(this).add("uuid", uuid).toString(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/Types.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/Types.java deleted file mode 100644 index ba5951ebebda..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/Types.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
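[editor's note] One detail of the deleted AccumuloTableManager above that is easy to misread: ensureNamespace() deliberately swallows NamespaceExistsException, because the existence check and the create call race against concurrent writers. A condensed sketch of the idiom; the method shape is mine, the logic is the class's.

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.NamespaceExistsException;

    public final class EnsureNamespace
    {
        private EnsureNamespace() {}

        public static void ensure(Connector connector, String schema) throws Exception
        {
            // "default" is Accumulo's built-in namespace and never needs creating
            if (!"default".equals(schema) && !connector.namespaceOperations().exists(schema)) {
                try {
                    connector.namespaceOperations().create(schema);
                }
                catch (NamespaceExistsException e) {
                    // lost the race to another creator; the namespace now
                    // exists, which is all this method set out to guarantee
                }
            }
        }
    }
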
- */ -package io.prestosql.plugin.accumulo; - -import io.prestosql.spi.type.ArrayType; -import io.prestosql.spi.type.MapType; -import io.prestosql.spi.type.Type; - -/** - * Utility class for Presto Type-related functionality. - */ -public final class Types -{ - private Types() {} - - public static boolean isArrayType(Type type) - { - return type instanceof ArrayType; - } - - public static boolean isMapType(Type type) - { - return type instanceof MapType; - } - - /** - * Gets the element type of the given array type. Does not validate that the given type is an array. - * - * @param type An array type - * @return Element type of the array - * @throws IndexOutOfBoundsException If type is not an array - * @see Types#isArrayType - */ - public static Type getElementType(Type type) - { - return type.getTypeParameters().get(0); - } - - /** - * Gets the key type of the given map type. Does not validate that the given type is a map. - * - * @param type A map type - * @return Key type of the map - * @throws IndexOutOfBoundsException If type is not a map - * @see Types#isMapType - */ - public static Type getKeyType(Type type) - { - return type.getTypeParameters().get(0); - } - - /** - * Gets the value type of the given map type. Does not validate that the given type is a map. - * - * @param type A map type - * @return Value type of the map - * @throws IndexOutOfBoundsException If type is not a map - * @see Types#isMapType - */ - public static Type getValueType(Type type) - { - return type.getTypeParameters().get(1); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloConfig.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloConfig.java deleted file mode 100644 index 7939ab88446d..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloConfig.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.conf; - -import io.airlift.configuration.Config; -import io.airlift.configuration.ConfigDescription; -import io.airlift.configuration.ConfigSecuritySensitive; -import io.airlift.units.Duration; - -import javax.validation.constraints.Min; -import javax.validation.constraints.NotNull; - -import java.util.concurrent.TimeUnit; - -/** - * File-based configuration properties for the Accumulo connector - */ -public class AccumuloConfig -{ - public static final String INSTANCE = "accumulo.instance"; - public static final String ZOOKEEPERS = "accumulo.zookeepers"; - public static final String USERNAME = "accumulo.username"; - public static final String PASSWORD = "accumulo.password"; - public static final String ZOOKEEPER_METADATA_ROOT = "accumulo.zookeeper.metadata.root"; - public static final String CARDINALITY_CACHE_SIZE = "accumulo.cardinality.cache.size"; - public static final String CARDINALITY_CACHE_EXPIRE_DURATION = "accumulo.cardinality.cache.expire.duration"; - - private String instance; - private String zooKeepers; - private String username; - private String password; - private String zkMetadataRoot = "/presto-accumulo"; - private int cardinalityCacheSize = 100_000; - private Duration cardinalityCacheExpiration = new Duration(5, TimeUnit.MINUTES); - - @NotNull - public String getInstance() - { - return this.instance; - } - - @Config(INSTANCE) - @ConfigDescription("Accumulo instance name") - public AccumuloConfig setInstance(String instance) - { - this.instance = instance; - return this; - } - - @NotNull - public String getZooKeepers() - { - return this.zooKeepers; - } - - @Config(ZOOKEEPERS) - @ConfigDescription("ZooKeeper quorum connect string for Accumulo") - public AccumuloConfig setZooKeepers(String zooKeepers) - { - this.zooKeepers = zooKeepers; - return this; - } - - @NotNull - public String getUsername() - { - return this.username; - } - - @Config(USERNAME) - @ConfigDescription("Sets the user to use when interacting with Accumulo. 
This user will require administrative permissions") - public AccumuloConfig setUsername(String username) - { - this.username = username; - return this; - } - - @NotNull - public String getPassword() - { - return this.password; - } - - @Config(PASSWORD) - @ConfigSecuritySensitive - @ConfigDescription("Sets the password for the configured user") - public AccumuloConfig setPassword(String password) - { - this.password = password; - return this; - } - - @NotNull - public String getZkMetadataRoot() - { - return zkMetadataRoot; - } - - @Config(ZOOKEEPER_METADATA_ROOT) - @ConfigDescription("Sets the root znode for metadata storage") - public void setZkMetadataRoot(String zkMetadataRoot) - { - this.zkMetadataRoot = zkMetadataRoot; - } - - @NotNull - @Min(1) - public int getCardinalityCacheSize() - { - return cardinalityCacheSize; - } - - @Config(CARDINALITY_CACHE_SIZE) - @ConfigDescription("Sets the cardinality cache size") - public void setCardinalityCacheSize(int cardinalityCacheSize) - { - this.cardinalityCacheSize = cardinalityCacheSize; - } - - @NotNull - public Duration getCardinalityCacheExpiration() - { - return cardinalityCacheExpiration; - } - - @Config(CARDINALITY_CACHE_EXPIRE_DURATION) - @ConfigDescription("Sets the cardinality cache expiration") - public void setCardinalityCacheExpiration(Duration cardinalityCacheExpiration) - { - this.cardinalityCacheExpiration = cardinalityCacheExpiration; - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloSessionProperties.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloSessionProperties.java deleted file mode 100644 index fee1f433e314..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloSessionProperties.java +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.conf; - -import com.google.common.collect.ImmutableList; -import io.airlift.units.Duration; -import io.prestosql.spi.connector.ConnectorSession; -import io.prestosql.spi.session.PropertyMetadata; - -import javax.inject.Inject; - -import java.util.List; - -import static io.prestosql.spi.session.PropertyMetadata.booleanProperty; -import static io.prestosql.spi.session.PropertyMetadata.doubleProperty; -import static io.prestosql.spi.session.PropertyMetadata.integerProperty; -import static io.prestosql.spi.session.PropertyMetadata.stringProperty; -import static io.prestosql.spi.type.VarcharType.VARCHAR; -import static java.util.concurrent.TimeUnit.MILLISECONDS; - -/** - * Class contains all session-based properties for the Accumulo connector. - * Use SHOW SESSION to view all available properties in the Presto CLI. - *

<p>
- * Can set the property using: - *

<p>
- * SET SESSION <property> = <value>; - */ -public final class AccumuloSessionProperties -{ - private static final String OPTIMIZE_LOCALITY_ENABLED = "optimize_locality_enabled"; - private static final String OPTIMIZE_SPLIT_RANGES_ENABLED = "optimize_split_ranges_enabled"; - private static final String OPTIMIZE_INDEX_ENABLED = "optimize_index_enabled"; - private static final String INDEX_ROWS_PER_SPLIT = "index_rows_per_split"; - private static final String INDEX_THRESHOLD = "index_threshold"; - private static final String INDEX_LOWEST_CARDINALITY_THRESHOLD = "index_lowest_cardinality_threshold"; - private static final String INDEX_METRICS_ENABLED = "index_metrics_enabled"; - private static final String SCAN_USERNAME = "scan_username"; - private static final String INDEX_SHORT_CIRCUIT_CARDINALITY_FETCH = "index_short_circuit_cardinality_fetch"; - private static final String INDEX_CARDINALITY_CACHE_POLLING_DURATION = "index_cardinality_cache_polling_duration"; - - private final List> sessionProperties; - - @Inject - public AccumuloSessionProperties() - { - sessionProperties = ImmutableList.of( - booleanProperty( - OPTIMIZE_LOCALITY_ENABLED, - "Set to true to enable data locality for non-indexed scans. Default true.", true, - false), - booleanProperty( - OPTIMIZE_SPLIT_RANGES_ENABLED, - "Set to true to split non-indexed queries by tablet splits. Should generally be true.", - true, false), - stringProperty( - SCAN_USERNAME, - "User to impersonate when scanning the tables. This property trumps the scan_auths table property. Default is the user in the configuration file.", null, false), - booleanProperty( - OPTIMIZE_INDEX_ENABLED, - "Set to true to enable usage of the secondary index on query. Default true.", - true, - false), - integerProperty( - INDEX_ROWS_PER_SPLIT, - "The number of Accumulo row IDs that are packed into a single Presto split. Default 10000", - 10000, - false), - doubleProperty( - INDEX_THRESHOLD, - "The ratio between number of rows to be scanned based on the index over the total number of rows. If the ratio is below this threshold, the index will be used. Default .2", - 0.2, - false), - doubleProperty( - INDEX_LOWEST_CARDINALITY_THRESHOLD, - "The threshold where the column with the lowest cardinality will be used instead of computing an intersection of ranges in the secondary index. Secondary index must be enabled. Default .01", - 0.01, - false), - booleanProperty( - INDEX_METRICS_ENABLED, - "Set to true to enable usage of the metrics table to optimize usage of the index. Default true", - true, - false), - booleanProperty( - INDEX_SHORT_CIRCUIT_CARDINALITY_FETCH, - "Short circuit the retrieval of index metrics once any column is less than the lowest cardinality threshold. Default true", - true, - false), - durationProperty( - INDEX_CARDINALITY_CACHE_POLLING_DURATION, - "Sets the cardinality cache polling duration for short circuit retrieval of index metrics. 
Default 10ms", - new Duration(10, MILLISECONDS), - false)); - } - - public List> getSessionProperties() - { - return sessionProperties; - } - - public static boolean isOptimizeLocalityEnabled(ConnectorSession session) - { - return session.getProperty(OPTIMIZE_LOCALITY_ENABLED, Boolean.class); - } - - public static boolean isOptimizeSplitRangesEnabled(ConnectorSession session) - { - return session.getProperty(OPTIMIZE_SPLIT_RANGES_ENABLED, Boolean.class); - } - - public static boolean isOptimizeIndexEnabled(ConnectorSession session) - { - return session.getProperty(OPTIMIZE_INDEX_ENABLED, Boolean.class); - } - - public static double getIndexThreshold(ConnectorSession session) - { - return session.getProperty(INDEX_THRESHOLD, Double.class); - } - - public static int getNumIndexRowsPerSplit(ConnectorSession session) - { - return session.getProperty(INDEX_ROWS_PER_SPLIT, Integer.class); - } - - public static double getIndexSmallCardThreshold(ConnectorSession session) - { - return session.getProperty(INDEX_LOWEST_CARDINALITY_THRESHOLD, Double.class); - } - - public static Duration getIndexCardinalityCachePollingDuration(ConnectorSession session) - { - return session.getProperty(INDEX_CARDINALITY_CACHE_POLLING_DURATION, Duration.class); - } - - public static boolean isIndexMetricsEnabled(ConnectorSession session) - { - return session.getProperty(INDEX_METRICS_ENABLED, Boolean.class); - } - - public static String getScanUsername(ConnectorSession session) - { - return session.getProperty(SCAN_USERNAME, String.class); - } - - public static boolean isIndexShortCircuitEnabled(ConnectorSession session) - { - return session.getProperty(INDEX_SHORT_CIRCUIT_CARDINALITY_FETCH, Boolean.class); - } - - private static PropertyMetadata durationProperty(String name, String description, Duration defaultValue, boolean hidden) - { - return new PropertyMetadata<>( - name, - description, - VARCHAR, - Duration.class, - defaultValue, - hidden, - value -> Duration.valueOf((String) value), - Duration::toString); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloTableProperties.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloTableProperties.java deleted file mode 100644 index 4cb63890156a..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/conf/AccumuloTableProperties.java +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
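[editor's note] For reference, the session properties defined by the deleted AccumuloSessionProperties above are set per catalog from the CLI. Assuming the catalog is named accumulo, usage would have looked like:

    SHOW SESSION;
    SET SESSION accumulo.optimize_index_enabled = false;
    SET SESSION accumulo.index_threshold = 0.5;
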
- */ -package io.prestosql.plugin.accumulo.conf; - -import com.google.common.base.Splitter; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Iterables; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.plugin.accumulo.serializers.LexicoderRowSerializer; -import io.prestosql.plugin.accumulo.serializers.StringRowSerializer; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.session.PropertyMetadata; -import io.prestosql.spi.type.VarcharType; -import org.apache.commons.lang3.StringUtils; -import org.apache.commons.lang3.tuple.Pair; - -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Optional; -import java.util.Set; - -import static com.google.common.base.Preconditions.checkState; -import static io.prestosql.spi.StandardErrorCode.INVALID_TABLE_PROPERTY; -import static io.prestosql.spi.session.PropertyMetadata.booleanProperty; -import static io.prestosql.spi.session.PropertyMetadata.stringProperty; -import static java.util.Objects.requireNonNull; - -/** - * Class contains all table properties for the Accumulo connector. Used when creating a table: - *

- * CREATE TABLE foo (a VARCHAR, b INT) - * WITH (column_mapping = 'b:md:b', external = true); - */ -public final class AccumuloTableProperties -{ - public static final String COLUMN_MAPPING = "column_mapping"; - public static final String INDEX_COLUMNS = "index_columns"; - public static final String EXTERNAL = "external"; - public static final String LOCALITY_GROUPS = "locality_groups"; - public static final String ROW_ID = "row_id"; - public static final String SERIALIZER = "serializer"; - public static final String SCAN_AUTHS = "scan_auths"; - private static final Splitter COLON_SPLITTER = Splitter.on(':').trimResults(); - private static final Splitter COMMA_SPLITTER = Splitter.on(',').omitEmptyStrings().trimResults(); - private static final Splitter PIPE_SPLITTER = Splitter.on('|').omitEmptyStrings().trimResults(); - - private final List> tableProperties; - - public AccumuloTableProperties() - { - PropertyMetadata s1 = stringProperty( - COLUMN_MAPPING, - "Comma-delimited list of column metadata: col_name:col_family:col_qualifier,[...]. Required for external tables. Not setting this property results in auto-generated column names.", - null, - false); - - PropertyMetadata s2 = stringProperty( - INDEX_COLUMNS, - "A comma-delimited list of Presto columns that are indexed in this table's corresponding index table. Default is no indexed columns.", - "", - false); - - PropertyMetadata s3 = booleanProperty( - EXTERNAL, - "If true, Presto will only do metadata operations for the table. Else, Presto will create and drop Accumulo tables where appropriate. Default false.", - false, - false); - - PropertyMetadata s4 = stringProperty( - LOCALITY_GROUPS, - "List of locality groups to set on the Accumulo table. Only valid on internal tables. String format is locality group name, colon, comma delimited list of Presto column names in the group. Groups are delimited by pipes. Example: group1:colA,colB,colC|group2:colD,colE,colF|etc.... Default is no locality groups.", - null, - false); - - PropertyMetadata s5 = stringProperty( - ROW_ID, - "Presto column name that maps to the Accumulo row ID. Default is the first column.", - null, - false); - - PropertyMetadata s6 = new PropertyMetadata<>( - SERIALIZER, - "Serializer for Accumulo data encodings. Can either be 'default', 'string', 'lexicoder', or a Java class name. Default is 'default', i.e. the value from AccumuloRowSerializer.getDefault(), i.e. 'lexicoder'.", - VarcharType.VARCHAR, String.class, - AccumuloRowSerializer.getDefault().getClass().getName(), - false, - x -> x.equals("default") - ? AccumuloRowSerializer.getDefault().getClass().getName() - : (x.equals("string") ? StringRowSerializer.class.getName() - : (x.equals("lexicoder") - ? LexicoderRowSerializer.class.getName() - : (String) x)), - object -> object); - - PropertyMetadata s7 = stringProperty( - SCAN_AUTHS, - "Scan-time authorizations set on the batch scanner. Default is all scan authorizations for the user", - null, - false); - - tableProperties = ImmutableList.of(s1, s2, s3, s4, s5, s6, s7); - } - - public List> getTableProperties() - { - return tableProperties; - } - - /** - * Gets the value of the column_mapping property, or Optional.empty() if not set. - *

-     * Parses the value into a map of Presto column name to a pair of strings, the Accumulo column family and qualifier.
-     *
-     * @param tableProperties The map of table properties
-     * @return The column mapping, presto name to (accumulo column family, qualifier)
-     */
-    public static Optional<Map<String, Pair<String, String>>> getColumnMapping(Map<String, Object> tableProperties)
-    {
-        requireNonNull(tableProperties);
-
-        @SuppressWarnings("unchecked")
-        String strMapping = (String) tableProperties.get(COLUMN_MAPPING);
-        if (strMapping == null) {
-            return Optional.empty();
-        }
-
-        // Parse out the column mapping
-        // This is a comma-delimited list of "presto column:accumulo column family:accumulo qualifier" triplets
-        ImmutableMap.Builder<String, Pair<String, String>> mapping = ImmutableMap.builder();
-        for (String m : COMMA_SPLITTER.split(strMapping)) {
-            String[] tokens = Iterables.toArray(COLON_SPLITTER.split(m), String.class);
-            checkState(tokens.length == 3, "Mapping of %s contains %s tokens instead of 3", m, tokens.length);
-            mapping.put(tokens[0], Pair.of(tokens[1], tokens[2]));
-        }
-
-        return Optional.of(mapping.build());
-    }
-
-    public static Optional<List<String>> getIndexColumns(Map<String, Object> tableProperties)
-    {
-        requireNonNull(tableProperties);
-
-        @SuppressWarnings("unchecked")
-        String indexColumns = (String) tableProperties.get(INDEX_COLUMNS);
-        if (indexColumns == null) {
-            return Optional.empty();
-        }
-
-        return Optional.of(Arrays.asList(StringUtils.split(indexColumns, ',')));
-    }
-
-    /**
-     * Gets the configured locality groups for the table, or Optional.empty() if not set.
-     *

-     * All strings are lowercase.
-     *
-     * @param tableProperties The map of table properties
-     * @return Optional map of locality groups
-     */
-    public static Optional<Map<String, Set<String>>> getLocalityGroups(Map<String, Object> tableProperties)
-    {
-        requireNonNull(tableProperties);
-
-        @SuppressWarnings("unchecked")
-        String groupStr = (String) tableProperties.get(LOCALITY_GROUPS);
-        if (groupStr == null) {
-            return Optional.empty();
-        }
-
-        ImmutableMap.Builder<String, Set<String>> groups = ImmutableMap.builder();
-
-        // Split all configured locality groups
-        for (String group : PIPE_SPLITTER.split(groupStr)) {
-            String[] locGroups = Iterables.toArray(COLON_SPLITTER.split(group), String.class);
-
-            if (locGroups.length != 2) {
-                throw new PrestoException(INVALID_TABLE_PROPERTY, "Locality groups string is malformed. See documentation for proper format.");
-            }
-
-            String grpName = locGroups[0];
-            ImmutableSet.Builder<String> colSet = ImmutableSet.builder();
-
-            for (String f : COMMA_SPLITTER.split(locGroups[1])) {
-                colSet.add(f.toLowerCase(Locale.ENGLISH));
-            }
-
-            groups.put(grpName.toLowerCase(Locale.ENGLISH), colSet.build());
-        }
-
-        return Optional.of(groups.build());
-    }
-
-    public static Optional<String> getRowId(Map<String, Object> tableProperties)
-    {
-        requireNonNull(tableProperties);
-
-        @SuppressWarnings("unchecked")
-        String rowId = (String) tableProperties.get(ROW_ID);
-        return Optional.ofNullable(rowId);
-    }
-
-    public static Optional<String> getScanAuthorizations(Map<String, Object> tableProperties)
-    {
-        requireNonNull(tableProperties);
-
-        @SuppressWarnings("unchecked")
-        String scanAuths = (String) tableProperties.get(SCAN_AUTHS);
-        return Optional.ofNullable(scanAuths);
-    }
-
-    /**
-     * Gets the {@link AccumuloRowSerializer} class name to use for this table
-     *
-     * @param tableProperties The map of table properties
-     * @return The name of the AccumuloRowSerializer class
-     */
-    public static String getSerializerClass(Map<String, Object> tableProperties)
-    {
-        requireNonNull(tableProperties);
-
-        @SuppressWarnings("unchecked")
-        String serializerClass = (String) tableProperties.get(SERIALIZER);
-        return serializerClass;
-    }
-
-    public static boolean isExternal(Map<String, Object> tableProperties)
-    {
-        requireNonNull(tableProperties);
-
-        @SuppressWarnings("unchecked")
-        Boolean external = (Boolean) tableProperties.get(EXTERNAL);
-        return external;
-    }
-}
diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/ColumnCardinalityCache.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/ColumnCardinalityCache.java
deleted file mode 100644
index 4160e8fb1b3b..000000000000
--- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/ColumnCardinalityCache.java
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ -package io.prestosql.plugin.accumulo.index; - -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableMultimap; -import com.google.common.collect.Iterables; -import com.google.common.collect.ListMultimap; -import com.google.common.collect.Multimap; -import com.google.common.collect.MultimapBuilder; -import io.airlift.concurrent.BoundedExecutor; -import io.airlift.log.Logger; -import io.airlift.units.Duration; -import io.prestosql.plugin.accumulo.conf.AccumuloConfig; -import io.prestosql.plugin.accumulo.model.AccumuloColumnConstraint; -import io.prestosql.spi.PrestoException; -import org.apache.accumulo.core.client.BatchScanner; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.TableNotFoundException; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.PartialKey; -import org.apache.accumulo.core.data.Range; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.security.Authorizations; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.io.Text; - -import javax.annotation.PreDestroy; -import javax.inject.Inject; - -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; -import java.util.Optional; -import java.util.concurrent.CompletionService; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorCompletionService; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.function.Function; -import java.util.stream.Collectors; - -import static com.google.common.base.MoreObjects.toStringHelper; -import static com.google.common.collect.Streams.stream; -import static io.airlift.concurrent.Threads.daemonThreadsNamed; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.UNEXPECTED_ACCUMULO_ERROR; -import static io.prestosql.plugin.accumulo.index.Indexer.CARDINALITY_CQ_AS_TEXT; -import static io.prestosql.plugin.accumulo.index.Indexer.getIndexColumnFamily; -import static io.prestosql.plugin.accumulo.index.Indexer.getMetricsTableName; -import static io.prestosql.spi.StandardErrorCode.FUNCTION_IMPLEMENTATION_ERROR; -import static java.lang.Long.parseLong; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.Objects.requireNonNull; -import static java.util.concurrent.Executors.newCachedThreadPool; -import static java.util.concurrent.TimeUnit.MILLISECONDS; - -/** - * This class is an indexing utility to cache the cardinality of a column value for every table. - * Each table has its own cache that is independent of every other, and every column also has its - * own Guava cache. Use of this utility can have a significant impact for retrieving the cardinality - * of many columns, preventing unnecessary accesses to the metrics table in Accumulo for a - * cardinality that won't change much. 
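
The caching just described is, at its core, a bounded Guava cache with write expiration placed in front of a metrics-table scan. A minimal, self-contained sketch of that pattern follows; the key type and loader are simplified stand-ins, not this class's actual code (the real loader scans the Accumulo metrics table with a BatchScanner):

    import com.google.common.cache.CacheBuilder;
    import com.google.common.cache.CacheLoader;
    import com.google.common.cache.LoadingCache;

    import java.util.concurrent.TimeUnit;

    public class CardinalityCacheSketch
    {
        public static void main(String[] args) throws Exception
        {
            LoadingCache<String, Long> cardinalities = CacheBuilder.newBuilder()
                    .maximumSize(100_000)                  // bounded, like the connector's cache-size config
                    .expireAfterWrite(5, TimeUnit.MINUTES) // entries age out, like its expiration config
                    .build(new CacheLoader<String, Long>()
                    {
                        @Override
                        public Long load(String columnValue)
                        {
                            // Stand-in for scanning the Accumulo metrics table
                            return (long) columnValue.length();
                        }
                    });

            System.out.println(cardinalities.get("abc")); // first access runs the loader
            System.out.println(cardinalities.get("abc")); // subsequent accesses hit the cache
        }
    }
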
- */ -public class ColumnCardinalityCache -{ - private static final Logger LOG = Logger.get(ColumnCardinalityCache.class); - private final Connector connector; - private final ExecutorService coreExecutor; - private final BoundedExecutor executorService; - private final LoadingCache cache; - - @Inject - public ColumnCardinalityCache(Connector connector, AccumuloConfig config) - { - this.connector = requireNonNull(connector, "connector is null"); - int size = requireNonNull(config, "config is null").getCardinalityCacheSize(); - Duration expireDuration = config.getCardinalityCacheExpiration(); - - // Create a bounded executor with a pool size at 4x number of processors - this.coreExecutor = newCachedThreadPool(daemonThreadsNamed("cardinality-lookup-%s")); - this.executorService = new BoundedExecutor(coreExecutor, 4 * Runtime.getRuntime().availableProcessors()); - - LOG.debug("Created new cache size %d expiry %s", size, expireDuration); - cache = CacheBuilder.newBuilder() - .maximumSize(size) - .expireAfterWrite(expireDuration.toMillis(), MILLISECONDS) - .build(new CardinalityCacheLoader()); - } - - @PreDestroy - public void shutdown() - { - coreExecutor.shutdownNow(); - } - - /** - * Gets the cardinality for each {@link AccumuloColumnConstraint}. - * Given constraints are expected to be indexed! Who knows what would happen if they weren't! - * - * @param schema Schema name - * @param table Table name - * @param auths Scan authorizations - * @param idxConstraintRangePairs Mapping of all ranges for a given constraint - * @param earlyReturnThreshold Smallest acceptable cardinality to return early while other tasks complete - * @param pollingDuration Duration for polling the cardinality completion service - * @return An immutable multimap of cardinality to column constraint, sorted by cardinality from smallest to largest - * @throws TableNotFoundException If the metrics table does not exist - * @throws ExecutionException If another error occurs; I really don't even know anymore. 
- */ - public Multimap getCardinalities(String schema, String table, Authorizations auths, Multimap idxConstraintRangePairs, long earlyReturnThreshold, Duration pollingDuration) - { - // Submit tasks to the executor to fetch column cardinality, adding it to the Guava cache if necessary - CompletionService> executor = new ExecutorCompletionService<>(executorService); - idxConstraintRangePairs.asMap().forEach((key, value) -> executor.submit(() -> { - long cardinality = getColumnCardinality(schema, table, auths, key.getFamily(), key.getQualifier(), value); - LOG.debug("Cardinality for column %s is %s", key.getName(), cardinality); - return Pair.of(cardinality, key); - })); - - // Create a multi map sorted by cardinality - ListMultimap cardinalityToConstraints = MultimapBuilder.treeKeys().arrayListValues().build(); - try { - boolean earlyReturn = false; - int numTasks = idxConstraintRangePairs.asMap().entrySet().size(); - do { - // Sleep for the polling duration to allow concurrent tasks to run for this time - Thread.sleep(pollingDuration.toMillis()); - - // Poll each task, retrieving the result if it is done - for (int i = 0; i < numTasks; ++i) { - Future> futureCardinality = executor.poll(); - if (futureCardinality != null && futureCardinality.isDone()) { - Pair columnCardinality = futureCardinality.get(); - cardinalityToConstraints.put(columnCardinality.getLeft(), columnCardinality.getRight()); - } - } - - // If the smallest cardinality is present and below the threshold, set the earlyReturn flag - Optional> smallestCardinality = cardinalityToConstraints.entries().stream().findFirst(); - if (smallestCardinality.isPresent()) { - if (smallestCardinality.get().getKey() <= earlyReturnThreshold) { - LOG.info("Cardinality %s, is below threshold. Returning early while other tasks finish", smallestCardinality); - earlyReturn = true; - } - } - } - while (!earlyReturn && cardinalityToConstraints.entries().size() < numTasks); - } - catch (ExecutionException | InterruptedException e) { - if (e instanceof InterruptedException) { - Thread.currentThread().interrupt(); - } - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Exception when getting cardinality", e); - } - - // Create a copy of the cardinalities - return ImmutableMultimap.copyOf(cardinalityToConstraints); - } - - /** - * Gets the column cardinality for all of the given range values. May reach out to the - * metrics table in Accumulo to retrieve new cache elements. - * - * @param schema Table schema - * @param table Table name - * @param auths Scan authorizations - * @param family Accumulo column family - * @param qualifier Accumulo column qualifier - * @param colValues All range values to summarize for the cardinality - * @return The cardinality of the column - */ - public long getColumnCardinality(String schema, String table, Authorizations auths, String family, String qualifier, Collection colValues) - throws ExecutionException - { - LOG.debug("Getting cardinality for %s:%s", family, qualifier); - - // Collect all exact Accumulo Ranges, i.e. single value entries vs. 
a full scan - Collection exactRanges = colValues.stream() - .filter(ColumnCardinalityCache::isExact) - .map(range -> new CacheKey(schema, table, family, qualifier, range, auths)) - .collect(Collectors.toList()); - - LOG.debug("Column values contain %s exact ranges of %s", exactRanges.size(), colValues.size()); - - // Sum the cardinalities for the exact-value Ranges - // This is where the reach-out to Accumulo occurs for all Ranges that have not - // previously been fetched - long sum = cache.getAll(exactRanges).values().stream().mapToLong(Long::longValue).sum(); - - // If these collection sizes are not equal, - // then there is at least one non-exact range - if (exactRanges.size() != colValues.size()) { - // for each range in the column value - for (Range range : colValues) { - // if this range is not exact - if (!isExact(range)) { - // Then get the value for this range using the single-value cache lookup - sum += cache.get(new CacheKey(schema, table, family, qualifier, range, auths)); - } - } - } - - return sum; - } - - private static boolean isExact(Range range) - { - return !range.isInfiniteStartKey() && !range.isInfiniteStopKey() && - range.getStartKey().followingKey(PartialKey.ROW).equals(range.getEndKey()); - } - - /** - * Complex key for the CacheLoader - */ - private static class CacheKey - { - private final String schema; - private final String table; - private final String family; - private final String qualifier; - private final Range range; - private final Authorizations auths; - - public CacheKey( - String schema, - String table, - String family, - String qualifier, - Range range, - Authorizations auths) - { - this.schema = requireNonNull(schema, "schema is null"); - this.table = requireNonNull(table, "table is null"); - this.family = requireNonNull(family, "family is null"); - this.qualifier = requireNonNull(qualifier, "qualifier is null"); - this.range = requireNonNull(range, "range is null"); - this.auths = requireNonNull(auths, "auths is null"); - } - - public String getSchema() - { - return schema; - } - - public String getTable() - { - return table; - } - - public String getFamily() - { - return family; - } - - public String getQualifier() - { - return qualifier; - } - - public Range getRange() - { - return range; - } - - public Authorizations getAuths() - { - return auths; - } - - @Override - public int hashCode() - { - return Objects.hash(schema, table, family, qualifier, range); - } - - @Override - public boolean equals(Object obj) - { - if (this == obj) { - return true; - } - - if ((obj == null) || (getClass() != obj.getClass())) { - return false; - } - - CacheKey other = (CacheKey) obj; - return Objects.equals(this.schema, other.schema) - && Objects.equals(this.table, other.table) - && Objects.equals(this.family, other.family) - && Objects.equals(this.qualifier, other.qualifier) - && Objects.equals(this.range, other.range); - } - - @Override - public String toString() - { - return toStringHelper(this) - .add("schema", schema) - .add("table", table) - .add("family", family) - .add("qualifier", qualifier) - .add("range", range).toString(); - } - } - - /** - * Internal class for loading the cardinality from Accumulo - */ - private class CardinalityCacheLoader - extends CacheLoader - { - /** - * Loads the cardinality for the given Range. Uses a BatchScanner and sums the cardinality for all values that encapsulate the Range. 
- * - * @param key Range to get the cardinality for - * @return The cardinality of the column, which would be zero if the value does not exist - */ - @Override - public Long load(CacheKey key) - throws Exception - { - LOG.debug("Loading a non-exact range from Accumulo: %s", key); - // Get metrics table name and the column family for the scanner - String metricsTable = getMetricsTableName(key.getSchema(), key.getTable()); - Text columnFamily = new Text(getIndexColumnFamily(key.getFamily().getBytes(UTF_8), key.getQualifier().getBytes(UTF_8)).array()); - - // Create scanner for querying the range - BatchScanner scanner = connector.createBatchScanner(metricsTable, key.auths, 10); - scanner.setRanges(connector.tableOperations().splitRangeByTablets(metricsTable, key.range, Integer.MAX_VALUE)); - scanner.fetchColumn(columnFamily, CARDINALITY_CQ_AS_TEXT); - - try { - return stream(scanner) - .map(Entry::getValue) - .map(Value::toString) - .mapToLong(Long::parseLong) - .sum(); - } - finally { - scanner.close(); - } - } - - @Override - public Map loadAll(Iterable keys) - throws Exception - { - int size = Iterables.size(keys); - if (size == 0) { - return ImmutableMap.of(); - } - - LOG.debug("Loading %s exact ranges from Accumulo", size); - - // In order to simplify the implementation, we are making a (safe) assumption - // that the CacheKeys will all contain the same combination of schema/table/family/qualifier - // This is asserted with the below implementation error just to make sure - CacheKey anyKey = stream(keys).findAny().get(); - if (stream(keys).anyMatch(k -> !k.getSchema().equals(anyKey.getSchema()) || !k.getTable().equals(anyKey.getTable()) || !k.getFamily().equals(anyKey.getFamily()) || !k.getQualifier().equals(anyKey.getQualifier()))) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "loadAll called with a non-homogeneous collection of cache keys"); - } - - Map rangeToKey = stream(keys).collect(Collectors.toMap(CacheKey::getRange, Function.identity())); - LOG.debug("rangeToKey size is %s", rangeToKey.size()); - - // Get metrics table name and the column family for the scanner - String metricsTable = getMetricsTableName(anyKey.getSchema(), anyKey.getTable()); - Text columnFamily = new Text(getIndexColumnFamily(anyKey.getFamily().getBytes(UTF_8), anyKey.getQualifier().getBytes(UTF_8)).array()); - - BatchScanner scanner = connector.createBatchScanner(metricsTable, anyKey.getAuths(), 10); - try { - scanner.setRanges(stream(keys).map(CacheKey::getRange).collect(Collectors.toList())); - scanner.fetchColumn(columnFamily, CARDINALITY_CQ_AS_TEXT); - - // Create a new map to hold our cardinalities for each range, returning a default of - // Zero for each non-existent Key - Map rangeValues = new HashMap<>(); - stream(keys).forEach(key -> rangeValues.put(key, 0L)); - - for (Entry entry : scanner) { - rangeValues.put(rangeToKey.get(Range.exact(entry.getKey().getRow())), parseLong(entry.getValue().toString())); - } - - return rangeValues; - } - finally { - if (scanner != null) { - scanner.close(); - } - } - } - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/IndexLookup.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/IndexLookup.java deleted file mode 100644 index 759f9b174a11..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/IndexLookup.java +++ /dev/null @@ -1,401 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with 
the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.index; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableListMultimap; -import com.google.common.collect.ImmutableMultimap; -import com.google.common.collect.Multimap; -import io.airlift.concurrent.BoundedExecutor; -import io.airlift.log.Logger; -import io.airlift.units.Duration; -import io.prestosql.plugin.accumulo.model.AccumuloColumnConstraint; -import io.prestosql.plugin.accumulo.model.TabletSplitMetadata; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.connector.ConnectorSession; -import org.apache.accumulo.core.client.BatchScanner; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.Scanner; -import org.apache.accumulo.core.client.TableNotFoundException; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Range; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.security.Authorizations; -import org.apache.hadoop.io.Text; - -import javax.annotation.PreDestroy; -import javax.inject.Inject; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashSet; -import java.util.List; -import java.util.Map.Entry; -import java.util.Optional; -import java.util.Set; -import java.util.concurrent.CompletionService; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorCompletionService; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; - -import static com.google.common.base.Preconditions.checkArgument; -import static io.airlift.concurrent.Threads.daemonThreadsNamed; -import static io.prestosql.plugin.accumulo.AccumuloClient.getRangesFromDomain; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.UNEXPECTED_ACCUMULO_ERROR; -import static io.prestosql.plugin.accumulo.conf.AccumuloSessionProperties.getIndexCardinalityCachePollingDuration; -import static io.prestosql.plugin.accumulo.conf.AccumuloSessionProperties.getIndexSmallCardThreshold; -import static io.prestosql.plugin.accumulo.conf.AccumuloSessionProperties.getIndexThreshold; -import static io.prestosql.plugin.accumulo.conf.AccumuloSessionProperties.getNumIndexRowsPerSplit; -import static io.prestosql.plugin.accumulo.conf.AccumuloSessionProperties.isIndexMetricsEnabled; -import static io.prestosql.plugin.accumulo.conf.AccumuloSessionProperties.isIndexShortCircuitEnabled; -import static io.prestosql.plugin.accumulo.conf.AccumuloSessionProperties.isOptimizeIndexEnabled; -import static io.prestosql.plugin.accumulo.index.Indexer.CARDINALITY_CQ_AS_TEXT; -import static io.prestosql.plugin.accumulo.index.Indexer.METRICS_TABLE_ROWID_AS_TEXT; -import static io.prestosql.plugin.accumulo.index.Indexer.METRICS_TABLE_ROWS_CF_AS_TEXT; -import static io.prestosql.plugin.accumulo.index.Indexer.getIndexTableName; -import static io.prestosql.plugin.accumulo.index.Indexer.getMetricsTableName; -import static 
io.prestosql.spi.StandardErrorCode.FUNCTION_IMPLEMENTATION_ERROR;
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.Objects.requireNonNull;
-import static java.util.concurrent.Executors.newCachedThreadPool;
-
-/**
- * Class to assist the Presto connector, and maybe external applications,
- * in leveraging the secondary index built by the {@link Indexer}.
- * Leverages {@link ColumnCardinalityCache} to assist in retrieving row IDs.
- * Currently pretty bound to the Presto connector APIs.
- */
-public class IndexLookup
-{
-    private static final Logger LOG = Logger.get(IndexLookup.class);
-    private static final Range METRICS_TABLE_ROWID_RANGE = new Range(METRICS_TABLE_ROWID_AS_TEXT);
-    private final ColumnCardinalityCache cardinalityCache;
-    private final Connector connector;
-    private final ExecutorService coreExecutor;
-    private final BoundedExecutor executorService;
-
-    @Inject
-    public IndexLookup(Connector connector, ColumnCardinalityCache cardinalityCache)
-    {
-        this.connector = requireNonNull(connector, "connector is null");
-        this.cardinalityCache = requireNonNull(cardinalityCache, "cardinalityCache is null");
-
-        // Create a bounded executor with a pool size at 4x number of processors
-        this.coreExecutor = newCachedThreadPool(daemonThreadsNamed("cardinality-lookup-%s"));
-        this.executorService = new BoundedExecutor(coreExecutor, 4 * Runtime.getRuntime().availableProcessors());
-    }
-
-    @PreDestroy
-    public void shutdown()
-    {
-        coreExecutor.shutdownNow();
-    }
-
-    /**
-     * Scans the index table, applying the index based on the given column constraints to return a set of tablet splits.
-     *

- * If this function returns true, the output parameter tabletSplits contains a list of TabletSplitMetadata objects. - * These in turn contain a collection of Ranges containing the exact row IDs determined using the index. - *

-     * If this function returns false, the secondary index should not be used. In this case,
-     * either the Accumulo session has disabled secondary indexing,
-     * or the number of row IDs that would be used by the secondary index is greater than the configured threshold
-     * (again retrieved from the session).
-     *
-     * @param schema Schema name
-     * @param table Table name
-     * @param session Current client session
-     * @param constraints All column constraints (this method will filter out any that are not indexed)
-     * @param rowIdRanges Collection of Accumulo ranges based on any predicate against a record key
-     * @param tabletSplits Output parameter containing the bundles of row IDs determined by the use of the index.
-     * @param serializer Instance of a row serializer
-     * @param auths Scan-time authorizations
-     * @return True if the tablet splits are valid and should be used, false otherwise
-     * @throws Exception If something bad happens. What are the odds?
-     */
-    public boolean applyIndex(
-            String schema,
-            String table,
-            ConnectorSession session,
-            Collection<AccumuloColumnConstraint> constraints,
-            Collection<Range> rowIdRanges,
-            List<TabletSplitMetadata> tabletSplits,
-            AccumuloRowSerializer serializer,
-            Authorizations auths)
-            throws Exception
-    {
-        // Early out if index is disabled
-        if (!isOptimizeIndexEnabled(session)) {
-            LOG.debug("Secondary index is disabled");
-            return false;
-        }
-
-        LOG.debug("Secondary index is enabled");
-
-        // Collect Accumulo ranges for each indexed column constraint
-        Multimap<AccumuloColumnConstraint, Range> constraintRanges = getIndexedConstraintRanges(constraints, serializer);
-
-        // If there are no constraints on an indexed column, we again bail out
-        if (constraintRanges.isEmpty()) {
-            LOG.debug("Query contains no constraints on indexed columns, skipping secondary index");
-            return false;
-        }
-
-        // If metrics are not enabled
-        if (!isIndexMetricsEnabled(session)) {
-            LOG.debug("Use of index metrics is disabled");
-            // Get the ranges via the index table
-            List<Range> indexRanges = getIndexRanges(getIndexTableName(schema, table), constraintRanges, rowIdRanges, auths);
-
-            if (!indexRanges.isEmpty()) {
-                // Bin the ranges into TabletSplitMetadata objects and return true to use the tablet splits
-                binRanges(getNumIndexRowsPerSplit(session), indexRanges, tabletSplits);
-                LOG.debug("Number of splits for %s.%s is %d with %d ranges", schema, table, tabletSplits.size(), indexRanges.size());
-            }
-            else {
-                LOG.debug("Query would return no results, returning empty list of splits");
-            }
-
-            return true;
-        }
-        else {
-            LOG.debug("Use of index metrics is enabled");
-            // Get ranges using the metrics
-            return getRangesWithMetrics(session, schema, table, constraintRanges, rowIdRanges, tabletSplits, auths);
-        }
-    }
-
-    private static Multimap<AccumuloColumnConstraint, Range> getIndexedConstraintRanges(Collection<AccumuloColumnConstraint> constraints, AccumuloRowSerializer serializer)
-    {
-        ImmutableListMultimap.Builder<AccumuloColumnConstraint, Range> builder = ImmutableListMultimap.builder();
-        for (AccumuloColumnConstraint columnConstraint : constraints) {
-            if (columnConstraint.isIndexed()) {
-                for (Range range : getRangesFromDomain(columnConstraint.getDomain(), serializer)) {
-                    builder.put(columnConstraint, range);
-                }
-            }
-            else {
-                LOG.warn("Query contains constraint on non-indexed column %s. Is it worth indexing?", columnConstraint.getName());
-            }
-        }
-        return builder.build();
-    }
-
-    private boolean getRangesWithMetrics(
-            ConnectorSession session,
-            String schema,
-            String table,
-            Multimap<AccumuloColumnConstraint, Range> constraintRanges,
-            Collection<Range> rowIdRanges,
-            List<TabletSplitMetadata> tabletSplits,
-            Authorizations auths)
-            throws Exception
-    {
-        String metricsTable = getMetricsTableName(schema, table);
-        long numRows = getNumRowsInTable(metricsTable, auths);
-
-        // Get the cardinalities from the metrics table
-        Multimap<Long, AccumuloColumnConstraint> cardinalities;
-        if (isIndexShortCircuitEnabled(session)) {
-            cardinalities = cardinalityCache.getCardinalities(
-                    schema,
-                    table,
-                    auths,
-                    constraintRanges,
-                    (long) (numRows * getIndexSmallCardThreshold(session)),
-                    getIndexCardinalityCachePollingDuration(session));
-        }
-        else {
-            // disable short circuit using 0
-            cardinalities = cardinalityCache.getCardinalities(schema, table, auths, constraintRanges, 0, new Duration(0, TimeUnit.MILLISECONDS));
-        }
-
-        Optional<Entry<Long, AccumuloColumnConstraint>> entry = cardinalities.entries().stream().findFirst();
-        if (!entry.isPresent()) {
-            return false;
-        }
-
-        Entry<Long, AccumuloColumnConstraint> lowestCardinality = entry.get();
-        String indexTable = getIndexTableName(schema, table);
-        double threshold = getIndexThreshold(session);
-        List<Range> indexRanges;
-
-        // If the smallest cardinality in our list is above the lowest cardinality threshold,
-        // we should look at intersecting the row ID ranges to try and get under the threshold.
-        if (smallestCardAboveThreshold(session, numRows, lowestCardinality.getKey())) {
-            // If we only have one column, we can skip the intersection process and just check the index threshold
-            if (cardinalities.size() == 1) {
-                long numEntries = lowestCardinality.getKey();
-                double ratio = ((double) numEntries / (double) numRows);
-                LOG.debug("Use of index would scan %s of %s rows, ratio %s. Threshold %2f, Using for index table? %s", numEntries, numRows, ratio, threshold, ratio < threshold);
-                if (ratio >= threshold) {
-                    return false;
-                }
-            }
-
-            // Else, get the intersection of all row IDs for all column constraints
-            LOG.debug("%d indexed columns, intersecting ranges", constraintRanges.size());
-            indexRanges = getIndexRanges(indexTable, constraintRanges, rowIdRanges, auths);
-            LOG.debug("Intersection results in %d ranges from secondary index", indexRanges.size());
-        }
-        else {
-            // Else, we don't need to intersect the columns and we can just use the column with the lowest cardinality,
-            // so get all those row IDs in a set of ranges.
-            LOG.debug("Not intersecting columns, using column with lowest cardinality");
-            ImmutableMultimap.Builder<AccumuloColumnConstraint, Range> lcBldr = ImmutableMultimap.builder();
-            lcBldr.putAll(lowestCardinality.getValue(), constraintRanges.get(lowestCardinality.getValue()));
-            indexRanges = getIndexRanges(indexTable, lcBldr.build(), rowIdRanges, auths);
-        }
-
-        if (indexRanges.isEmpty()) {
-            LOG.debug("Query would return no results, returning empty list of splits");
-            return true;
-        }
-
-        // Okay, we now check how many rows we would scan by using the index vs. the overall number
-        // of rows
-        long numEntries = indexRanges.size();
-        double ratio = (double) numEntries / (double) numRows;
-        LOG.debug("Use of index would scan %d of %d rows, ratio %s. Threshold %2f, Using for table? %b", numEntries, numRows, ratio, threshold, ratio < threshold);
-
-        // If the percentage of scanned rows, the ratio, is less than the configured threshold
-        if (ratio < threshold) {
-            // Bin the ranges into TabletSplitMetadata objects and return true to use the tablet splits
-            binRanges(getNumIndexRowsPerSplit(session), indexRanges, tabletSplits);
-            LOG.debug("Number of splits for %s.%s is %d with %d ranges", schema, table, tabletSplits.size(), indexRanges.size());
-            return true;
-        }
-        else {
-            // We are going to do too much work to use the secondary index, so return false
-            return false;
-        }
-    }
-
-    private static boolean smallestCardAboveThreshold(ConnectorSession session, long numRows, long smallestCardinality)
-    {
-        double ratio = ((double) smallestCardinality / (double) numRows);
-        double threshold = getIndexSmallCardThreshold(session);
-        LOG.debug("Smallest cardinality is %d, num rows is %d, ratio is %2f with threshold of %f", smallestCardinality, numRows, ratio, threshold);
-        return ratio > threshold;
-    }
-
-    private long getNumRowsInTable(String metricsTable, Authorizations auths)
-            throws TableNotFoundException
-    {
-        // Create scanner against the metrics table, pulling the special column and the rows column
-        Scanner scanner = connector.createScanner(metricsTable, auths);
-        scanner.setRange(METRICS_TABLE_ROWID_RANGE);
-        scanner.fetchColumn(METRICS_TABLE_ROWS_CF_AS_TEXT, CARDINALITY_CQ_AS_TEXT);
-
-        // Scan the entry and get the number of rows
-        long numRows = -1;
-        for (Entry<Key, Value> entry : scanner) {
-            if (numRows > 0) {
-                throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Should have received only one entry when scanning for number of rows in metrics table");
-            }
-            numRows = Long.parseLong(entry.getValue().toString());
-        }
-        scanner.close();
-
-        LOG.debug("Number of rows in table is %d", numRows);
-        return numRows;
-    }
-
-    private List<Range> getIndexRanges(String indexTable, Multimap<AccumuloColumnConstraint, Range> constraintRanges, Collection<Range> rowIdRanges, Authorizations auths)
-    {
-        Set<Range> finalRanges = new HashSet<>();
-        // For each column/constraint pair we submit a task to scan the index ranges
-        List<Future<Set<Range>>> tasks = new ArrayList<>();
-        CompletionService<Set<Range>> executor = new ExecutorCompletionService<>(executorService);
-        for (Entry<AccumuloColumnConstraint, Collection<Range>> constraintEntry : constraintRanges.asMap().entrySet()) {
-            tasks.add(executor.submit(() -> {
-                // Create a batch scanner against the index table, setting the ranges
-                BatchScanner scan = connector.createBatchScanner(indexTable, auths, 10);
-                scan.setRanges(constraintEntry.getValue());
-
-                // Fetch the column family for this specific column
-                scan.fetchColumnFamily(new Text(Indexer.getIndexColumnFamily(constraintEntry.getKey().getFamily().getBytes(UTF_8), constraintEntry.getKey().getQualifier().getBytes(UTF_8)).array()));
-
-                // For each entry in the scanner
-                Text tmpQualifier = new Text();
-                Set<Range> columnRanges = new HashSet<>();
-                for (Entry<Key, Value> entry : scan) {
-                    entry.getKey().getColumnQualifier(tmpQualifier);
-
-                    // Add to our column ranges if it is in one of the row ID ranges
-                    if (inRange(tmpQualifier, rowIdRanges)) {
-                        columnRanges.add(new Range(tmpQualifier));
-                    }
-                }
-
-                LOG.debug("Retrieved %d ranges for index column %s", columnRanges.size(), constraintEntry.getKey().getName());
-                scan.close();
-                return columnRanges;
-            }));
-        }
-        tasks.forEach(future -> {
-            try {
-                // If finalRanges is empty, we have not yet added any column ranges
-                if (finalRanges.isEmpty()) {
-                    finalRanges.addAll(future.get());
-                }
-                else {
-                    // Retain only the row IDs for this column that have already been added
-                    // This is your set intersection operation!
-                    finalRanges.retainAll(future.get());
-                }
-            }
-            catch (ExecutionException | InterruptedException e) {
-                if (e instanceof InterruptedException) {
-                    Thread.currentThread().interrupt();
-                }
-                throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Exception when getting index ranges", e.getCause());
-            }
-        });
-        return ImmutableList.copyOf(finalRanges);
-    }
-
-    private static void binRanges(int numRangesPerBin, List<Range> splitRanges, List<TabletSplitMetadata> prestoSplits)
-    {
-        checkArgument(numRangesPerBin > 0, "number of ranges per bin must be greater than zero");
-        int toAdd = splitRanges.size();
-        int fromIndex = 0;
-        int toIndex = Math.min(toAdd, numRangesPerBin);
-        do {
-            // Add the sublist of range handles
-            // Use an empty location because we are binning multiple Ranges spread across many tablet servers
-            prestoSplits.add(new TabletSplitMetadata(Optional.empty(), splitRanges.subList(fromIndex, toIndex)));
-            toAdd -= toIndex - fromIndex;
-            fromIndex = toIndex;
-            toIndex += Math.min(toAdd, numRangesPerBin);
-        }
-        while (toAdd > 0);
-    }
-
-    /**
-     * Gets a Boolean value indicating if the given value is in one of the Ranges in the given collection
-     *
-     * @param text Text object to check against the Range collection
-     * @param ranges Ranges to look into
-     * @return True if the text object is in one of the ranges, false otherwise
-     */
-    private static boolean inRange(Text text, Collection<Range> ranges)
-    {
-        Key kCq = new Key(text);
-        return ranges.stream().anyMatch(r -> !r.beforeStartKey(kCq) && !r.afterEndKey(kCq));
-    }
-}
diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/Indexer.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/Indexer.java
deleted file mode 100644
index c9a2ce90e598..000000000000
--- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/index/Indexer.java
+++ /dev/null
@@ -1,559 +0,0 @@
-/*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ -package io.prestosql.plugin.accumulo.index; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableMultimap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Multimap; -import com.google.common.primitives.UnsignedBytes; -import io.prestosql.plugin.accumulo.Types; -import io.prestosql.plugin.accumulo.iterators.MaxByteArrayCombiner; -import io.prestosql.plugin.accumulo.iterators.MinByteArrayCombiner; -import io.prestosql.plugin.accumulo.metadata.AccumuloTable; -import io.prestosql.plugin.accumulo.model.AccumuloColumnHandle; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.connector.SchemaTableName; -import io.prestosql.spi.type.Type; -import org.apache.accumulo.core.client.BatchWriter; -import org.apache.accumulo.core.client.BatchWriterConfig; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.IteratorSetting; -import org.apache.accumulo.core.client.MutationsRejectedException; -import org.apache.accumulo.core.client.Scanner; -import org.apache.accumulo.core.client.TableNotFoundException; -import org.apache.accumulo.core.data.ColumnUpdate; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Mutation; -import org.apache.accumulo.core.data.Range; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.iterators.LongCombiner; -import org.apache.accumulo.core.iterators.TypedValueCombiner; -import org.apache.accumulo.core.iterators.user.SummingCombiner; -import org.apache.accumulo.core.security.Authorizations; -import org.apache.accumulo.core.security.ColumnVisibility; -import org.apache.commons.lang.ArrayUtils; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.io.Text; - -import javax.annotation.concurrent.NotThreadSafe; - -import java.io.Closeable; -import java.nio.ByteBuffer; -import java.util.Collection; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; - -import static com.google.common.base.MoreObjects.toStringHelper; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.ACCUMULO_TABLE_DNE; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.UNEXPECTED_ACCUMULO_ERROR; -import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED; -import static java.nio.ByteBuffer.wrap; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.Objects.requireNonNull; - -/** - * This utility class assists the Presto connector, and external applications, - * in populating the index table and metrics table for Accumulo-backed Presto tables. - *

- * This class is totally not thread safe. - *

- * When creating a table, if it contains indexed columns, users will have to create the index table
- * and the index metrics table, the names of which can be retrieved using the static functions in
- * this class. Additionally, users MUST add iterators to the index metrics table (also available via
- * a static function), and, while not required, it is recommended to add the locality groups to the
- * index table to improve index lookup times. A sketch of these setup steps follows below.
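
A rough sketch of those setup steps, assuming an Accumulo Connector named "connector" and an AccumuloTable named "table" as used elsewhere in this file (checked exceptions from the table operations omitted):

    // Create the index and metrics tables, then attach the metric iterators
    connector.tableOperations().create(table.getIndexTableName());
    connector.tableOperations().create(table.getMetricsTableName());
    for (IteratorSetting setting : Indexer.getMetricIterators(table)) {
        connector.tableOperations().attachIterator(table.getMetricsTableName(), setting);
    }
    // Optional, but recommended: locality groups on the index table
    connector.tableOperations().setLocalityGroups(table.getIndexTableName(), Indexer.getLocalityGroups(table));
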

- * Sample usage of an indexer: - *

- *

- * 
- * Indexer indexer = new Indexer(connector, userAuths, table, writerConf);
- * for (Mutation m : mutationsToNormalTable) {
- *      indexer.index(m);
- * }
- *
- * // can flush indexer w/regular BatchWriter
- * indexer.flush();
- *
- * // finished adding new mutations, close the indexer
- * indexer.close();
- * 
- * 
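
Since Indexer implements Closeable, the sample above can also be written with try-with-resources, so the indexer is flushed and closed even if indexing fails part-way; connector, userAuths, table, writerConf, and mutationsToNormalTable are the same assumed variables as in the sample:

    try (Indexer indexer = new Indexer(connector, userAuths, table, writerConf)) {
        for (Mutation mutation : mutationsToNormalTable) {
            indexer.index(mutation);
        }
        // close() runs flush(), writing any pending index and metric mutations
    }
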
- */ -@NotThreadSafe -public class Indexer - implements Closeable -{ - public static final ByteBuffer METRICS_TABLE_ROW_ID = wrap("___METRICS_TABLE___".getBytes(UTF_8)); - public static final ByteBuffer METRICS_TABLE_ROWS_CF = wrap("___rows___".getBytes(UTF_8)); - public static final MetricsKey METRICS_TABLE_ROW_COUNT = new MetricsKey(METRICS_TABLE_ROW_ID, METRICS_TABLE_ROWS_CF); - public static final ByteBuffer METRICS_TABLE_FIRST_ROW_CQ = wrap("___first_row___".getBytes(UTF_8)); - public static final ByteBuffer METRICS_TABLE_LAST_ROW_CQ = wrap("___last_row___".getBytes(UTF_8)); - public static final byte[] CARDINALITY_CQ = "___card___".getBytes(UTF_8); - public static final Text CARDINALITY_CQ_AS_TEXT = new Text(CARDINALITY_CQ); - public static final Text METRICS_TABLE_ROWS_CF_AS_TEXT = new Text(METRICS_TABLE_ROWS_CF.array()); - public static final Text METRICS_TABLE_ROWID_AS_TEXT = new Text(METRICS_TABLE_ROW_ID.array()); - - private static final byte[] EMPTY_BYTES = new byte[0]; - private static final byte UNDERSCORE = '_'; - private static final TypedValueCombiner.Encoder ENCODER = new LongCombiner.StringEncoder(); - - private final AccumuloTable table; - private final BatchWriter indexWriter; - private final BatchWriterConfig writerConfig; - private final Connector connector; - private final Map metrics = new HashMap<>(); - private final Multimap indexColumns; - private final Map> indexColumnTypes; - private final AccumuloRowSerializer serializer; - private final Comparator byteArrayComparator = UnsignedBytes.lexicographicalComparator(); - - private byte[] firstRow; - private byte[] lastRow; - - public Indexer( - Connector connector, - Authorizations auths, - AccumuloTable table, - BatchWriterConfig writerConfig) - throws TableNotFoundException - { - this.connector = requireNonNull(connector, "connector is null"); - this.table = requireNonNull(table, "table is null"); - this.writerConfig = requireNonNull(writerConfig, "writerConfig is null"); - requireNonNull(auths, "auths is null"); - - this.serializer = table.getSerializerInstance(); - - // Create our batch writer - indexWriter = connector.createBatchWriter(table.getIndexTableName(), writerConfig); - - ImmutableMultimap.Builder indexColumnsBuilder = ImmutableMultimap.builder(); - Map> indexColumnTypesBuilder = new HashMap<>(); - - // Initialize metadata - table.getColumns().forEach(columnHandle -> { - if (columnHandle.isIndexed()) { - // Wrap the column family and qualifier for this column and add it to - // collection of indexed columns - ByteBuffer family = wrap(columnHandle.getFamily().get().getBytes(UTF_8)); - ByteBuffer qualifier = wrap(columnHandle.getQualifier().get().getBytes(UTF_8)); - indexColumnsBuilder.put(family, qualifier); - - // Create a mapping for this column's Presto type, again creating a new one for the - // family if necessary - Map types = indexColumnTypesBuilder.get(family); - if (types == null) { - types = new HashMap<>(); - indexColumnTypesBuilder.put(family, types); - } - types.put(qualifier, columnHandle.getType()); - } - }); - - indexColumns = indexColumnsBuilder.build(); - indexColumnTypes = ImmutableMap.copyOf(indexColumnTypesBuilder); - - // If there are no indexed columns, throw an exception - if (indexColumns.isEmpty()) { - throw new PrestoException(NOT_SUPPORTED, "No indexed columns in table metadata. 
Refusing to index a table with no indexed columns"); - } - - // Initialize metrics map - // This metrics map is for column cardinality - metrics.put(METRICS_TABLE_ROW_COUNT, new AtomicLong(0)); - - // Scan the metrics table for existing first row and last row - Pair minmax = getMinMaxRowIds(connector, table, auths); - firstRow = minmax.getLeft(); - lastRow = minmax.getRight(); - } - - /** - * Index the given mutation, adding mutations to the index and metrics table - *

-     * Like typical use of a BatchWriter, this method does not flush mutations to the underlying index table.
-     * For higher throughput, the modifications to the metrics table are tracked in memory and added to the metrics table when the indexer is flushed or closed.
-     *
-     * @param mutation Mutation to index
-     */
-    public void index(Mutation mutation)
-    {
-        // Increment the cardinality for the number of rows in the table
-        metrics.get(METRICS_TABLE_ROW_COUNT).incrementAndGet();
-
-        // Set the first and last row values of the table based on existing row IDs
-        if (firstRow == null || byteArrayComparator.compare(mutation.getRow(), firstRow) < 0) {
-            firstRow = mutation.getRow();
-        }
-
-        if (lastRow == null || byteArrayComparator.compare(mutation.getRow(), lastRow) > 0) {
-            lastRow = mutation.getRow();
-        }
-
-        // For each column update in this mutation
-        for (ColumnUpdate columnUpdate : mutation.getUpdates()) {
-            // Get the column qualifiers we want to index for this column family (if any)
-            ByteBuffer family = wrap(columnUpdate.getColumnFamily());
-            Collection<ByteBuffer> indexQualifiers = indexColumns.get(family);
-
-            // If we have column qualifiers we want to index for this column family
-            if (indexQualifiers != null) {
-                // Check if we want to index this particular qualifier
-                ByteBuffer qualifier = wrap(columnUpdate.getColumnQualifier());
-                if (indexQualifiers.contains(qualifier)) {
-                    // If so, create a mutation using the following mapping:
-                    // Row ID = column value
-                    // Column Family = columnfamily_columnqualifier
-                    // Column Qualifier = row ID
-                    // Value = empty
-                    ByteBuffer indexFamily = getIndexColumnFamily(columnUpdate.getColumnFamily(), columnUpdate.getColumnQualifier());
-                    Type type = indexColumnTypes.get(family).get(qualifier);
-                    ColumnVisibility visibility = new ColumnVisibility(columnUpdate.getColumnVisibility());
-
-                    // If this is an array type, then index each individual element in the array
-                    if (Types.isArrayType(type)) {
-                        Type elementType = Types.getElementType(type);
-                        List<Object> elements = serializer.decode(type, columnUpdate.getValue());
-                        for (Object element : elements) {
-                            addIndexMutation(wrap(serializer.encode(elementType, element)), indexFamily, visibility, mutation.getRow());
-                        }
-                    }
-                    else {
-                        addIndexMutation(wrap(columnUpdate.getValue()), indexFamily, visibility, mutation.getRow());
-                    }
-                }
-            }
-        }
-    }
-
-    public void index(Iterable<Mutation> mutations)
-    {
-        for (Mutation mutation : mutations) {
-            index(mutation);
-        }
-    }
-
-    private void addIndexMutation(ByteBuffer row, ByteBuffer family, ColumnVisibility visibility, byte[] qualifier)
-    {
-        // Create the mutation and add it to the batch writer
-        Mutation indexMutation = new Mutation(row.array());
-        indexMutation.put(family.array(), qualifier, visibility, EMPTY_BYTES);
-        try {
-            indexWriter.addMutation(indexMutation);
-        }
-        catch (MutationsRejectedException e) {
-            throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Index mutation rejected by server", e);
-        }
-
-        // Increment the cardinality metrics for this value of index
-        // metrics is a mapping of row ID to column family
-        MetricsKey key = new MetricsKey(row, family, visibility);
-        AtomicLong count = metrics.get(key);
-        if (count == null) {
-            count = new AtomicLong(0);
-            metrics.put(key, count);
-        }
-
-        count.incrementAndGet();
-    }
-
-    /**
-     * Flushes all mutations in the index writer, and writes all metric mutations to the metrics table.
-     * Note that the metrics table is not updated until this method is explicitly called (or implicitly via close).
-     */
-    public void flush()
-    {
-        try {
-            // Flush index writer
-            indexWriter.flush();
-
-            // Write out metrics mutations
-            BatchWriter metricsWriter = connector.createBatchWriter(table.getMetricsTableName(), writerConfig);
-            metricsWriter.addMutations(getMetricsMutations());
-            metricsWriter.close();
-
-            // Re-initialize the metrics
-            metrics.clear();
-            metrics.put(METRICS_TABLE_ROW_COUNT, new AtomicLong(0));
-        }
-        catch (MutationsRejectedException e) {
-            throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Index mutation was rejected by server on flush", e);
-        }
-        catch (TableNotFoundException e) {
-            throw new PrestoException(ACCUMULO_TABLE_DNE, "Accumulo table does not exist", e);
-        }
-    }
-
-    /**
-     * Flushes all remaining mutations via {@link Indexer#flush} and closes the index writer.
-     */
-    @Override
-    public void close()
-    {
-        try {
-            flush();
-            indexWriter.close();
-        }
-        catch (MutationsRejectedException e) {
-            throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Mutation was rejected by server on close", e);
-        }
-    }
-
-    private Collection<Mutation> getMetricsMutations()
-    {
-        ImmutableList.Builder<Mutation> mutationBuilder = ImmutableList.builder();
-        // Mapping of column value to column to number of row IDs that contain that value
-        for (Entry<MetricsKey, AtomicLong> entry : metrics.entrySet()) {
-            // Row ID: Column value
-            // Family: columnfamily_columnqualifier
-            // Qualifier: CARDINALITY_CQ
-            // Visibility: Inherited from indexed Mutation
-            // Value: Cardinality
-            Mutation mut = new Mutation(entry.getKey().row.array());
-            mut.put(entry.getKey().family.array(), CARDINALITY_CQ, entry.getKey().visibility, ENCODER.encode(entry.getValue().get()));
-
-            // Add to our list of mutations
-            mutationBuilder.add(mut);
-        }
-
-        // Write the first/last row mutations only if both are non-null; they are null
-        // only for a brand new table that has zero rows and no indexed elements...
-        // Talk about your edge cases!
-        if (firstRow != null && lastRow != null) {
-            // Add some columns to the special metrics table row ID for the first/last row.
-            // Note that if the values on the server side are greater/lesser,
-            // the configured iterator will take care of this at scan/compaction time
-            Mutation firstLastMutation = new Mutation(METRICS_TABLE_ROW_ID.array());
-            firstLastMutation.put(METRICS_TABLE_ROWS_CF.array(), METRICS_TABLE_FIRST_ROW_CQ.array(), firstRow);
-            firstLastMutation.put(METRICS_TABLE_ROWS_CF.array(), METRICS_TABLE_LAST_ROW_CQ.array(), lastRow);
-            mutationBuilder.add(firstLastMutation);
-        }
-
-        return mutationBuilder.build();
-    }
-
-    /**
-     * Gets a collection of iterator settings that should be added to the metric table for the given Accumulo table. Don't forget! Please!
-     *
-     * @param table Table for retrieving metrics iterators, see AccumuloClient#getTable
-     * @return Collection of iterator settings
-     */
-    public static Collection<IteratorSetting> getMetricIterators(AccumuloTable table)
-    {
-        String cardQualifier = new String(CARDINALITY_CQ, UTF_8);
-        String rowsFamily = new String(METRICS_TABLE_ROWS_CF.array(), UTF_8);
-
-        // Build a string for all columns where the summing combiner should be applied,
-        // i.e. 
all indexed columns - StringBuilder cardBuilder = new StringBuilder(rowsFamily + ":" + cardQualifier + ","); - for (String s : getLocalityGroups(table).keySet()) { - cardBuilder.append(s).append(":").append(cardQualifier).append(','); - } - cardBuilder.deleteCharAt(cardBuilder.length() - 1); - - // Configuration rows for the Min/Max combiners - String firstRowColumn = rowsFamily + ":" + new String(METRICS_TABLE_FIRST_ROW_CQ.array(), UTF_8); - String lastRowColumn = rowsFamily + ":" + new String(METRICS_TABLE_LAST_ROW_CQ.array(), UTF_8); - - // Summing combiner for cardinality columns - IteratorSetting s1 = new IteratorSetting(1, SummingCombiner.class, ImmutableMap.of("columns", cardBuilder.toString(), "type", "STRING")); - - // Min/Max combiner for the first/last rows of the table - IteratorSetting s2 = new IteratorSetting(2, MinByteArrayCombiner.class, ImmutableMap.of("columns", firstRowColumn)); - IteratorSetting s3 = new IteratorSetting(3, MaxByteArrayCombiner.class, ImmutableMap.of("columns", lastRowColumn)); - - return ImmutableList.of(s1, s2, s3); - } - - /** - * Gets the column family of the index table based on the given column family and qualifier. - * - * @param columnFamily Presto column family - * @param columnQualifier Presto column qualifier - * @return ByteBuffer of the given index column family - */ - public static ByteBuffer getIndexColumnFamily(byte[] columnFamily, byte[] columnQualifier) - { - return wrap(ArrayUtils.addAll(ArrayUtils.add(columnFamily, UNDERSCORE), columnQualifier)); - } - - /** - * Gets a set of locality groups that should be added to the index table (not the metrics table). - * - * @param table Table for the locality groups, see AccumuloClient#getTable - * @return Mapping of locality group to column families in the locality group, 1:1 mapping in - * this case - */ - public static Map> getLocalityGroups(AccumuloTable table) - { - Map> groups = new HashMap<>(); - // For each indexed column - for (AccumuloColumnHandle columnHandle : table.getColumns().stream().filter(AccumuloColumnHandle::isIndexed).collect(Collectors.toList())) { - // Create a Text version of the index column family - Text indexColumnFamily = new Text(getIndexColumnFamily(columnHandle.getFamily().get().getBytes(UTF_8), columnHandle.getQualifier().get().getBytes(UTF_8)).array()); - - // Add this to the locality groups, - // it is a 1:1 mapping of locality group to column families - groups.put(indexColumnFamily.toString(), ImmutableSet.of(indexColumnFamily)); - } - return groups; - } - - /** - * Gets the fully-qualified index table name for the given table. - * - * @param schema Schema name - * @param table Table name - * @return Qualified index table name - */ - public static String getIndexTableName(String schema, String table) - { - return schema.equals("default") ? table + "_idx" : schema + '.' + table + "_idx"; - } - - /** - * Gets the fully-qualified index table name for the given table. - * - * @param tableName Schema table name - * @return Qualified index table name - */ - public static String getIndexTableName(SchemaTableName tableName) - { - return getIndexTableName(tableName.getSchemaName(), tableName.getTableName()); - } - - /** - * Gets the fully-qualified index metrics table name for the given table. - * - * @param schema Schema name - * @param table Table name - * @return Qualified index metrics table name - */ - public static String getMetricsTableName(String schema, String table) - { - return schema.equals("default") ? table + "_idx_metrics" - : schema + '.' 
+ table + "_idx_metrics"; - } - - /** - * Gets the fully-qualified index metrics table name for the given table. - * - * @param tableName Schema table name - * @return Qualified index metrics table name - */ - public static String getMetricsTableName(SchemaTableName tableName) - { - return getMetricsTableName(tableName.getSchemaName(), tableName.getTableName()); - } - - public static Pair getMinMaxRowIds(Connector connector, AccumuloTable table, Authorizations auths) - throws TableNotFoundException - { - Scanner scanner = connector.createScanner(table.getMetricsTableName(), auths); - scanner.setRange(new Range(new Text(Indexer.METRICS_TABLE_ROW_ID.array()))); - Text family = new Text(Indexer.METRICS_TABLE_ROWS_CF.array()); - Text firstRowQualifier = new Text(Indexer.METRICS_TABLE_FIRST_ROW_CQ.array()); - Text lastRowQualifier = new Text(Indexer.METRICS_TABLE_LAST_ROW_CQ.array()); - scanner.fetchColumn(family, firstRowQualifier); - scanner.fetchColumn(family, lastRowQualifier); - - byte[] firstRow = null; - byte[] lastRow = null; - for (Entry entry : scanner) { - if (entry.getKey().compareColumnQualifier(firstRowQualifier) == 0) { - firstRow = entry.getValue().get(); - } - - if (entry.getKey().compareColumnQualifier(lastRowQualifier) == 0) { - lastRow = entry.getValue().get(); - } - } - scanner.close(); - return Pair.of(firstRow, lastRow); - } - - /** - * Class containing the key for aggregating the local metrics counter. - */ - private static class MetricsKey - { - private static final ColumnVisibility EMPTY_VISIBILITY = new ColumnVisibility(); - - public final ByteBuffer row; - public final ByteBuffer family; - public final ColumnVisibility visibility; - - public MetricsKey(ByteBuffer row, ByteBuffer family) - { - requireNonNull(row, "row is null"); - requireNonNull(family, "family is null"); - this.row = row; - this.family = family; - this.visibility = EMPTY_VISIBILITY; - } - - public MetricsKey(ByteBuffer row, ByteBuffer family, ColumnVisibility visibility) - { - requireNonNull(row, "row is null"); - requireNonNull(family, "family is null"); - requireNonNull(visibility, "visibility is null"); - this.row = row; - this.family = family; - this.visibility = visibility.getExpression() != null ? visibility : EMPTY_VISIBILITY; - } - - @Override - public boolean equals(Object obj) - { - if (this == obj) { - return true; - } - if ((obj == null) || (getClass() != obj.getClass())) { - return false; - } - - MetricsKey other = (MetricsKey) obj; - return Objects.equals(this.row, other.row) - && Objects.equals(this.family, other.family) - && Objects.equals(this.visibility, other.visibility); - } - - @Override - public int hashCode() - { - return Objects.hash(row, family, visibility); - } - - @Override - public String toString() - { - return toStringHelper(this) - .add("row", new String(row.array(), UTF_8)) - .add("family", new String(row.array(), UTF_8)) - .add("visibility", visibility.toString()) - .toString(); - } - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloPageSink.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloPageSink.java deleted file mode 100644 index 56fffe80cb47..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloPageSink.java +++ /dev/null @@ -1,304 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.io; - -import com.google.common.collect.ImmutableList; -import io.airlift.slice.Slice; -import io.prestosql.plugin.accumulo.Types; -import io.prestosql.plugin.accumulo.index.Indexer; -import io.prestosql.plugin.accumulo.metadata.AccumuloTable; -import io.prestosql.plugin.accumulo.model.AccumuloColumnHandle; -import io.prestosql.plugin.accumulo.model.Field; -import io.prestosql.plugin.accumulo.model.Row; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.Page; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.connector.ConnectorPageSink; -import io.prestosql.spi.type.Type; -import io.prestosql.spi.type.TypeUtils; -import io.prestosql.spi.type.VarcharType; -import org.apache.accumulo.core.client.AccumuloException; -import org.apache.accumulo.core.client.AccumuloSecurityException; -import org.apache.accumulo.core.client.BatchWriter; -import org.apache.accumulo.core.client.BatchWriterConfig; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.MutationsRejectedException; -import org.apache.accumulo.core.client.TableNotFoundException; -import org.apache.accumulo.core.data.Mutation; -import org.apache.accumulo.core.data.Value; -import org.apache.hadoop.io.Text; - -import java.util.Collection; -import java.util.List; -import java.util.Optional; -import java.util.concurrent.CompletableFuture; - -import static io.airlift.concurrent.MoreFutures.getFutureValue; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.ACCUMULO_TABLE_DNE; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.UNEXPECTED_ACCUMULO_ERROR; -import static io.prestosql.spi.StandardErrorCode.FUNCTION_IMPLEMENTATION_ERROR; -import static io.prestosql.spi.StandardErrorCode.INVALID_FUNCTION_ARGUMENT; -import static io.prestosql.spi.type.BigintType.BIGINT; -import static io.prestosql.spi.type.BooleanType.BOOLEAN; -import static io.prestosql.spi.type.DateType.DATE; -import static io.prestosql.spi.type.DoubleType.DOUBLE; -import static io.prestosql.spi.type.IntegerType.INTEGER; -import static io.prestosql.spi.type.RealType.REAL; -import static io.prestosql.spi.type.SmallintType.SMALLINT; -import static io.prestosql.spi.type.TimeType.TIME; -import static io.prestosql.spi.type.TimestampType.TIMESTAMP; -import static io.prestosql.spi.type.TinyintType.TINYINT; -import static io.prestosql.spi.type.VarbinaryType.VARBINARY; -import static java.util.Objects.requireNonNull; -import static java.util.concurrent.CompletableFuture.completedFuture; - -/** - * Output class for serializing Presto pages (blocks of rows of data) to Accumulo. - * This class converts the rows from within a page to a collection of Accumulo Mutations, - * writing and indexing the rows. Writers are flushed and closed on commit; a rollback - * cannot undo mutations that have already been written to Accumulo.
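For context on the Accumulo write path this sink wraps: a Mutation collects all updates for a single row ID, and a BatchWriter buffers mutations and sends them to the tablet servers. A minimal standalone sketch of that API, not the connector's code; the "users" table name and "md" column family are hypothetical:

    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.MutationsRejectedException;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;

    import static java.nio.charset.StandardCharsets.UTF_8;

    public final class MutationSketch
    {
        private MutationSketch() {}

        public static void writeRow(Connector connector)
                throws TableNotFoundException, MutationsRejectedException
        {
            BatchWriter writer = connector.createBatchWriter("users", new BatchWriterConfig());
            try {
                // One Mutation per row ID; each put() adds a family/qualifier/value cell
                Mutation mutation = new Mutation("row1".getBytes(UTF_8));
                mutation.put("md".getBytes(UTF_8), "name".getBytes(UTF_8), new Value("alice".getBytes(UTF_8)));
                mutation.put("md".getBytes(UTF_8), "age".getBytes(UTF_8), new Value("42".getBytes(UTF_8)));
                writer.addMutation(mutation);
            }
            finally {
                writer.close(); // close() flushes any buffered mutations
            }
        }
    }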
- * - * @see AccumuloPageSinkProvider - */ -public class AccumuloPageSink - implements ConnectorPageSink -{ - public static final Text ROW_ID_COLUMN = new Text("___ROW___"); - private final AccumuloRowSerializer serializer; - private final BatchWriter writer; - private final Optional<Indexer> indexer; - private final List<AccumuloColumnHandle> columns; - private final int rowIdOrdinal; - private long numRows; - - public AccumuloPageSink( - Connector connector, - AccumuloTable table, - String username) - { - requireNonNull(table, "table is null"); - - this.columns = table.getColumns(); - - // Fetch the row ID ordinal, throwing an exception if not found for safety - Optional<Integer> ordinal = columns.stream() - .filter(columnHandle -> columnHandle.getName().equals(table.getRowId())) - .map(AccumuloColumnHandle::getOrdinal) - .findAny(); - - if (!ordinal.isPresent()) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Row ID ordinal not found"); - } - - this.rowIdOrdinal = ordinal.get(); - this.serializer = table.getSerializerInstance(); - - try { - // Create a BatchWriter to the Accumulo table - BatchWriterConfig conf = new BatchWriterConfig(); - writer = connector.createBatchWriter(table.getFullTableName(), conf); - - // If the table is indexed, create an instance of an Indexer, else empty - if (table.isIndexed()) { - indexer = Optional.of( - new Indexer( - connector, - connector.securityOperations().getUserAuthorizations(username), - table, - conf)); - } - else { - indexer = Optional.empty(); - } - } - catch (AccumuloException | AccumuloSecurityException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Accumulo error when creating BatchWriter and/or Indexer", e); - } - catch (TableNotFoundException e) { - throw new PrestoException(ACCUMULO_TABLE_DNE, "Accumulo error when creating BatchWriter and/or Indexer, table does not exist", e); - } - } - - /** - * Converts a {@link Row} to an Accumulo mutation. - * - * @param row Row object - * @param rowIdOrdinal Ordinal in the list of columns that is the row ID. This is not validated; the caller must pass the correct ordinal and must supply the column handles sorted in ordinal order. - * @param columns All column handles for the Row, sorted by ordinal.
- * @param serializer Instance of {@link AccumuloRowSerializer} used to encode the values of the row to the Mutation - * @return Mutation - */ - public static Mutation toMutation(Row row, int rowIdOrdinal, List columns, AccumuloRowSerializer serializer) - { - // Set our value to the row ID - Text value = new Text(); - Field rowField = row.getField(rowIdOrdinal); - if (rowField.isNull()) { - throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Column mapped as the Accumulo row ID cannot be null"); - } - - setText(rowField, value, serializer); - - // Iterate through all the column handles, setting the Mutation's columns - Mutation mutation = new Mutation(value); - - // Store row ID in a special column - mutation.put(ROW_ID_COLUMN, ROW_ID_COLUMN, new Value(value.copyBytes())); - for (AccumuloColumnHandle columnHandle : columns) { - // Skip the row ID ordinal - if (columnHandle.getOrdinal() == rowIdOrdinal) { - continue; - } - - // If the value of the field is not null - if (!row.getField(columnHandle.getOrdinal()).isNull()) { - // Serialize the value to the text - setText(row.getField(columnHandle.getOrdinal()), value, serializer); - - // And add the bytes to the Mutation - mutation.put(columnHandle.getFamily().get(), columnHandle.getQualifier().get(), new Value(value.copyBytes())); - } - } - - return mutation; - } - - private static void setText(Field field, Text value, AccumuloRowSerializer serializer) - { - Type type = field.getType(); - if (Types.isArrayType(type)) { - serializer.setArray(value, type, field.getArray()); - } - else if (Types.isMapType(type)) { - serializer.setMap(value, type, field.getMap()); - } - else { - if (type.equals(BIGINT)) { - serializer.setLong(value, field.getLong()); - } - else if (type.equals(BOOLEAN)) { - serializer.setBoolean(value, field.getBoolean()); - } - else if (type.equals(DATE)) { - serializer.setDate(value, field.getDate()); - } - else if (type.equals(DOUBLE)) { - serializer.setDouble(value, field.getDouble()); - } - else if (type.equals(INTEGER)) { - serializer.setInt(value, field.getInt()); - } - else if (type.equals(REAL)) { - serializer.setFloat(value, field.getFloat()); - } - else if (type.equals(SMALLINT)) { - serializer.setShort(value, field.getShort()); - } - else if (type.equals(TIME)) { - serializer.setTime(value, field.getTime()); - } - else if (type.equals(TINYINT)) { - serializer.setByte(value, field.getByte()); - } - else if (type.equals(TIMESTAMP)) { - serializer.setTimestamp(value, field.getTimestamp()); - } - else if (type.equals(VARBINARY)) { - serializer.setVarbinary(value, field.getVarbinary()); - } - else if (type instanceof VarcharType) { - serializer.setVarchar(value, field.getVarchar()); - } - else { - throw new UnsupportedOperationException("Unsupported type " + type); - } - } - } - - @Override - public CompletableFuture appendPage(Page page) - { - // For each position within the page, i.e. row - for (int position = 0; position < page.getPositionCount(); ++position) { - Row row = new Row(); - // For each channel within the page, i.e. 
column - for (int channel = 0; channel < page.getChannelCount(); ++channel) { - // Get the type for this channel - Type type = columns.get(channel).getType(); - - // Read the value from the page and append the field to the row - row.addField(TypeUtils.readNativeValue(type, page.getBlock(channel), position), type); - } - - try { - // Convert row to a Mutation, writing and indexing it - Mutation mutation = toMutation(row, rowIdOrdinal, columns, serializer); - writer.addMutation(mutation); - if (indexer.isPresent()) { - indexer.get().index(mutation); - } - ++numRows; - } - catch (MutationsRejectedException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Mutation rejected by server", e); - } - - // TODO Fix arbitrary flush every 100k rows - if (numRows % 100_000 == 0) { - flush(); - } - } - - return NOT_BLOCKED; - } - - @Override - public CompletableFuture> finish() - { - try { - // Done serializing rows, so flush and close the writer and indexer - writer.flush(); - writer.close(); - if (indexer.isPresent()) { - indexer.get().close(); - } - } - catch (MutationsRejectedException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Mutation rejected by server on flush", e); - } - - // TODO Look into any use of the metadata for writing out the rows - return completedFuture(ImmutableList.of()); - } - - @Override - public void abort() - { - getFutureValue(finish()); - } - - private void flush() - { - try { - if (indexer.isPresent()) { - indexer.get().flush(); - // MetricsWriter is non-null if Indexer is present - } - writer.flush(); - } - catch (MutationsRejectedException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Mutation rejected by server on flush", e); - } - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloPageSinkProvider.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloPageSinkProvider.java deleted file mode 100644 index 7a75181cd6ac..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloPageSinkProvider.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.io; - -import io.prestosql.plugin.accumulo.AccumuloClient; -import io.prestosql.plugin.accumulo.conf.AccumuloConfig; -import io.prestosql.plugin.accumulo.model.AccumuloTableHandle; -import io.prestosql.spi.connector.ConnectorInsertTableHandle; -import io.prestosql.spi.connector.ConnectorOutputTableHandle; -import io.prestosql.spi.connector.ConnectorPageSink; -import io.prestosql.spi.connector.ConnectorPageSinkProvider; -import io.prestosql.spi.connector.ConnectorSession; -import io.prestosql.spi.connector.ConnectorTransactionHandle; -import org.apache.accumulo.core.client.Connector; - -import javax.inject.Inject; - -import static java.util.Objects.requireNonNull; - -/** - * Page sink provider for Accumulo connector. Creates {@link AccumuloPageSink} objects for output tables (CTAS) and inserts. 
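Note that the sink above pairs a default BatchWriterConfig with the manual flush every 100k rows flagged in the TODO. For comparison, standalone Accumulo code would usually tune the writer's buffering instead; a sketch with illustrative values only, not the connector's settings:

    import java.util.concurrent.TimeUnit;

    import org.apache.accumulo.core.client.BatchWriterConfig;

    public final class WriterConfigSketch
    {
        private WriterConfigSketch() {}

        public static BatchWriterConfig tunedConfig()
        {
            return new BatchWriterConfig()
                    .setMaxMemory(64 * 1024 * 1024)      // buffer up to 64 MB of mutations client-side
                    .setMaxLatency(30, TimeUnit.SECONDS) // send buffered data at least every 30 seconds
                    .setMaxWriteThreads(4);              // threads used to send to tablet servers
        }
    }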
- * - * @see AccumuloPageSink - */ -public class AccumuloPageSinkProvider - implements ConnectorPageSinkProvider -{ - private final AccumuloClient client; - private final Connector connector; - private final String username; - - @Inject - public AccumuloPageSinkProvider( - Connector connector, - AccumuloConfig config, - AccumuloClient client) - { - this.client = requireNonNull(client, "client is null"); - this.connector = requireNonNull(connector, "connector is null"); - this.username = requireNonNull(config, "config is null").getUsername(); - } - - @Override - public ConnectorPageSink createPageSink(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorOutputTableHandle outputTableHandle) - { - AccumuloTableHandle tableHandle = (AccumuloTableHandle) outputTableHandle; - return new AccumuloPageSink(connector, client.getTable(tableHandle.toSchemaTableName()), username); - } - - @Override - public ConnectorPageSink createPageSink(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorInsertTableHandle insertTableHandle) - { - return createPageSink(transactionHandle, session, (ConnectorOutputTableHandle) insertTableHandle); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordCursor.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordCursor.java deleted file mode 100644 index 6f1c4fd64b4e..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordCursor.java +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.io; - -import io.airlift.slice.Slice; -import io.airlift.slice.Slices; -import io.prestosql.plugin.accumulo.Types; -import io.prestosql.plugin.accumulo.model.AccumuloColumnHandle; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.connector.RecordCursor; -import io.prestosql.spi.type.Type; -import io.prestosql.spi.type.VarbinaryType; -import io.prestosql.spi.type.VarcharType; -import org.apache.accumulo.core.client.BatchScanner; -import org.apache.accumulo.core.client.IteratorSetting; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.iterators.FirstEntryInRowIterator; -import org.apache.accumulo.core.iterators.user.WholeRowIterator; -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.io.Text; - -import java.io.IOException; -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; - -import static com.google.common.base.Preconditions.checkArgument; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.IO_ERROR; -import static io.prestosql.plugin.accumulo.io.AccumuloPageSink.ROW_ID_COLUMN; -import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED; -import static io.prestosql.spi.type.BigintType.BIGINT; -import static io.prestosql.spi.type.BooleanType.BOOLEAN; -import static io.prestosql.spi.type.DateType.DATE; -import static io.prestosql.spi.type.DoubleType.DOUBLE; -import static io.prestosql.spi.type.IntegerType.INTEGER; -import static io.prestosql.spi.type.RealType.REAL; -import static io.prestosql.spi.type.SmallintType.SMALLINT; -import static io.prestosql.spi.type.TimeType.TIME; -import static io.prestosql.spi.type.TimestampType.TIMESTAMP; -import static io.prestosql.spi.type.TinyintType.TINYINT; -import static java.lang.String.format; -import static java.util.Objects.requireNonNull; -import static java.util.concurrent.TimeUnit.MILLISECONDS; - -/** - * Implementation of Presto RecordCursor, responsible for iterating over a Presto split, - * reading rows of data and then implementing various methods to retrieve columns within each row. 
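As background for the getter methods below: the engine drives a RecordCursor by calling advanceNextPosition() until it returns false, checking isNull() and then invoking the typed getter that matches each column's declared type. A simplified sketch of that calling pattern; the caller is hypothetical, and it assumes field 0 is BIGINT and field 1 is VARCHAR:

    import io.prestosql.spi.connector.RecordCursor;

    public final class CursorSketch
    {
        private CursorSketch() {}

        public static void drain(RecordCursor cursor)
        {
            while (cursor.advanceNextPosition()) {
                long id = cursor.isNull(0) ? 0L : cursor.getLong(0);                       // BIGINT field
                String name = cursor.isNull(1) ? null : cursor.getSlice(1).toStringUtf8(); // VARCHAR field
                // a real caller would hand (id, name) back to the engine here
            }
            cursor.close();
        }
    }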
- * - * @see AccumuloRecordSet - * @see AccumuloRecordSetProvider - */ -public class AccumuloRecordCursor - implements RecordCursor -{ - private static final int WHOLE_ROW_ITERATOR_PRIORITY = Integer.MAX_VALUE; - - private final List columnHandles; - private final String[] fieldToColumnName; - private final BatchScanner scanner; - private final Iterator> iterator; - private final AccumuloRowSerializer serializer; - - private long bytesRead; - - public AccumuloRecordCursor( - AccumuloRowSerializer serializer, - BatchScanner scanner, - String rowIdName, - List columnHandles) - { - this.columnHandles = requireNonNull(columnHandles, "columnHandles is null"); - this.scanner = requireNonNull(scanner, "scanner is null"); - this.serializer = requireNonNull(serializer, "serializer is null"); - this.serializer.setRowIdName(requireNonNull(rowIdName, "rowIdName is null")); - - requireNonNull(columnHandles, "columnHandles is null"); - - if (retrieveOnlyRowIds(rowIdName)) { - this.scanner.addScanIterator(new IteratorSetting(1, "firstentryiter", FirstEntryInRowIterator.class)); - - fieldToColumnName = new String[1]; - fieldToColumnName[0] = rowIdName; - - // Set a flag on the serializer saying we are only going to be retrieving the row ID - this.serializer.setRowOnly(true); - } - else { - // Else, we will be scanning some more columns here - this.serializer.setRowOnly(false); - - // Fetch the reserved row ID column - this.scanner.fetchColumn(ROW_ID_COLUMN, ROW_ID_COLUMN); - - Text family = new Text(); - Text qualifier = new Text(); - - // Create an array which maps the column ordinal to the name of the column - fieldToColumnName = new String[columnHandles.size()]; - for (int i = 0; i < columnHandles.size(); ++i) { - AccumuloColumnHandle columnHandle = columnHandles.get(i); - fieldToColumnName[i] = columnHandle.getName(); - - // Make sure to skip the row ID! 
- if (!columnHandle.getName().equals(rowIdName)) { - // Set the mapping of presto column name to the family/qualifier - this.serializer.setMapping(columnHandle.getName(), columnHandle.getFamily().get(), columnHandle.getQualifier().get()); - - // Set our scanner to fetch this family/qualifier column - // This will help us prune which data we receive from Accumulo - family.set(columnHandle.getFamily().get()); - qualifier.set(columnHandle.getQualifier().get()); - this.scanner.fetchColumn(family, qualifier); - } - } - } - - IteratorSetting setting = new IteratorSetting(WHOLE_ROW_ITERATOR_PRIORITY, WholeRowIterator.class); - scanner.addScanIterator(setting); - - iterator = this.scanner.iterator(); - } - - @Override - public long getCompletedBytes() - { - return bytesRead; - } - - @Override - public long getReadTimeNanos() - { - return 0; - } - - @Override - public Type getType(int field) - { - checkArgument(field >= 0 && field < columnHandles.size(), "Invalid field index"); - return columnHandles.get(field).getType(); - } - - @Override - public boolean advanceNextPosition() - { - try { - if (iterator.hasNext()) { - serializer.reset(); - Entry row = iterator.next(); - for (Entry entry : WholeRowIterator.decodeRow(row.getKey(), row.getValue()).entrySet()) { - bytesRead += entry.getKey().getSize() + entry.getValue().getSize(); - serializer.deserialize(entry); - } - return true; - } - else { - return false; - } - } - catch (IOException e) { - throw new PrestoException(IO_ERROR, "Caught IO error from serializer on read", e); - } - } - - @Override - public boolean isNull(int field) - { - checkArgument(field < columnHandles.size(), "Invalid field index"); - return serializer.isNull(fieldToColumnName[field]); - } - - @Override - public boolean getBoolean(int field) - { - checkFieldType(field, BOOLEAN); - return serializer.getBoolean(fieldToColumnName[field]); - } - - @Override - public double getDouble(int field) - { - checkFieldType(field, DOUBLE); - return serializer.getDouble(fieldToColumnName[field]); - } - - @Override - public long getLong(int field) - { - checkFieldType(field, BIGINT, DATE, INTEGER, REAL, SMALLINT, TIME, TIMESTAMP, TINYINT); - Type type = getType(field); - if (type.equals(BIGINT)) { - return serializer.getLong(fieldToColumnName[field]); - } - else if (type.equals(DATE)) { - return MILLISECONDS.toDays(serializer.getDate(fieldToColumnName[field]).getTime()); - } - else if (type.equals(INTEGER)) { - return serializer.getInt(fieldToColumnName[field]); - } - else if (type.equals(REAL)) { - return Float.floatToIntBits(serializer.getFloat(fieldToColumnName[field])); - } - else if (type.equals(SMALLINT)) { - return serializer.getShort(fieldToColumnName[field]); - } - else if (type.equals(TIME)) { - return serializer.getTime(fieldToColumnName[field]).getTime(); - } - else if (type.equals(TIMESTAMP)) { - return serializer.getTimestamp(fieldToColumnName[field]).getTime(); - } - else if (type.equals(TINYINT)) { - return serializer.getByte(fieldToColumnName[field]); - } - else { - throw new PrestoException(NOT_SUPPORTED, "Unsupported type " + getType(field)); - } - } - - @Override - public Object getObject(int field) - { - Type type = getType(field); - checkArgument(Types.isArrayType(type) || Types.isMapType(type), "Expected field %s to be a type of array or map but is %s", field, type); - - if (Types.isArrayType(type)) { - return serializer.getArray(fieldToColumnName[field], type); - } - - return serializer.getMap(fieldToColumnName[field], type); - } - - @Override - public Slice getSlice(int 
field) - { - Type type = getType(field); - if (type instanceof VarbinaryType) { - return Slices.wrappedBuffer(serializer.getVarbinary(fieldToColumnName[field])); - } - else if (type instanceof VarcharType) { - return Slices.utf8Slice(serializer.getVarchar(fieldToColumnName[field])); - } - else { - throw new PrestoException(NOT_SUPPORTED, "Unsupported type " + type); - } - } - - @Override - public void close() - { - scanner.close(); - } - - /** - * Gets a Boolean value indicating whether or not the scanner should only return row IDs. - *
<p>
- * This can occur in cases such as SELECT COUNT(*) or when the table has only one column. - * Presto doesn't need the entire contents of the row to count them, - * so we can configure Accumulo to only give us the first key/value pair in the row. - * - * @param rowIdName Row ID column name - * @return True if the scanner should retrieve only row IDs, false otherwise - */ - private boolean retrieveOnlyRowIds(String rowIdName) - { - return columnHandles.isEmpty() || (columnHandles.size() == 1 && columnHandles.get(0).getName().equals(rowIdName)); - } - - /** - * Checks that the given field is one of the provided types. - * - * @param field Ordinal of the field - * @param expected An array of expected types - * @throws IllegalArgumentException If the given field does not match one of the types - */ - private void checkFieldType(int field, Type... expected) - { - Type actual = getType(field); - for (Type type : expected) { - if (actual.equals(type)) { - return; - } - } - - throw new IllegalArgumentException(format("Expected field %s to be a type of %s but is %s", field, StringUtils.join(expected, ","), actual)); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordSet.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordSet.java deleted file mode 100644 index 82cc65bca8fe..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordSet.java +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.io; - -import com.google.common.base.Splitter; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Iterables; -import io.airlift.log.Logger; -import io.prestosql.plugin.accumulo.conf.AccumuloSessionProperties; -import io.prestosql.plugin.accumulo.model.AccumuloColumnHandle; -import io.prestosql.plugin.accumulo.model.AccumuloSplit; -import io.prestosql.plugin.accumulo.model.AccumuloTableHandle; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.connector.ConnectorSession; -import io.prestosql.spi.connector.RecordCursor; -import io.prestosql.spi.connector.RecordSet; -import io.prestosql.spi.type.Type; -import org.apache.accumulo.core.client.AccumuloException; -import org.apache.accumulo.core.client.AccumuloSecurityException; -import org.apache.accumulo.core.client.BatchScanner; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.security.Authorizations; - -import java.util.List; -import java.util.Optional; - -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.UNEXPECTED_ACCUMULO_ERROR; -import static java.lang.String.format; -import static java.util.Objects.requireNonNull; - -/** - * Implementation of a Presto RecordSet, responsible for returning the column types and the RecordCursor to the framework.
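The retrieveOnlyRowIds() optimization above hinges on Accumulo's FirstEntryInRowIterator: with it attached, a scan returns exactly one key/value pair per row, which is all a COUNT(*) needs. A standalone sketch of the same idea; the helper is hypothetical, not connector code:

    import java.util.Map;

    import org.apache.accumulo.core.client.IteratorSetting;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.iterators.FirstEntryInRowIterator;

    public final class RowCountSketch
    {
        private RowCountSketch() {}

        public static long countRows(Scanner scanner)
        {
            // Same setting the cursor above installs: priority 1, named "firstentryiter"
            scanner.addScanIterator(new IteratorSetting(1, "firstentryiter", FirstEntryInRowIterator.class));
            long rows = 0;
            for (Map.Entry<Key, Value> ignored : scanner) {
                rows++; // the iterator yields one entry per row
            }
            return rows;
        }
    }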
- * - * @see AccumuloRecordCursor - * @see AccumuloRecordSetProvider - */ -public class AccumuloRecordSet - implements RecordSet -{ - private static final Logger LOG = Logger.get(AccumuloRecordSet.class); - private static final Splitter COMMA_SPLITTER = Splitter.on(',').omitEmptyStrings().trimResults(); - - private final List<AccumuloColumnHandle> columnHandles; - private final List<Type> columnTypes; - private final AccumuloRowSerializer serializer; - private final BatchScanner scanner; - private final String rowIdName; - - public AccumuloRecordSet( - Connector connector, - ConnectorSession session, - AccumuloSplit split, - String username, - AccumuloTableHandle table, - List<AccumuloColumnHandle> columnHandles) - { - requireNonNull(session, "session is null"); - requireNonNull(split, "split is null"); - requireNonNull(username, "username is null"); - requireNonNull(table, "table is null"); - - rowIdName = table.getRowId(); - - serializer = table.getSerializerInstance(); - - // Save off the column handles and create a list of the Accumulo types - this.columnHandles = requireNonNull(columnHandles, "columnHandles is null"); - ImmutableList.Builder<Type> types = ImmutableList.builder(); - for (AccumuloColumnHandle column : columnHandles) { - types.add(column.getType()); - } - this.columnTypes = types.build(); - - try { - // Create the BatchScanner and set the ranges from the split - scanner = connector.createBatchScanner(table.getFullTableName(), getScanAuthorizations(session, table, connector, username), 10); - scanner.setRanges(split.getRanges()); - } - catch (Exception e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, format("Failed to create batch scanner for table %s", table.getFullTableName()), e); - } - } - - /** - * Gets the scanner authorizations to use for scanning tables. - *
<p>
- * In order of priority: session username authorizations, then table property, then the default connector auths. - * - * @param session Current session - * @param table Accumulo table - * @param connector Accumulo connector - * @param username Accumulo username - * @return Scan authorizations - * @throws AccumuloException If a generic Accumulo error occurs - * @throws AccumuloSecurityException If a security exception occurs - */ - private static Authorizations getScanAuthorizations(ConnectorSession session, AccumuloTableHandle table, Connector connector, String username) - throws AccumuloException, AccumuloSecurityException - { - String sessionScanUser = AccumuloSessionProperties.getScanUsername(session); - if (sessionScanUser != null) { - Authorizations scanAuths = connector.securityOperations().getUserAuthorizations(sessionScanUser); - LOG.debug("Using session scanner auths for user %s: %s", sessionScanUser, scanAuths); - return scanAuths; - } - - Optional scanAuths = table.getScanAuthorizations(); - if (scanAuths.isPresent()) { - Authorizations auths = new Authorizations(Iterables.toArray(COMMA_SPLITTER.split(scanAuths.get()), String.class)); - LOG.debug("scan_auths table property set: %s", auths); - return auths; - } - else { - Authorizations auths = connector.securityOperations().getUserAuthorizations(username); - LOG.debug("scan_auths table property not set, using user auths: %s", auths); - return auths; - } - } - - @Override - public List getColumnTypes() - { - return columnTypes; - } - - @Override - public RecordCursor cursor() - { - return new AccumuloRecordCursor(serializer, scanner, rowIdName, columnHandles); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordSetProvider.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordSetProvider.java deleted file mode 100644 index f71ddbf953df..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/io/AccumuloRecordSetProvider.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.io; - -import io.prestosql.plugin.accumulo.conf.AccumuloConfig; -import io.prestosql.plugin.accumulo.model.AccumuloColumnHandle; -import io.prestosql.plugin.accumulo.model.AccumuloSplit; -import io.prestosql.plugin.accumulo.model.AccumuloTableHandle; -import io.prestosql.spi.connector.ColumnHandle; -import io.prestosql.spi.connector.ConnectorRecordSetProvider; -import io.prestosql.spi.connector.ConnectorSession; -import io.prestosql.spi.connector.ConnectorSplit; -import io.prestosql.spi.connector.ConnectorTableHandle; -import io.prestosql.spi.connector.ConnectorTransactionHandle; -import io.prestosql.spi.connector.RecordSet; -import org.apache.accumulo.core.client.Connector; - -import javax.inject.Inject; - -import java.util.List; - -import static com.google.common.collect.ImmutableList.toImmutableList; -import static java.util.Objects.requireNonNull; - -/** - * Implementation of a ConnectorRecordSetProvider for Accumulo. Generates {@link AccumuloRecordSet} objects for a provided split. - * - * @see AccumuloRecordSet - * @see AccumuloRecordCursor - */ -public class AccumuloRecordSetProvider - implements ConnectorRecordSetProvider -{ - private final Connector connector; - private final String username; - - @Inject - public AccumuloRecordSetProvider( - Connector connector, - AccumuloConfig config) - { - this.connector = requireNonNull(connector, "connector is null"); - this.username = requireNonNull(config, "config is null").getUsername(); - } - - @Override - public RecordSet getRecordSet(ConnectorTransactionHandle transaction, ConnectorSession session, ConnectorSplit split, ConnectorTableHandle table, List columns) - { - AccumuloSplit accSplit = (AccumuloSplit) split; - AccumuloTableHandle accTable = (AccumuloTableHandle) table; - - List accColumns = columns.stream() - .map(AccumuloColumnHandle.class::cast) - .collect(toImmutableList()); - - return new AccumuloRecordSet(connector, session, accSplit, username, accTable, accColumns); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/iterators/MaxByteArrayCombiner.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/iterators/MaxByteArrayCombiner.java deleted file mode 100644 index d171a35cf99c..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/iterators/MaxByteArrayCombiner.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.iterators; - -import com.google.common.primitives.UnsignedBytes; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.iterators.Combiner; - -import java.util.Comparator; -import java.util.Iterator; - -/** - * A Combiner that does a lexicographic compare against values, returning the 'largest' value - */ -public class MaxByteArrayCombiner - extends Combiner -{ - private final Comparator comparator = UnsignedBytes.lexicographicalComparator(); - - @Override - public Value reduce(Key key, Iterator iter) - { - Value max = null; - while (iter.hasNext()) { - Value test = iter.next(); - if (max == null) { - max = new Value(test.get()); - } - else if (comparator.compare(test.get(), max.get()) > 0) { - max.set(test.get()); - } - } - return max; - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/iterators/MinByteArrayCombiner.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/iterators/MinByteArrayCombiner.java deleted file mode 100644 index 517540d6a6d8..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/iterators/MinByteArrayCombiner.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.iterators; - -import com.google.common.primitives.UnsignedBytes; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.iterators.Combiner; - -import java.util.Comparator; -import java.util.Iterator; - -/** - * A Combiner that does a lexicographic compare against values, returning the 'smallest' value - */ -public class MinByteArrayCombiner - extends Combiner -{ - private final Comparator comparator = UnsignedBytes.lexicographicalComparator(); - - @Override - public Value reduce(Key key, Iterator iter) - { - Value min = null; - while (iter.hasNext()) { - Value test = iter.next(); - if (min == null) { - min = new Value(test.get()); - } - else if (comparator.compare(test.get(), min.get()) < 0) { - min.set(test.get()); - } - } - return min; - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/AccumuloTable.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/AccumuloTable.java deleted file mode 100644 index c530b4e609be..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/AccumuloTable.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.metadata; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.ImmutableList; -import io.prestosql.plugin.accumulo.index.Indexer; -import io.prestosql.plugin.accumulo.model.AccumuloColumnHandle; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.connector.ColumnMetadata; -import io.prestosql.spi.connector.SchemaTableName; - -import java.lang.reflect.InvocationTargetException; -import java.util.List; -import java.util.Optional; - -import static com.google.common.base.MoreObjects.toStringHelper; -import static io.prestosql.spi.StandardErrorCode.NOT_FOUND; -import static java.util.Objects.requireNonNull; - -/** - * This class encapsulates metadata regarding an Accumulo table in Presto. - */ -public class AccumuloTable -{ - private final boolean external; - private final Integer rowIdOrdinal; - private final String schema; - private final String serializerClassName; - private final Optional<String> scanAuthorizations; - private final List<ColumnMetadata> columnsMetadata; - private final boolean indexed; - private final List<AccumuloColumnHandle> columns; - private final String rowId; - private final String table; - private final SchemaTableName schemaTableName; - - @JsonCreator - public AccumuloTable( - @JsonProperty("schema") String schema, - @JsonProperty("table") String table, - @JsonProperty("columns") List<AccumuloColumnHandle> columns, - @JsonProperty("rowId") String rowId, - @JsonProperty("external") boolean external, - @JsonProperty("serializerClassName") String serializerClassName, - @JsonProperty("scanAuthorizations") Optional<String> scanAuthorizations) - { - this.external = external; - this.rowId = requireNonNull(rowId, "rowId is null"); - this.schema = requireNonNull(schema, "schema is null"); - this.table = requireNonNull(table, "table is null"); - this.columns = ImmutableList.copyOf(requireNonNull(columns, "columns are null")); - this.serializerClassName = requireNonNull(serializerClassName, "serializerClassName is null"); - this.scanAuthorizations = scanAuthorizations; - - boolean indexed = false; - Optional<Integer> rowIdOrdinal = Optional.empty(); - - // Extract the ColumnMetadata from the handles for faster access - ImmutableList.Builder<ColumnMetadata> columnMetadataBuilder = ImmutableList.builder(); - for (AccumuloColumnHandle column : this.columns) { - columnMetadataBuilder.add(column.getColumnMetadata()); - indexed |= column.isIndexed(); - if (column.getName().equals(this.rowId)) { - rowIdOrdinal = Optional.of(column.getOrdinal()); - } - } - - if (rowIdOrdinal.isPresent()) { - this.rowIdOrdinal = rowIdOrdinal.get(); - } - else { - throw new IllegalArgumentException("rowIdOrdinal is null, unable to locate rowId in given column list"); - } - - this.indexed = indexed; - this.columnsMetadata = columnMetadataBuilder.build(); - this.schemaTableName = new SchemaTableName(this.schema, this.table); - } - - @JsonProperty - public String getRowId() - { - return rowId; - } - - @JsonProperty - public String getSchema() - { - return schema; - } - - @JsonProperty - public String getTable() - { - return table; - } - - @JsonIgnore - public String getIndexTableName() - { - return Indexer.getIndexTableName(schema, table); - } - - @JsonIgnore - public String getMetricsTableName() - { - return
Indexer.getMetricsTableName(schema, table); - } - - @JsonIgnore - public String getFullTableName() - { - return getFullTableName(schema, table); - } - - @JsonProperty - public List getColumns() - { - return columns; - } - - @JsonProperty - public Optional getScanAuthorizations() - { - return scanAuthorizations; - } - - @JsonProperty - public String getSerializerClassName() - { - return serializerClassName; - } - - @JsonIgnore - public List getColumnsMetadata() - { - return columnsMetadata; - } - - @JsonProperty - public boolean isExternal() - { - return external; - } - - @JsonIgnore - public boolean isIndexed() - { - return indexed; - } - - @JsonIgnore - public int getRowIdOrdinal() - { - return this.rowIdOrdinal; - } - - @JsonIgnore - public AccumuloRowSerializer getSerializerInstance() - { - try { - return (AccumuloRowSerializer) Class.forName(serializerClassName).getConstructor().newInstance(); - } - catch (ClassNotFoundException | InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException e) { - throw new PrestoException(NOT_FOUND, "Configured serializer class not found", e); - } - } - - @JsonIgnore - public static String getFullTableName(String schema, String table) - { - return schema.equals("default") ? table : schema + '.' + table; - } - - @JsonIgnore - public static String getFullTableName(SchemaTableName tableName) - { - return getFullTableName(tableName.getSchemaName(), tableName.getTableName()); - } - - @JsonIgnore - public SchemaTableName getSchemaTableName() - { - return schemaTableName; - } - - @Override - public String toString() - { - return toStringHelper(this) - .add("schemaName", schema) - .add("tableName", table) - .add("columns", columns) - .add("rowIdName", rowId) - .add("external", external) - .add("serializerClassName", serializerClassName) - .add("scanAuthorizations", scanAuthorizations) - .toString(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/AccumuloView.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/AccumuloView.java deleted file mode 100644 index 551e9449524f..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/AccumuloView.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.metadata; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonProperty; -import io.prestosql.spi.connector.SchemaTableName; - -import java.util.Objects; - -import static com.google.common.base.MoreObjects.toStringHelper; -import static java.util.Objects.requireNonNull; - -/** - * This class encapsulates metadata regarding an Accumulo view in Presto. 
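Tables and views share the same znode namespace in ZooKeeper; as the ZooKeeperMetadataManager below shows, a payload is treated as a view exactly when its JSON carries a "data" field (the view definition), which AccumuloView serializes and AccumuloTable does not. The check reduces to a one-liner with Jackson; a sketch under that assumption:

    import java.io.IOException;

    import com.fasterxml.jackson.databind.ObjectMapper;

    public final class ViewCheckSketch
    {
        private ViewCheckSketch() {}

        public static boolean isViewPayload(ObjectMapper mapper, byte[] znodeData)
                throws IOException
        {
            // AccumuloView serializes its SQL under "data"; AccumuloTable has no such field
            return mapper.readTree(znodeData).has("data");
        }
    }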
- */ -public class AccumuloView -{ - private final String schema; - private final String table; - private final String data; - private final SchemaTableName schemaTableName; - - @JsonCreator - public AccumuloView( - @JsonProperty("schema") String schema, - @JsonProperty("table") String table, - @JsonProperty("data") String data) - { - this.schema = requireNonNull(schema, "schema is null"); - this.table = requireNonNull(table, "table is null"); - this.data = requireNonNull(data, "data is null"); - this.schemaTableName = new SchemaTableName(schema, table); - } - - @JsonProperty - public String getSchema() - { - return schema; - } - - @JsonProperty - public String getTable() - { - return table; - } - - @JsonProperty - public String getData() - { - return data; - } - - @JsonIgnore - public SchemaTableName getSchemaTableName() - { - return schemaTableName; - } - - @Override - public int hashCode() - { - return Objects.hash(schema, table, data); - } - - @Override - public boolean equals(Object obj) - { - if (this == obj) { - return true; - } - if ((obj == null) || (getClass() != obj.getClass())) { - return false; - } - - AccumuloView other = (AccumuloView) obj; - return Objects.equals(this.schema, other.schema) - && Objects.equals(this.table, other.table) - && Objects.equals(this.data, other.data); - } - - @Override - public String toString() - { - return toStringHelper(this) - .add("schema", schema) - .add("table", table) - .add("data", data) - .toString(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/ZooKeeperMetadataManager.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/ZooKeeperMetadataManager.java deleted file mode 100644 index e582cadc32e6..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/metadata/ZooKeeperMetadataManager.java +++ /dev/null @@ -1,333 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.metadata; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import io.airlift.json.ObjectMapperProvider; -import io.prestosql.plugin.accumulo.AccumuloModule; -import io.prestosql.plugin.accumulo.conf.AccumuloConfig; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.connector.SchemaTableName; -import io.prestosql.spi.type.Type; -import io.prestosql.spi.type.TypeManager; -import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.framework.CuratorFrameworkFactory; -import org.apache.curator.retry.RetryForever; -import org.apache.zookeeper.KeeperException; - -import javax.inject.Inject; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Locale; -import java.util.Set; - -import static com.google.common.collect.ImmutableSet.toImmutableSet; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.ZOOKEEPER_ERROR; -import static java.lang.String.format; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.Objects.requireNonNull; -import static org.apache.zookeeper.KeeperException.Code.NONODE; - -public class ZooKeeperMetadataManager -{ - private static final String DEFAULT_SCHEMA = "default"; - - private final CuratorFramework curator; - private final ObjectMapper mapper; - - @Inject - public ZooKeeperMetadataManager(AccumuloConfig config, TypeManager typeManager) - { - requireNonNull(typeManager, "typeManager is null"); - - // Create JSON deserializer for the AccumuloTable - ObjectMapperProvider objectMapperProvider = new ObjectMapperProvider(); - objectMapperProvider.setJsonDeserializers(ImmutableMap.of(Type.class, new AccumuloModule.TypeDeserializer(typeManager))); - mapper = objectMapperProvider.get(); - - String zkMetadataRoot = config.getZkMetadataRoot(); - String zookeepers = config.getZooKeepers(); - - // Create the connection to ZooKeeper to check if the metadata root exists - CuratorFramework checkRoot = CuratorFrameworkFactory.newClient(zookeepers, new RetryForever(1000)); - checkRoot.start(); - - try { - // If the metadata root does not exist, create it - if (checkRoot.checkExists().forPath(zkMetadataRoot) == null) { - checkRoot.create().forPath(zkMetadataRoot); - } - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "ZK error checking metadata root", e); - } - checkRoot.close(); - - // Create the curator client framework to use for metadata management, set at the ZK root - curator = CuratorFrameworkFactory.newClient(zookeepers + zkMetadataRoot, new RetryForever(1000)); - curator.start(); - - try { - // Create default schema should it not exist - if (curator.checkExists().forPath("/" + DEFAULT_SCHEMA) == null) { - curator.create().forPath("/" + DEFAULT_SCHEMA); - } - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "ZK error checking/creating default schema", e); - } - } - - public Set getSchemaNames() - { - try { - Set schemas = new HashSet<>(); - schemas.addAll(curator.getChildren().forPath("/")); - return schemas; - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "Error fetching schemas", e); - } - } - - public Set getTableNames(String schema) - { - String schemaPath = getSchemaPath(schema); - try { - if (curator.checkExists().forPath(schemaPath) == null) { - return ImmutableSet.of(); - } - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "Error checking if 
schema exists", e); - } - - try { - return curator.getChildren().forPath(schemaPath).stream() - .filter(x -> isAccumuloTable(new SchemaTableName(schema, x))) - .collect(toImmutableSet()); - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "Error fetching schemas", e); - } - } - - public AccumuloTable getTable(SchemaTableName stName) - { - try { - if (curator.checkExists().forPath(getTablePath(stName)) != null) { - return toAccumuloTable(curator.getData().forPath(getTablePath(stName))); - } - - return null; - } - catch (Exception e) { - // Capture race condition between checkExists and getData - if (e instanceof KeeperException && ((KeeperException) e).code() == NONODE) { - return null; - } - - throw new PrestoException(ZOOKEEPER_ERROR, "Error fetching table", e); - } - } - - public Set getViewNames(String schema) - { - String schemaPath = getSchemaPath(schema); - try { - if (curator.checkExists().forPath(schemaPath) == null) { - return ImmutableSet.of(); - } - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "Error checking if schema exists", e); - } - - try { - return curator.getChildren().forPath(schemaPath).stream() - .filter(x -> isAccumuloView(new SchemaTableName(schema, x))) - .collect(toImmutableSet()); - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "Error fetching schemas", e); - } - } - - public AccumuloView getView(SchemaTableName stName) - { - try { - String tablePath = getTablePath(stName); - if (curator.checkExists().forPath(tablePath) != null) { - byte[] data = curator.getData().forPath(tablePath); - if (isAccumuloView(data)) { - return toAccumuloView(data); - } - } - - return null; - } - catch (Exception e) { - // Capture race condition between checkExists and getData - if (e instanceof KeeperException && ((KeeperException) e).code() == NONODE) { - return null; - } - - throw new PrestoException(ZOOKEEPER_ERROR, "Error fetching view", e); - } - } - - public void createTableMetadata(AccumuloTable table) - { - SchemaTableName tableName = table.getSchemaTableName(); - String tablePath = getTablePath(tableName); - try { - if (curator.checkExists().forPath(tablePath) != null) { - throw new IOException(format("Metadata for table %s already exists", tableName)); - } - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "ZK error when checking if table already exists", e); - } - - try { - curator.create().creatingParentsIfNeeded().forPath(tablePath, toJsonBytes(table)); - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "Error creating table znode in ZooKeeper", e); - } - } - - public void deleteTableMetadata(SchemaTableName tableName) - { - try { - curator.delete().deletingChildrenIfNeeded().forPath(getTablePath(tableName)); - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "ZK error when deleting table metadata", e); - } - } - - public void createViewMetadata(AccumuloView view) - { - SchemaTableName tableName = view.getSchemaTableName(); - String viewPath = getTablePath(tableName); - try { - if (curator.checkExists().forPath(viewPath) != null) { - throw new IOException(format("Metadata for view %s already exists", tableName)); - } - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "ZK error when checking if view already exists", e); - } - - try { - curator.create().creatingParentsIfNeeded().forPath(viewPath, toJsonBytes(view)); - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "Error creating view znode in 
ZooKeeper", e); - } - } - - public void deleteViewMetadata(SchemaTableName tableName) - { - try { - curator.delete().deletingChildrenIfNeeded().forPath(getTablePath(tableName)); - } - catch (Exception e) { - throw new PrestoException(ZOOKEEPER_ERROR, "ZK error when deleting view metadata", e); - } - } - - private static String getSchemaPath(String schema) - { - return "/" + schema.toLowerCase(Locale.ENGLISH); - } - - private static String getSchemaPath(SchemaTableName tableName) - { - return getSchemaPath(tableName.getSchemaName()); - } - - private static String getTablePath(SchemaTableName tableName) - { - return getSchemaPath(tableName) + '/' + tableName.getTableName().toLowerCase(Locale.ENGLISH); - } - - private boolean isAccumuloTable(SchemaTableName tableName) - { - try { - String path = getTablePath(tableName); - return curator.checkExists().forPath(path) != null && isAccumuloTable(curator.getData().forPath(path)); - } - catch (Exception e) { - // Capture race condition between checkExists and getData - if (e instanceof KeeperException && ((KeeperException) e).code() == NONODE) { - return false; - } - - throw new PrestoException(ZOOKEEPER_ERROR, "Error checking if path %s is an AccumuloTable object", e); - } - } - - private boolean isAccumuloView(SchemaTableName tableName) - { - try { - String path = getTablePath(tableName); - return curator.checkExists().forPath(path) != null && isAccumuloView(curator.getData().forPath(path)); - } - catch (Exception e) { - // Capture race condition between checkExists and getData - if (e instanceof KeeperException && ((KeeperException) e).code() == NONODE) { - return false; - } - - throw new PrestoException(ZOOKEEPER_ERROR, "Error checking if path is an AccumuloView object", e); - } - } - - private boolean isAccumuloTable(byte[] data) - throws IOException - { - // AccumuloTable does not contain a 'data' node - return !mapper.reader().readTree(new String(data, UTF_8)).has("data"); - } - - private boolean isAccumuloView(byte[] data) - throws IOException - { - // AccumuloView contains a 'data' node - return mapper.reader().readTree(new String(data, UTF_8)).has("data"); - } - - private AccumuloTable toAccumuloTable(byte[] data) - throws IOException - { - return mapper.readValue(new String(data, UTF_8), AccumuloTable.class); - } - - private AccumuloView toAccumuloView(byte[] data) - throws IOException - { - return mapper.readValue(new String(data, UTF_8), AccumuloView.class); - } - - private byte[] toJsonBytes(Object obj) - throws IOException - { - return mapper.writeValueAsBytes(obj); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloColumnConstraint.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloColumnConstraint.java deleted file mode 100644 index 7f4100982415..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloColumnConstraint.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.model; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import io.prestosql.spi.predicate.Domain; - -import java.util.Objects; -import java.util.Optional; - -import static com.google.common.base.MoreObjects.toStringHelper; -import static java.util.Objects.requireNonNull; - -public class AccumuloColumnConstraint -{ - private final String name; - private final String family; - private final String qualifier; - private final boolean indexed; - private final Optional domain; - - @JsonCreator - public AccumuloColumnConstraint( - @JsonProperty("name") String name, - @JsonProperty("family") String family, - @JsonProperty("qualifier") String qualifier, - @JsonProperty("domain") Optional domain, - @JsonProperty("indexed") boolean indexed) - { - this.name = requireNonNull(name, "name is null"); - this.family = requireNonNull(family, "family is null"); - this.qualifier = requireNonNull(qualifier, "qualifier is null"); - this.indexed = indexed; - this.domain = requireNonNull(domain, "domain is null"); - } - - @JsonProperty - public boolean isIndexed() - { - return indexed; - } - - @JsonProperty - public String getName() - { - return name; - } - - @JsonProperty - public String getFamily() - { - return family; - } - - @JsonProperty - public String getQualifier() - { - return qualifier; - } - - @JsonProperty - public Optional getDomain() - { - return domain; - } - - @Override - public int hashCode() - { - return Objects.hash(name, family, qualifier, domain, indexed); - } - - @Override - public boolean equals(Object obj) - { - if (this == obj) { - return true; - } - - if ((obj == null) || (getClass() != obj.getClass())) { - return false; - } - - AccumuloColumnConstraint other = (AccumuloColumnConstraint) obj; - return Objects.equals(this.name, other.name) - && Objects.equals(this.family, other.family) - && Objects.equals(this.qualifier, other.qualifier) - && Objects.equals(this.domain, other.domain) - && Objects.equals(this.indexed, other.indexed); - } - - @Override - public String toString() - { - return toStringHelper(this) - .add("name", this.name) - .add("family", this.family) - .add("qualifier", this.qualifier) - .add("indexed", this.indexed) - .add("domain", this.domain) - .toString(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloColumnHandle.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloColumnHandle.java deleted file mode 100644 index 58e11f37079f..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloColumnHandle.java +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
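[Editor's note] AccumuloColumnConstraint above is a typical @JsonCreator/@JsonProperty model: the annotations exist so handles can be shipped as JSON between coordinator and workers. A self-contained sketch of that round-trip, using a toy Constraint class rather than the deleted one:

    import com.fasterxml.jackson.annotation.JsonCreator;
    import com.fasterxml.jackson.annotation.JsonProperty;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class JsonRoundTrip
    {
        // Illustrative stand-in for the connector's model classes.
        public static class Constraint
        {
            private final String name;
            private final boolean indexed;

            @JsonCreator
            public Constraint(
                    @JsonProperty("name") String name,
                    @JsonProperty("indexed") boolean indexed)
            {
                this.name = name;
                this.indexed = indexed;
            }

            @JsonProperty
            public String getName()
            {
                return name;
            }

            @JsonProperty
            public boolean isIndexed()
            {
                return indexed;
            }
        }

        public static void main(String[] args)
                throws Exception
        {
            ObjectMapper mapper = new ObjectMapper();
            String json = mapper.writeValueAsString(new Constraint("recordkey", true));
            Constraint back = mapper.readValue(json, Constraint.class);
            System.out.println(json + " -> " + back.getName() + "/" + back.isIndexed());
        }
    }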
- */ -package io.prestosql.plugin.accumulo.model; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonProperty; -import io.prestosql.spi.connector.ColumnHandle; -import io.prestosql.spi.connector.ColumnMetadata; -import io.prestosql.spi.type.Type; - -import java.util.Objects; -import java.util.Optional; - -import static com.google.common.base.MoreObjects.toStringHelper; -import static com.google.common.base.Preconditions.checkArgument; -import static java.util.Objects.requireNonNull; - -public final class AccumuloColumnHandle - implements ColumnHandle, Comparable -{ - private final boolean indexed; - private final Optional family; - private final Optional qualifier; - private final Type type; - private final String comment; - private final String name; - private final int ordinal; - - @JsonCreator - public AccumuloColumnHandle( - @JsonProperty("name") String name, - @JsonProperty("family") Optional family, - @JsonProperty("qualifier") Optional qualifier, - @JsonProperty("type") Type type, - @JsonProperty("ordinal") int ordinal, - @JsonProperty("comment") String comment, - @JsonProperty("indexed") boolean indexed) - { - this.name = requireNonNull(name, "name is null"); - this.family = requireNonNull(family, "family is null"); - this.qualifier = requireNonNull(qualifier, "qualifier is null"); - this.type = requireNonNull(type, "type is null"); - checkArgument(ordinal >= 0, "ordinal must be >= zero"); - this.ordinal = ordinal; - - this.comment = requireNonNull(comment, "comment is null"); - this.indexed = indexed; - } - - @JsonProperty - public String getName() - { - return name; - } - - @JsonProperty - public Optional getFamily() - { - return family; - } - - @JsonProperty - public Optional getQualifier() - { - return qualifier; - } - - @JsonProperty - public Type getType() - { - return type; - } - - @JsonProperty - public int getOrdinal() - { - return ordinal; - } - - @JsonProperty - public String getComment() - { - return comment; - } - - @JsonIgnore - public ColumnMetadata getColumnMetadata() - { - return ColumnMetadata.builder() - .setName(name) - .setType(type) - .setComment(Optional.ofNullable(comment)) - .build(); - } - - @JsonProperty - public boolean isIndexed() - { - return indexed; - } - - @Override - public int hashCode() - { - return Objects.hash(indexed, name, family, qualifier, type, ordinal, comment); - } - - @Override - public boolean equals(Object obj) - { - if (this == obj) { - return true; - } - - if ((obj == null) || (getClass() != obj.getClass())) { - return false; - } - - AccumuloColumnHandle other = (AccumuloColumnHandle) obj; - return Objects.equals(this.indexed, other.indexed) - && Objects.equals(this.name, other.name) - && Objects.equals(this.family, other.family) - && Objects.equals(this.qualifier, other.qualifier) - && Objects.equals(this.type, other.type) - && Objects.equals(this.ordinal, other.ordinal) - && Objects.equals(this.comment, other.comment); - } - - @Override - public String toString() - { - return toStringHelper(this) - .add("name", name) - .add("columnFamily", family.orElse(null)) - .add("columnQualifier", qualifier.orElse(null)) - .add("type", type) - .add("ordinal", ordinal) - .add("comment", comment) - .add("indexed", indexed) - .toString(); - } - - @Override - public int compareTo(AccumuloColumnHandle obj) - { - return Integer.compare(this.getOrdinal(), obj.getOrdinal()); - } -} diff --git 
a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloSplit.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloSplit.java deleted file mode 100644 index f6feb0d040b3..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloSplit.java +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.model; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.ImmutableList; -import io.prestosql.spi.HostAddress; -import io.prestosql.spi.connector.ConnectorSplit; -import org.apache.accumulo.core.data.Range; - -import java.util.List; -import java.util.Optional; -import java.util.stream.Collectors; - -import static com.google.common.base.MoreObjects.toStringHelper; -import static java.util.Objects.requireNonNull; - -public class AccumuloSplit - implements ConnectorSplit -{ - private final Optional hostPort; - private final List addresses; - private final List ranges; - - @JsonCreator - public AccumuloSplit( - @JsonProperty("ranges") List ranges, - @JsonProperty("hostPort") Optional hostPort) - { - this.hostPort = requireNonNull(hostPort, "hostPort is null"); - this.ranges = ImmutableList.copyOf(requireNonNull(ranges, "ranges is null")); - - // Parse the host address into a list of addresses, this would be an Accumulo Tablet server or some localhost thing - if (hostPort.isPresent()) { - addresses = ImmutableList.of(HostAddress.fromString(hostPort.get())); - } - else { - addresses = ImmutableList.of(); - } - } - - @JsonProperty - public Optional getHostPort() - { - return hostPort; - } - - @JsonProperty("ranges") - public List getWrappedRanges() - { - return ranges; - } - - @JsonIgnore - public List getRanges() - { - return ranges.stream().map(WrappedRange::getRange).collect(Collectors.toList()); - } - - @Override - public boolean isRemotelyAccessible() - { - return true; - } - - @Override - public List getAddresses() - { - return addresses; - } - - @Override - public Object getInfo() - { - return this; - } - - @Override - public String toString() - { - return toStringHelper(this) - .add("addresses", addresses) - .add("numRanges", ranges.size()) - .add("hostPort", hostPort) - .toString(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloTableHandle.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloTableHandle.java deleted file mode 100644 index 3019a71a59de..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/AccumuloTableHandle.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
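[Editor's note] A note on the design of AccumuloSplit above: isRemotelyAccessible() returns true, so the optional tablet-server "host:port" is only a locality hint; when it is absent the engine may schedule the split on any node. A tiny sketch of the address handling (names are illustrative):

    import java.util.List;
    import java.util.Optional;

    public class SplitAddresses
    {
        // Mirror of the constructor logic above: one address if a host is known, else none.
        static List<String> addresses(Optional<String> hostPort)
        {
            return hostPort.map(List::of).orElse(List.of());
        }

        public static void main(String[] args)
        {
            System.out.println(addresses(Optional.of("tserver-1:9997"))); // [tserver-1:9997]
            System.out.println(addresses(Optional.empty()));              // []
        }
    }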
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.model; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonIgnore; -import com.fasterxml.jackson.annotation.JsonProperty; -import io.prestosql.plugin.accumulo.metadata.AccumuloTable; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.connector.ColumnHandle; -import io.prestosql.spi.connector.ConnectorInsertTableHandle; -import io.prestosql.spi.connector.ConnectorOutputTableHandle; -import io.prestosql.spi.connector.ConnectorTableHandle; -import io.prestosql.spi.connector.SchemaTableName; -import io.prestosql.spi.predicate.TupleDomain; - -import java.util.Objects; -import java.util.Optional; - -import static com.google.common.base.MoreObjects.toStringHelper; -import static io.prestosql.spi.StandardErrorCode.NOT_FOUND; -import static java.util.Objects.requireNonNull; - -public final class AccumuloTableHandle - implements ConnectorInsertTableHandle, ConnectorOutputTableHandle, ConnectorTableHandle -{ - private final boolean external; - private final String rowId; - private final Optional scanAuthorizations; - private final String schema; - private final String serializerClassName; - private final String table; - private final TupleDomain constraint; - - public AccumuloTableHandle( - String schema, - String table, - String rowId, - boolean external, - String serializerClassName, - Optional scanAuthorizations) - { - this(schema, table, rowId, TupleDomain.all(), external, serializerClassName, scanAuthorizations); - } - - @JsonCreator - public AccumuloTableHandle( - @JsonProperty("schema") String schema, - @JsonProperty("table") String table, - @JsonProperty("rowId") String rowId, - @JsonProperty("constraint") TupleDomain constraint, - @JsonProperty("external") boolean external, - @JsonProperty("serializerClassName") String serializerClassName, - @JsonProperty("scanAuthorizations") Optional scanAuthorizations) - { - this.external = external; - this.rowId = requireNonNull(rowId, "rowId is null"); - this.scanAuthorizations = scanAuthorizations; - this.schema = requireNonNull(schema, "schema is null"); - this.serializerClassName = requireNonNull(serializerClassName, "serializerClassName is null"); - this.table = requireNonNull(table, "table is null"); - this.constraint = requireNonNull(constraint, "constraints is null"); - } - - @JsonProperty - public String getRowId() - { - return rowId; - } - - @JsonProperty - public Optional getScanAuthorizations() - { - return scanAuthorizations; - } - - @JsonProperty - public String getSchema() - { - return schema; - } - - @JsonProperty - public String getSerializerClassName() - { - return serializerClassName; - } - - @JsonIgnore - public AccumuloRowSerializer getSerializerInstance() - { - try { - return (AccumuloRowSerializer) Class.forName(serializerClassName).getConstructor().newInstance(); - } - catch (Exception e) { - throw new PrestoException(NOT_FOUND, "Configured serializer class not found", e); - } - } - - @JsonProperty - public String getTable() - { - return 
table; - } - - @JsonProperty - public boolean isExternal() - { - return external; - } - - @JsonProperty - public TupleDomain getConstraint() - { - return constraint; - } - - public SchemaTableName toSchemaTableName() - { - return new SchemaTableName(schema, table); - } - - @JsonIgnore - public String getFullTableName() - { - return AccumuloTable.getFullTableName(schema, table); - } - - @Override - public int hashCode() - { - return Objects.hash(schema, table, rowId, external, serializerClassName); - } - - @Override - public boolean equals(Object obj) - { - if (this == obj) { - return true; - } - - if ((obj == null) || (getClass() != obj.getClass())) { - return false; - } - - AccumuloTableHandle other = (AccumuloTableHandle) obj; - return Objects.equals(this.schema, other.schema) - && Objects.equals(this.table, other.table) - && Objects.equals(this.rowId, other.rowId) - && Objects.equals(this.external, other.external) - && Objects.equals(this.serializerClassName, other.serializerClassName) - && Objects.equals(this.scanAuthorizations, other.scanAuthorizations); - } - - @Override - public String toString() - { - return toStringHelper(this) - .add("schema", schema) - .add("table", table) - .add("rowId", rowId) - .add("internal", external) - .add("serializerClassName", serializerClassName) - .add("scanAuthorizations", scanAuthorizations) - .toString(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/Field.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/Field.java deleted file mode 100644 index 7513e287cc97..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/Field.java +++ /dev/null @@ -1,546 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
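[Editor's note] getSerializerInstance() above instantiates the serializer reflectively from a class name stored in the handle, so the choice of serializer travels with the table metadata. A generic sketch of that pattern under the same assumptions (the factory name is invented for illustration):

    public final class ReflectiveFactory
    {
        private ReflectiveFactory() {}

        // Instantiate by class name via the public no-arg constructor, as the handle does.
        static <T> T newInstance(String className, Class<T> expectedType)
        {
            try {
                return expectedType.cast(Class.forName(className).getConstructor().newInstance());
            }
            catch (ReflectiveOperationException | ClassCastException e) {
                throw new IllegalArgumentException("Cannot instantiate " + className, e);
            }
        }

        public static void main(String[] args)
        {
            // The connector would pass the configured serializer class name here.
            StringBuilder sb = newInstance("java.lang.StringBuilder", StringBuilder.class);
            System.out.println(sb.append("ok"));
        }
    }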
- */ -package io.prestosql.plugin.accumulo.model; - -import io.airlift.slice.Slice; -import io.prestosql.plugin.accumulo.Types; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.block.ArrayBlock; -import io.prestosql.spi.block.Block; -import io.prestosql.spi.type.Type; -import io.prestosql.spi.type.VarcharType; - -import java.sql.Date; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.Arrays; -import java.util.Calendar; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Objects; - -import static io.prestosql.spi.StandardErrorCode.FUNCTION_IMPLEMENTATION_ERROR; -import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED; -import static io.prestosql.spi.type.BigintType.BIGINT; -import static io.prestosql.spi.type.BooleanType.BOOLEAN; -import static io.prestosql.spi.type.DateType.DATE; -import static io.prestosql.spi.type.DoubleType.DOUBLE; -import static io.prestosql.spi.type.IntegerType.INTEGER; -import static io.prestosql.spi.type.RealType.REAL; -import static io.prestosql.spi.type.SmallintType.SMALLINT; -import static io.prestosql.spi.type.TimeType.TIME; -import static io.prestosql.spi.type.TimestampType.TIMESTAMP; -import static io.prestosql.spi.type.TinyintType.TINYINT; -import static io.prestosql.spi.type.VarbinaryType.VARBINARY; -import static io.prestosql.spi.type.VarcharType.VARCHAR; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.Objects.requireNonNull; -import static java.util.concurrent.TimeUnit.DAYS; - -public class Field -{ - private final Object value; - private final Type type; - private final boolean indexed; - - public Field(Object value, Type type) - { - this(value, type, false); - } - - public Field(Object value, Type type, boolean indexed) - { - this.value = cleanObject(value, type); - this.type = requireNonNull(type, "type is null"); - this.indexed = indexed; - } - - public Field(Field field) - { - this.type = field.type; - this.indexed = false; - - if (Types.isArrayType(this.type) || Types.isMapType(this.type)) { - this.value = field.value; - return; - } - - if (type.equals(BIGINT)) { - this.value = field.getLong(); - } - else if (type.equals(BOOLEAN)) { - this.value = field.getBoolean(); - } - else if (type.equals(DATE)) { - this.value = new Date(field.getDate().getTime()); - } - else if (type.equals(DOUBLE)) { - this.value = field.getDouble(); - } - else if (type.equals(INTEGER)) { - this.value = field.getInt(); - } - else if (type.equals(REAL)) { - this.value = field.getFloat(); - } - else if (type.equals(SMALLINT)) { - this.value = field.getShort(); - } - else if (type.equals(TIME)) { - this.value = new Time(field.getTime().getTime()); - } - else if (type.equals(TIMESTAMP)) { - this.value = new Timestamp(field.getTimestamp().getTime()); - } - else if (type.equals(TINYINT)) { - this.value = field.getByte(); - } - else if (type.equals(VARBINARY)) { - this.value = Arrays.copyOf(field.getVarbinary(), field.getVarbinary().length); - } - else if (type.equals(VARCHAR)) { - this.value = field.getVarchar(); - } - else { - throw new PrestoException(NOT_SUPPORTED, "Unsupported type " + type); - } - } - - public Type getType() - { - return type; - } - - public Block getArray() - { - return (Block) value; - } - - public Long getLong() - { - return (Long) value; - } - - public Boolean getBoolean() - { - return (Boolean) value; - } - - public Byte getByte() - { - return (Byte) value; - } - - 
public Date getDate() - { - return (Date) value; - } - - public Double getDouble() - { - return (Double) value; - } - - public Float getFloat() - { - return (Float) value; - } - - public Integer getInt() - { - return (Integer) value; - } - - public Block getMap() - { - return (Block) value; - } - - public Object getObject() - { - return value; - } - - public Short getShort() - { - return (Short) value; - } - - public Timestamp getTimestamp() - { - return (Timestamp) value; - } - - public Time getTime() - { - return (Time) value; - } - - public byte[] getVarbinary() - { - return (byte[]) value; - } - - public String getVarchar() - { - return (String) value; - } - - public boolean isIndexed() - { - return indexed; - } - - public boolean isNull() - { - return value == null; - } - - @Override - public int hashCode() - { - return Objects.hash(value, type, indexed); - } - - @Override - public boolean equals(Object obj) - { - boolean retval = true; - if (obj instanceof Field) { - Field field = (Field) obj; - if (type.equals(field.getType())) { - if (this.isNull() && field.isNull()) { - retval = true; - } - else if (this.isNull() != field.isNull()) { - retval = false; - } - else if (type.equals(VARBINARY)) { - // special case for byte arrays - // aren't they so fancy - retval = Arrays.equals((byte[]) value, (byte[]) field.getObject()); - } - else if (type.equals(DATE) || type.equals(TIME) || type.equals(TIMESTAMP)) { - retval = value.toString().equals(field.getObject().toString()); - } - else { - if (value instanceof Block) { - retval = equals((Block) value, (Block) field.getObject()); - } - else { - retval = value.equals(field.getObject()); - } - } - } - } - return retval; - } - - private static boolean equals(Block block1, Block block2) - { - boolean retval = block1.getPositionCount() == block2.getPositionCount(); - for (int i = 0; i < block1.getPositionCount() && retval; ++i) { - if (block1 instanceof ArrayBlock && block2 instanceof ArrayBlock) { - retval = equals(block1.getObject(i, Block.class), block2.getObject(i, Block.class)); - } - else { - retval = block1.compareTo(i, 0, block1.getSliceLength(i), block2, i, 0, block2.getSliceLength(i)) == 0; - } - } - return retval; - } - - @Override - public String toString() - { - if (value == null) { - return "null"; - } - - if (Types.isArrayType(type)) { - Type elementType = Types.getElementType(type); - StringBuilder builder = new StringBuilder("ARRAY ["); - for (Object element : AccumuloRowSerializer.getArrayFromBlock(elementType, this.getArray())) { - if (Types.isArrayType(elementType)) { - Type elementElementType = Types.getElementType(elementType); - builder.append( - new Field( - AccumuloRowSerializer.getBlockFromArray(elementElementType, (List) element), - elementType)) - .append(','); - } - else if (Types.isMapType(elementType)) { - builder.append( - new Field( - AccumuloRowSerializer.getBlockFromMap(elementType, (Map) element), - elementType)) - .append(','); - } - else { - builder.append(new Field(element, elementType)) - .append(','); - } - } - - return builder.deleteCharAt(builder.length() - 1).append("]").toString(); - } - - if (Types.isMapType(type)) { - StringBuilder builder = new StringBuilder("MAP("); - StringBuilder keys = new StringBuilder("ARRAY ["); - StringBuilder values = new StringBuilder("ARRAY ["); - for (Entry entry : AccumuloRowSerializer - .getMapFromBlock(type, this.getMap()).entrySet()) { - Type keyType = Types.getKeyType(type); - if (Types.isArrayType(keyType)) { - keys.append( - new Field( - 
AccumuloRowSerializer.getBlockFromArray(Types.getElementType(keyType), (List) entry.getKey()), - keyType)) - .append(','); - } - else if (Types.isMapType(keyType)) { - keys.append( - new Field( - AccumuloRowSerializer.getBlockFromMap(keyType, (Map) entry.getKey()), - keyType)) - .append(','); - } - else { - keys.append(new Field(entry.getKey(), keyType)) - .append(','); - } - - Type valueType = Types.getValueType(type); - if (Types.isArrayType(valueType)) { - values.append( - new Field(AccumuloRowSerializer.getBlockFromArray(Types.getElementType(valueType), - (List) entry.getValue()), valueType)) - .append(','); - } - else if (Types.isMapType(valueType)) { - values.append( - new Field( - AccumuloRowSerializer.getBlockFromMap(valueType, (Map) entry.getValue()), - valueType)) - .append(','); - } - else { - values.append(new Field(entry.getValue(), valueType)).append(','); - } - } - - keys.deleteCharAt(keys.length() - 1).append(']'); - values.deleteCharAt(values.length() - 1).append(']'); - return builder.append(keys).append(", ").append(values).append(")").toString(); - } - - // Validate the object is the given type - if (type.equals(BIGINT) || type.equals(BOOLEAN) || type.equals(DOUBLE) || type.equals(INTEGER) || type.equals(REAL) || type.equals(TINYINT) || type.equals(SMALLINT)) { - return value.toString(); - } - if (type.equals(DATE)) { - return "DATE '" + value.toString() + "'"; - } - if (type.equals(TIME)) { - return "TIME '" + value.toString() + "'"; - } - if (type.equals(TIMESTAMP)) { - return "TIMESTAMP '" + value.toString() + "'"; - } - if (type.equals(VARBINARY)) { - return "CAST('" + new String((byte[]) value, UTF_8).replaceAll("'", "''") + "' AS VARBINARY)"; - } - if (type instanceof VarcharType) { - return "'" + value.toString().replaceAll("'", "''") + "'"; - } - throw new PrestoException(NOT_SUPPORTED, "Unsupported PrestoType " + type); - } - - /** - * Does it's damnedest job to convert the given object to the given type. - * - * @param value Object to convert - * @param type Destination Presto type - * @return Null if null, the converted type of it could convert it, or the same value if it is fine just the way it is :D - * @throws PrestoException If the given object is not any flavor of the given type - */ - private static Object cleanObject(Object value, Type type) - { - if (value == null) { - return null; - } - - // Array? Better be a block! - if (Types.isArrayType(type)) { - if (!(value instanceof Block)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Block, but " + value.getClass()); - } - return value; - } - - // Map? Better be a block! 
- if (Types.isMapType(type)) { - if (!(value instanceof Block)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Block, but " + value.getClass()); - } - return value; - } - - // And now for the plain types - if (type.equals(BIGINT)) { - if (!(value instanceof Long)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Long, but " + value.getClass()); - } - return value; - } - - if (type.equals(INTEGER)) { - if (value instanceof Long) { - return ((Long) value).intValue(); - } - - if (!(value instanceof Integer)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Long or Integer, but " + value.getClass()); - } - return value; - } - - if (type.equals(BOOLEAN)) { - if (!(value instanceof Boolean)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Boolean, but " + value.getClass()); - } - return value; - } - - if (type.equals(DATE)) { - if (value instanceof Long) { - return new Date(DAYS.toMillis((Long) value)); - } - - if (value instanceof Calendar) { - return new Date(((Calendar) value).getTime().getTime()); - } - - if (!(value instanceof Date)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Calendar, Date, or Long, but " + value.getClass()); - } - return value; - } - - if (type.equals(DOUBLE)) { - if (!(value instanceof Double)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Double, but " + value.getClass()); - } - return value; - } - - if (type.equals(REAL)) { - if (value instanceof Long) { - return Float.intBitsToFloat(((Long) value).intValue()); - } - - if (value instanceof Integer) { - return Float.intBitsToFloat((Integer) value); - } - - if (!(value instanceof Float)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Float, but " + value.getClass()); - } - return value; - } - - if (type.equals(SMALLINT)) { - if (value instanceof Long) { - return ((Long) value).shortValue(); - } - - if (value instanceof Integer) { - return ((Integer) value).shortValue(); - } - - if (!(value instanceof Short)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Short, but " + value.getClass()); - } - return value; - } - - if (type.equals(TIME)) { - if (value instanceof Long) { - return new Time((Long) value); - } - - if (!(value instanceof Time)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Long or Time, but " + value.getClass()); - } - return value; - } - - if (type.equals(TIMESTAMP)) { - if (value instanceof Long) { - return new Timestamp((Long) value); - } - - if (!(value instanceof Timestamp)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Long or Timestamp, but " + value.getClass()); - } - return value; - } - - if (type.equals(TINYINT)) { - if (value instanceof Long) { - return ((Long) value).byteValue(); - } - - if (value instanceof Integer) { - return ((Integer) value).byteValue(); - } - - if (value instanceof Short) { - return ((Short) value).byteValue(); - } - - if (!(value instanceof Byte)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Byte, but " + value.getClass()); - } - return value; - } - - if (type.equals(VARBINARY)) { - if (value instanceof Slice) { - return ((Slice) value).getBytes(); - } - - if (!(value instanceof byte[])) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Slice byte[], but " + value.getClass()); - } - return value; - } - - if 
(type instanceof VarcharType) { - if (value instanceof Slice) { - return new String(((Slice) value).getBytes(), UTF_8); - } - - if (!(value instanceof String)) { - throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Object is not a Slice or String, but " + value.getClass()); - } - return value; - } - - throw new PrestoException(NOT_SUPPORTED, "Unsupported PrestoType " + type); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/Row.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/Row.java deleted file mode 100644 index 8b29893475f9..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/Row.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.model; - -import com.google.common.base.Splitter; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import io.prestosql.plugin.accumulo.Types; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.type.Type; -import io.prestosql.spi.type.VarcharType; -import org.apache.commons.lang.StringUtils; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; - -import java.sql.Date; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Objects; -import java.util.stream.Collectors; - -import static com.google.common.base.Preconditions.checkArgument; -import static io.prestosql.spi.StandardErrorCode.INVALID_FUNCTION_ARGUMENT; -import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED; -import static io.prestosql.spi.type.BigintType.BIGINT; -import static io.prestosql.spi.type.BooleanType.BOOLEAN; -import static io.prestosql.spi.type.DateType.DATE; -import static io.prestosql.spi.type.DoubleType.DOUBLE; -import static io.prestosql.spi.type.IntegerType.INTEGER; -import static io.prestosql.spi.type.RealType.REAL; -import static io.prestosql.spi.type.SmallintType.SMALLINT; -import static io.prestosql.spi.type.TimeType.TIME; -import static io.prestosql.spi.type.TimestampType.TIMESTAMP; -import static io.prestosql.spi.type.TinyintType.TINYINT; -import static io.prestosql.spi.type.VarbinaryType.VARBINARY; -import static java.lang.String.format; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.Objects.requireNonNull; - -public class Row -{ - private static final DateTimeFormatter DATE_PARSER = ISODateTimeFormat.date(); - private static final DateTimeFormatter TIME_PARSER = DateTimeFormat.forPattern("HH:mm:ss"); - private static final DateTimeFormatter TIMESTAMP_PARSER = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss.SSS"); - - private final List fields = new ArrayList<>(); - - public Row() {} - - public Row(Row row) - { - requireNonNull(row, "row is null"); 
- fields.addAll(row.fields.stream().map(Field::new).collect(Collectors.toList())); - } - - public Row addField(Field field) - { - requireNonNull(field, "field is null"); - fields.add(field); - return this; - } - - public Row addField(Object value, Type type) - { - requireNonNull(type, "type is null"); - fields.add(new Field(value, type)); - return this; - } - - public Field getField(int i) - { - return fields.get(i); - } - - /** - * Gets a list of all internal fields. Any changes to this list will affect this row. - * - * @return List of fields - */ - public List getFields() - { - return fields; - } - - public int length() - { - return fields.size(); - } - - @Override - public int hashCode() - { - return Arrays.hashCode(fields.toArray()); - } - - @Override - public boolean equals(Object obj) - { - return obj instanceof Row && Objects.equals(this.fields, ((Row) obj).getFields()); - } - - @Override - public String toString() - { - if (fields.isEmpty()) { - return "()"; - } - else { - StringBuilder builder = new StringBuilder("("); - for (Field f : fields) { - builder.append(f).append(","); - } - builder.deleteCharAt(builder.length() - 1); - return builder.append(')').toString(); - } - } - - /** - * Creates a new {@link Row} from the given delimited string based on the given {@link RowSchema} - * - * @param schema Row's schema - * @param str String to parse - * @param delimiter Delimiter of the string - * @return A new Row - * @throws PrestoException If the length of the split string is not equal to the length of the schema - * @throws PrestoException If the schema contains an unsupported type - */ - public static Row fromString(RowSchema schema, String str, char delimiter) - { - Row row = new Row(); - - ImmutableList.Builder builder = ImmutableList.builder(); - List fields = builder.addAll(Splitter.on(delimiter).split(str)).build(); - - if (fields.size() != schema.getLength()) { - throw new PrestoException(INVALID_FUNCTION_ARGUMENT, format("Number of split tokens is not equal to schema length. Expected %s received %s. 
Schema: %s, fields {%s}, delimiter %s", schema.getLength(), fields.size(), schema, StringUtils.join(fields, ","), delimiter)); - } - - for (int i = 0; i < fields.size(); ++i) { - Type type = schema.getColumn(i).getType(); - row.addField(valueFromString(fields.get(i), type), type); - } - - return row; - } - - /** - * Converts the given String into a Java object based on the given Presto type - * - * @param str String to convert - * @param type Presto Type - * @return Java object - * @throws PrestoException If the type is not supported by this function - */ - public static Object valueFromString(String str, Type type) - { - if (str == null || str.isEmpty()) { - return null; - } - else if (Types.isArrayType(type)) { - Type elementType = Types.getElementType(type); - ImmutableList.Builder listBuilder = ImmutableList.builder(); - for (String element : Splitter.on(',').split(str)) { - listBuilder.add(valueFromString(element, elementType)); - } - return AccumuloRowSerializer.getBlockFromArray(elementType, listBuilder.build()); - } - else if (Types.isMapType(type)) { - Type keyType = Types.getKeyType(type); - Type valueType = Types.getValueType(type); - ImmutableMap.Builder mapBuilder = ImmutableMap.builder(); - for (String element : Splitter.on(',').split(str)) { - ImmutableList.Builder builder = ImmutableList.builder(); - List keyValue = builder.addAll(Splitter.on("->").split(element)).build(); - checkArgument(keyValue.size() == 2, "Map element %s has %s entries, not 2", element, keyValue.size()); - - mapBuilder.put(valueFromString(keyValue.get(0), keyType), valueFromString(keyValue.get(1), valueType)); - } - return AccumuloRowSerializer.getBlockFromMap(type, mapBuilder.build()); - } - else if (type.equals(BIGINT)) { - return Long.parseLong(str); - } - else if (type.equals(BOOLEAN)) { - return Boolean.parseBoolean(str); - } - else if (type.equals(DATE)) { - return new Date(DATE_PARSER.parseDateTime(str).getMillis()); - } - else if (type.equals(DOUBLE)) { - return Double.parseDouble(str); - } - else if (type.equals(INTEGER)) { - return Integer.parseInt(str); - } - else if (type.equals(REAL)) { - return Float.parseFloat(str); - } - else if (type.equals(SMALLINT)) { - return Short.parseShort(str); - } - else if (type.equals(TIME)) { - return new Time(TIME_PARSER.parseDateTime(str).getMillis()); - } - else if (type.equals(TIMESTAMP)) { - return new Timestamp(TIMESTAMP_PARSER.parseDateTime(str).getMillis()); - } - else if (type.equals(TINYINT)) { - return Byte.valueOf(str); - } - else if (type.equals(VARBINARY)) { - return str.getBytes(UTF_8); - } - else if (type instanceof VarcharType) { - return str; - } - else { - throw new PrestoException(NOT_SUPPORTED, "Unsupported type " + type); - } - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/RowSchema.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/RowSchema.java deleted file mode 100644 index 6f2c9e25f45d..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/RowSchema.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
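[Editor's note] Row.fromString and valueFromString above define a simple contract: split the line on the delimiter, require exactly one token per schema column, and convert each token according to its column type. A standalone sketch of the same contract using only JDK types (the enum and class names are invented for illustration):

    import java.sql.Date;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Pattern;

    public class DelimitedRowParser
    {
        enum Kind { BIGINT, BOOLEAN, DOUBLE, DATE, VARCHAR }

        static List<Object> parse(String line, char delimiter, List<Kind> schema)
        {
            // -1 keeps trailing empty tokens, so the column count check stays honest.
            String[] tokens = line.split(Pattern.quote(String.valueOf(delimiter)), -1);
            if (tokens.length != schema.size()) {
                throw new IllegalArgumentException("Number of split tokens is not equal to schema length");
            }
            List<Object> row = new ArrayList<>();
            for (int i = 0; i < tokens.length; i++) {
                String token = tokens[i];
                switch (schema.get(i)) {
                    case BIGINT:  row.add(Long.parseLong(token)); break;
                    case BOOLEAN: row.add(Boolean.parseBoolean(token)); break;
                    case DOUBLE:  row.add(Double.parseDouble(token)); break;
                    case DATE:    row.add(Date.valueOf(token)); break; // ISO yyyy-MM-dd, like DATE_PARSER above
                    case VARCHAR: row.add(token); break;
                }
            }
            return row;
        }

        public static void main(String[] args)
        {
            System.out.println(parse("1|true|2.5|2020-01-01|hello", '|',
                    List.of(Kind.BIGINT, Kind.BOOLEAN, Kind.DOUBLE, Kind.DATE, Kind.VARCHAR)));
        }
    }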
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.model; - -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.type.Type; - -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; - -import static com.google.common.base.Preconditions.checkArgument; -import static io.prestosql.spi.StandardErrorCode.NOT_FOUND; -import static java.lang.String.format; - -public class RowSchema -{ - private final List columns = new ArrayList<>(); - - public RowSchema addRowId(String name, Type type) - { - columns.add(new AccumuloColumnHandle(name, Optional.empty(), Optional.empty(), type, columns.size(), "Accumulo row ID", false)); - return this; - } - - public RowSchema addColumn(String prestoName, Optional family, Optional qualifier, Type type) - { - return addColumn(prestoName, family, qualifier, type, false); - } - - public RowSchema addColumn(String prestoName, Optional family, Optional qualifier, Type type, boolean indexed) - { - columns.add( - new AccumuloColumnHandle( - prestoName, - family, - qualifier, - type, - columns.size(), - format("Accumulo column %s:%s. Indexed: %b", family, qualifier, indexed), - indexed)); - return this; - } - - public AccumuloColumnHandle getColumn(int i) - { - checkArgument(i >= 0 && i < columns.size(), "column index must be non-negative and less than length"); - return columns.get(i); - } - - public AccumuloColumnHandle getColumn(String name) - { - for (AccumuloColumnHandle columnHandle : columns) { - if (columnHandle.getName().equals(name)) { - return columnHandle; - } - } - - throw new PrestoException(NOT_FOUND, "No column with name " + name); - } - - public List getColumns() - { - return columns; - } - - public int getLength() - { - return columns.size(); - } - - /** - * Creates a new {@link RowSchema} from a list of {@link AccumuloColumnHandle} objects. Does not validate the schema. - * - * @param columns Column handles - * @return Row schema - */ - public static RowSchema fromColumns(List columns) - { - RowSchema schema = new RowSchema(); - for (AccumuloColumnHandle columnHandle : columns) { - schema.addColumn( - columnHandle.getName(), - columnHandle.getFamily(), - columnHandle.getQualifier(), - columnHandle.getType(), - columnHandle.isIndexed()); - } - return schema; - } - - @Override - public String toString() - { - StringBuilder builder = new StringBuilder("{"); - for (AccumuloColumnHandle columnHandle : columns) { - builder.append(columnHandle.getName()) - .append(' ') - .append(columnHandle.getType()) - .append(','); - } - - if (builder.length() > 1) { - builder.deleteCharAt(builder.length() - 1); - } - - builder.append('}'); - return builder.toString(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/TabletSplitMetadata.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/TabletSplitMetadata.java deleted file mode 100644 index 3539315ba764..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/TabletSplitMetadata.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.model; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.collect.ImmutableList; -import org.apache.accumulo.core.data.Range; - -import java.util.List; -import java.util.Objects; -import java.util.Optional; - -import static com.google.common.base.MoreObjects.toStringHelper; -import static java.util.Objects.requireNonNull; - -public class TabletSplitMetadata -{ - private final Optional hostPort; - private final List ranges; - - @JsonCreator - public TabletSplitMetadata( - @JsonProperty("hostPort") Optional hostPort, - @JsonProperty("ranges") List ranges) - { - this.hostPort = requireNonNull(hostPort, "hostPort is null"); - this.ranges = ImmutableList.copyOf(requireNonNull(ranges, "ranges is null")); - } - - @JsonProperty - public Optional getHostPort() - { - return hostPort; - } - - @JsonProperty - public List getRanges() - { - return ranges; - } - - @Override - public int hashCode() - { - return Objects.hash(hostPort, ranges); - } - - @Override - public boolean equals(Object obj) - { - if (this == obj) { - return true; - } - - if ((obj == null) || (getClass() != obj.getClass())) { - return false; - } - - TabletSplitMetadata other = (TabletSplitMetadata) obj; - return Objects.equals(this.hostPort, other.hostPort) - && Objects.equals(this.ranges, other.ranges); - } - - @Override - public String toString() - { - return toStringHelper(this) - .add("hostPort", hostPort) - .add("numRanges", ranges.size()) - .toString(); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/WrappedRange.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/WrappedRange.java deleted file mode 100644 index e2e1eef9461e..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/model/WrappedRange.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.model; - -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonValue; -import com.google.common.io.ByteArrayDataOutput; -import com.google.common.io.ByteStreams; -import org.apache.accumulo.core.data.Range; - -import java.io.DataInput; -import java.io.IOException; - -public class WrappedRange -{ - private final Range range; - - public WrappedRange(Range range) - { - this.range = range; - } - - public Range getRange() - { - return range; - } - - @JsonValue - public byte[] toBytes() - throws IOException - { - ByteArrayDataOutput out = ByteStreams.newDataOutput(); - range.write(out); - return out.toByteArray(); - } - - @JsonCreator - public static WrappedRange fromBytes(byte[] bytes) - throws IOException - { - DataInput in = ByteStreams.newDataInput(bytes); - Range range = new Range(); - range.readFields(in); - return new WrappedRange(range); - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/AccumuloRowSerializer.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/AccumuloRowSerializer.java deleted file mode 100644 index ae158971f0c0..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/AccumuloRowSerializer.java +++ /dev/null @@ -1,625 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.serializers; - -import com.google.common.collect.ImmutableList; -import io.airlift.slice.Slice; -import io.prestosql.plugin.accumulo.Types; -import io.prestosql.spi.block.Block; -import io.prestosql.spi.block.BlockBuilder; -import io.prestosql.spi.type.Type; -import io.prestosql.spi.type.TypeUtils; -import io.prestosql.spi.type.VarcharType; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Value; -import org.apache.hadoop.io.Text; - -import java.sql.Date; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -/** - * Interface for deserializing the data in Accumulo into a Presto row. - *
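[Editor's note] WrappedRange above exists because Accumulo's Range is a Hadoop Writable, not a Jackson-friendly bean; it is exposed to JSON as the raw bytes produced by write(). The same round-trip, extracted into a runnable sketch using the same Guava and Accumulo calls as the deleted class:

    import com.google.common.io.ByteArrayDataOutput;
    import com.google.common.io.ByteStreams;
    import org.apache.accumulo.core.data.Range;

    import java.io.IOException;

    public class RangeBytes
    {
        static byte[] toBytes(Range range)
                throws IOException
        {
            ByteArrayDataOutput out = ByteStreams.newDataOutput();
            range.write(out); // Writable serialization
            return out.toByteArray();
        }

        static Range fromBytes(byte[] bytes)
                throws IOException
        {
            Range range = new Range();
            range.readFields(ByteStreams.newDataInput(bytes));
            return range;
        }

        public static void main(String[] args)
                throws IOException
        {
            Range original = new Range("row_a", "row_z");
            Range copy = fromBytes(toBytes(original));
            System.out.println(original.equals(copy)); // true
        }
    }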

- * <p>
- * Provides a means for end-users of the connector to customize how the data in an Accumulo row gets
- * serialized and deserialized from/to a Presto row.
- * <p>
- * The workflow of how this class is called by the Accumulo connector for reading data is as
- * follows:
- * <ol>
- * <li>setRowIdName - Sets the Presto name which is the Accumulo row ID</li>
- * <li>setRowOnly - True if only the row ID is going to be retrieved, false if more data is
- * necessary.</li>
- * <li>setMapping - Multiple calls for each Presto column, setting the mapping of Presto column name
- * to Accumulo column family and qualifier</li>
- * <li>deserialize - Called for each Accumulo entry in the same row. Implementations should
- * retrieve the Presto column value from the given key/value pair</li>
- * <li>get* - Called to retrieve the data type for the given Presto column name</li>
- * <li>reset - Begins a new Row, serializer is expected to clear any state</li>
- * <li>If there are more entries left, go back to deserialize, else end!</li>
- * </ol>
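[Editor's note] Illustrative sketch (not from the deleted sources) of the reset/deserialize/get* loop described in the list above, reduced to a per-row map keyed by family:qualifier:

    import java.util.HashMap;
    import java.util.Map;

    public class ToyRowState
    {
        private final Map<String, byte[]> columns = new HashMap<>();

        public void reset()
        {
            columns.clear(); // new row ID: forget the previous row's entries
        }

        public void deserialize(String family, String qualifier, byte[] value)
        {
            columns.put(family + ":" + qualifier, value);
        }

        public byte[] get(String family, String qualifier)
        {
            return columns.get(family + ":" + qualifier);
        }
    }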
- * - * @see LexicoderRowSerializer - * @see StringRowSerializer - */ -public interface AccumuloRowSerializer -{ - /** - * Gets the default AccumuloRowSerializer, {@link LexicoderRowSerializer}. - * - * @return Default serializer - */ - static AccumuloRowSerializer getDefault() - { - return new LexicoderRowSerializer(); - } - - /** - * Sets the Presto name which maps to the Accumulo row ID. - * - * @param name Presto column name - */ - void setRowIdName(String name); - - /** - * Sets the mapping for the Presto column name to Accumulo family and qualifier. - * - * @param name Presto name - * @param family Accumulo family - * @param qualifier Accumulo qualifier - */ - void setMapping(String name, String family, String qualifier); - - /** - * Sets a Boolean value indicating whether or not only the row ID is going to be retrieved from the serializer. - * - * @param rowOnly True if only the row ID is set, false otherwise - */ - void setRowOnly(boolean rowOnly); - - /** - * Reset the state of the serializer to prepare for a new set of entries with the same row ID. - */ - void reset(); - - /** - * Deserialize the given Accumulo entry, retrieving data for the Presto column. - * - * @param entry Entry to deserialize - */ - void deserialize(Entry entry); - - /** - * Gets a Boolean value indicating whether or not the Presto column is a null value. - * - * @param name Column name - * @return True if null, false otherwise. - */ - boolean isNull(String name); - - /** - * Gets the array Block of the given Presto column. - * - * @param name Column name - * @param type Array type - * @return True if null, false otherwise. - */ - Block getArray(String name, Type type); - - /** - * Encode the given array Block into the given Text object. - * - * @param text Text object to set - * @param type Array type - * @param block Array block - */ - void setArray(Text text, Type type, Block block); - - /** - * Gets the Boolean value of the given Presto column. - * - * @param name Column name - * @return Boolean value - */ - boolean getBoolean(String name); - - /** - * Encode the given Boolean value into the given Text object. - * - * @param text Text object to set - * @param value Value to encode - */ - void setBoolean(Text text, Boolean value); - - /** - * Gets the Byte value of the given Presto column. - * - * @param name Column name - * @return Byte value - */ - byte getByte(String name); - - /** - * Encode the given Byte value into the given Text object. - * - * @param text Text object to set - * @param value Value to encode - */ - void setByte(Text text, Byte value); - - /** - * Gets the Date value of the given Presto column. - * - * @param name Column name - * @return Date value - */ - Date getDate(String name); - - /** - * Encode the given Date value into the given Text object. - * - * @param text Text object to set - * @param value Value to encode - */ - void setDate(Text text, Date value); - - /** - * Gets the Double value of the given Presto column. - * - * @param name Column name - * @return Double value - */ - double getDouble(String name); - - /** - * Encode the given Double value into the given Text object. - * - * @param text Text object to set - * @param value Value to encode - */ - void setDouble(Text text, Double value); - - /** - * Gets the Float value of the given Presto column. - * - * @param name Column name - * @return Float value - */ - float getFloat(String name); - - /** - * Encode the given Float value into the given Text object. 
- * - * @param text Text object to set - * @param value Value to encode - */ - void setFloat(Text text, Float value); - - /** - * Gets the Integer value of the given Presto column. - * - * @param name Column name - * @return Integer value - */ - int getInt(String name); - - /** - * Encode the given Integer value into the given Text object. - * - * @param text Text object to set - * @param value Value to encode - */ - void setInt(Text text, Integer value); - - /** - * Gets the Long value of the given Presto column. - * - * @param name Column name - * @return Long value - */ - long getLong(String name); - - /** - * Encode the given Long value into the given Text object. - * - * @param text Text object to set - * @param value Value to encode - */ - void setLong(Text text, Long value); - - /** - * Gets the Map value of the given Presto column and Map type. - * - * @param name Column name - * @param type Map type - * @return Map value - */ - Block getMap(String name, Type type); - - /** - * Encode the given map Block into the given Text object. - * - * @param text Text object to set - * @param type Map type - * @param block Map block - */ - void setMap(Text text, Type type, Block block); - - /** - * Gets the Short value of the given Presto column. - * - * @param name Column name - * @return Short value - */ - short getShort(String name); - - /** - * Encode the given Short value into the given Text object. - * - * @param text Text object to set - * @param value Value to encode - */ - void setShort(Text text, Short value); - - /** - * Gets the Time value of the given Presto column. - * - * @param name Column name - * @return Time value - */ - Time getTime(String name); - - /** - * Encode the given Time value into the given Text object. - * - * @param text Text object to set - * @param value Value to encode - */ - void setTime(Text text, Time value); - - /** - * Gets the Timestamp value of the given Presto column. - * - * @param name Column name - * @return Timestamp value - */ - Timestamp getTimestamp(String name); - - /** - * Encode the given Timestamp value into the given Text object. - * - * @param text Text object to set - * @param value Value to encode - */ - void setTimestamp(Text text, Timestamp value); - - /** - * Gets the Varbinary value of the given Presto column. - * - * @param name Column name - * @return Varbinary value - */ - byte[] getVarbinary(String name); - - /** - * Encode the given byte[] value into the given Text object. - * - * @param text Text object to set - * @param value Value to encode - */ - void setVarbinary(Text text, byte[] value); - - /** - * Gets the String value of the given Presto column. - * - * @param name Column name - * @return String value - */ - String getVarchar(String name); - - /** - * Encode the given String value into the given Text object. - * - * @param text Text object to set - * @param value Value to encode - */ - void setVarchar(Text text, String value); - - /** - * Encodes a Presto Java object to a byte array based on the given type. - *

- * Java Lists and Maps can be converted to Blocks using - * {@link AccumuloRowSerializer#getBlockFromArray(Type, java.util.List)} and - * {@link AccumuloRowSerializer#getBlockFromMap(Type, Map)} - *

- * <table>
- * <tr><th>Type to Encode</th><th>Expected Java Object</th></tr>
- * <tr><td>ARRAY</td><td>io.prestosql.spi.block.Block</td></tr>
- * <tr><td>BIGINT</td><td>Integer or Long</td></tr>
- * <tr><td>BOOLEAN</td><td>Boolean</td></tr>
- * <tr><td>DATE</td><td>java.sql.Date, Long</td></tr>
- * <tr><td>DOUBLE</td><td>Double</td></tr>
- * <tr><td>INTEGER</td><td>Integer</td></tr>
- * <tr><td>Map</td><td>io.prestosql.spi.block.Block</td></tr>
- * <tr><td>REAL</td><td>Float</td></tr>
- * <tr><td>SMALLINT</td><td>Short</td></tr>
- * <tr><td>TIME</td><td>java.sql.Time, Long</td></tr>
- * <tr><td>TIMESTAMP</td><td>java.sql.Timestamp, Long</td></tr>
- * <tr><td>TINYINT</td><td>Byte</td></tr>
- * <tr><td>VARBINARY</td><td>io.airlift.slice.Slice or byte[]</td></tr>
- * <tr><td>VARCHAR</td><td>io.airlift.slice.Slice or String</td></tr>
- * </table>
- * - * @param type The presto {@link io.prestosql.spi.type.Type} - * @param value The Java object per the table in the method description - * @return Encoded bytes - */ - byte[] encode(Type type, Object value); - - /** - * Generic function to decode the given byte array to a Java object based on the given type. - *

- * Blocks from ARRAY and MAP types can be converted - * to Java Lists and Maps using {@link AccumuloRowSerializer#getArrayFromBlock(Type, Block)} - * and {@link AccumuloRowSerializer#getMapFromBlock(Type, Block)} - *

- * <table>
- * <tr><th>Encoded Type</th><th>Returned Java Object</th></tr>
- * <tr><td>ARRAY</td><td>List&lt;?&gt;</td></tr>
- * <tr><td>BIGINT</td><td>Long</td></tr>
- * <tr><td>BOOLEAN</td><td>Boolean</td></tr>
- * <tr><td>DATE</td><td>Long</td></tr>
- * <tr><td>DOUBLE</td><td>Double</td></tr>
- * <tr><td>Map</td><td>Map&lt;?,?&gt;</td></tr>
- * <tr><td>REAL</td><td>Double</td></tr>
- * <tr><td>SMALLINT</td><td>Long</td></tr>
- * <tr><td>TIME</td><td>Long</td></tr>
- * <tr><td>TIMESTAMP</td><td>Long</td></tr>
- * <tr><td>TINYINT</td><td>Long</td></tr>
- * <tr><td>VARBINARY</td><td>byte[]</td></tr>
- * <tr><td>VARCHAR</td><td>String</td></tr>
- * </table>
- * - * @param type The presto {@link io.prestosql.spi.type.Type} - * @param value Encoded bytes to decode - * @param The Java type of the object that has been encoded to the given byte array - * @return The Java object per the table in the method description - */ - T decode(Type type, byte[] value); - - /** - * Given the array element type and Presto Block, decodes the Block into a list of values. - * - * @param elementType Array element type - * @param block Array block - * @return List of values - */ - static List getArrayFromBlock(Type elementType, Block block) - { - ImmutableList.Builder arrayBuilder = ImmutableList.builder(); - for (int i = 0; i < block.getPositionCount(); ++i) { - arrayBuilder.add(readObject(elementType, block, i)); - } - return arrayBuilder.build(); - } - - /** - * Given the map type and Presto Block, decodes the Block into a map of values. - * - * @param type Map type - * @param block Map block - * @return List of values - */ - static Map getMapFromBlock(Type type, Block block) - { - Map map = new HashMap<>(block.getPositionCount() / 2); - Type keyType = Types.getKeyType(type); - Type valueType = Types.getValueType(type); - for (int i = 0; i < block.getPositionCount(); i += 2) { - map.put(readObject(keyType, block, i), readObject(valueType, block, i + 1)); - } - return map; - } - - /** - * Encodes the given list into a Block. - * - * @param elementType Element type of the array - * @param array Array of elements to encode - * @return Presto Block - */ - static Block getBlockFromArray(Type elementType, List array) - { - BlockBuilder builder = elementType.createBlockBuilder(null, array.size()); - for (Object item : array) { - writeObject(builder, elementType, item); - } - return builder.build(); - } - - /** - * Encodes the given map into a Block. - * - * @param mapType Presto type of the map - * @param map Map of key/value pairs to encode - * @return Presto Block - */ - static Block getBlockFromMap(Type mapType, Map map) - { - Type keyType = mapType.getTypeParameters().get(0); - Type valueType = mapType.getTypeParameters().get(1); - - BlockBuilder mapBlockBuilder = mapType.createBlockBuilder(null, 1); - BlockBuilder builder = mapBlockBuilder.beginBlockEntry(); - - for (Entry entry : map.entrySet()) { - writeObject(builder, keyType, entry.getKey()); - writeObject(builder, valueType, entry.getValue()); - } - - mapBlockBuilder.closeEntry(); - return (Block) mapType.getObject(mapBlockBuilder, 0); - } - - /** - * Recursive helper function used by {@link AccumuloRowSerializer#getBlockFromArray} and - * {@link AccumuloRowSerializer#getBlockFromMap} to add the given object to the given block - * builder. Supports nested complex types! 
- * - * @param builder Block builder - * @param type Presto type - * @param obj Object to write to the block builder - */ - static void writeObject(BlockBuilder builder, Type type, Object obj) - { - if (Types.isArrayType(type)) { - BlockBuilder arrayBldr = builder.beginBlockEntry(); - Type elementType = Types.getElementType(type); - for (Object item : (List) obj) { - writeObject(arrayBldr, elementType, item); - } - builder.closeEntry(); - } - else if (Types.isMapType(type)) { - BlockBuilder mapBlockBuilder = builder.beginBlockEntry(); - for (Entry entry : ((Map) obj).entrySet()) { - writeObject(mapBlockBuilder, Types.getKeyType(type), entry.getKey()); - writeObject(mapBlockBuilder, Types.getValueType(type), entry.getValue()); - } - builder.closeEntry(); - } - else { - TypeUtils.writeNativeValue(type, builder, obj); - } - } - - /** - * Recursive helper function used by {@link AccumuloRowSerializer#getArrayFromBlock} and - * {@link AccumuloRowSerializer#getMapFromBlock} to decode the Block into a Java type. - * - * @param type Presto type - * @param block Block to decode - * @param position Position in the block to get - * @return Java object from the Block - */ - static Object readObject(Type type, Block block, int position) - { - if (Types.isArrayType(type)) { - Type elementType = Types.getElementType(type); - return getArrayFromBlock(elementType, block.getObject(position, Block.class)); - } - else if (Types.isMapType(type)) { - return getMapFromBlock(type, block.getObject(position, Block.class)); - } - else { - if (type.getJavaType() == Slice.class) { - Slice slice = (Slice) TypeUtils.readNativeValue(type, block, position); - return type.equals(VarcharType.VARCHAR) ? slice.toStringUtf8() : slice.getBytes(); - } - - return TypeUtils.readNativeValue(type, block, position); - } - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/BooleanLexicoder.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/BooleanLexicoder.java deleted file mode 100644 index d48d4be32017..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/BooleanLexicoder.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.serializers; - -import org.apache.accumulo.core.client.lexicoder.Lexicoder; - -/** - * Accumulo lexicoder for Booleans - */ -public class BooleanLexicoder - implements Lexicoder -{ - public static final byte[] TRUE = new byte[] {1}; - public static final byte[] FALSE = new byte[] {0}; - - @Override - public byte[] encode(Boolean v) - { - return v ? 
TRUE : FALSE; - } - - @Override - public Boolean decode(byte[] b) - { - return b[0] != 0; - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/LexicoderRowSerializer.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/LexicoderRowSerializer.java deleted file mode 100644 index 0e04ea9bcb2b..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/LexicoderRowSerializer.java +++ /dev/null @@ -1,422 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.serializers; - -import io.airlift.slice.Slice; -import io.prestosql.plugin.accumulo.Types; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.block.Block; -import io.prestosql.spi.type.Type; -import io.prestosql.spi.type.VarcharType; -import org.apache.accumulo.core.client.lexicoder.BytesLexicoder; -import org.apache.accumulo.core.client.lexicoder.DoubleLexicoder; -import org.apache.accumulo.core.client.lexicoder.Lexicoder; -import org.apache.accumulo.core.client.lexicoder.ListLexicoder; -import org.apache.accumulo.core.client.lexicoder.LongLexicoder; -import org.apache.accumulo.core.client.lexicoder.StringLexicoder; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Value; -import org.apache.hadoop.io.Text; - -import java.sql.Date; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; - -import static io.prestosql.plugin.accumulo.io.AccumuloPageSink.ROW_ID_COLUMN; -import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED; -import static io.prestosql.spi.type.BigintType.BIGINT; -import static io.prestosql.spi.type.BooleanType.BOOLEAN; -import static io.prestosql.spi.type.DateType.DATE; -import static io.prestosql.spi.type.DoubleType.DOUBLE; -import static io.prestosql.spi.type.IntegerType.INTEGER; -import static io.prestosql.spi.type.RealType.REAL; -import static io.prestosql.spi.type.SmallintType.SMALLINT; -import static io.prestosql.spi.type.TimeType.TIME; -import static io.prestosql.spi.type.TimestampType.TIMESTAMP; -import static io.prestosql.spi.type.TinyintType.TINYINT; -import static io.prestosql.spi.type.VarbinaryType.VARBINARY; -import static io.prestosql.spi.type.VarcharType.VARCHAR; -import static java.util.concurrent.TimeUnit.DAYS; -import static java.util.concurrent.TimeUnit.MILLISECONDS; - -/** - * Implementation of {@link AccumuloRowSerializer} that uses Accumulo lexicoders to serialize the values of the Presto columns. 
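Why lexicoders rather than arbitrary serialization: a lexicoder's encoding preserves value order under unsigned byte comparison, which is what lets Accumulo range-scan and pre-split on encoded keys (the TPCH loader below splits tpch.orders on an encoded BIGINT for exactly this reason). A self-contained illustration (editorial sketch, not part of the deleted file) using Accumulo's LongLexicoder:

    import org.apache.accumulo.core.client.lexicoder.LongLexicoder;

    public class SortOrderSketch
    {
        public static void main(String[] args)
        {
            LongLexicoder lexicoder = new LongLexicoder();
            byte[] small = lexicoder.encode(7L);
            byte[] large = lexicoder.encode(7500L);

            // Unsigned lexicographic comparison of encodings matches numeric order.
            System.out.println(compareUnsigned(small, large) < 0); // prints true
        }

        private static int compareUnsigned(byte[] a, byte[] b)
        {
            for (int i = 0; i < Math.min(a.length, b.length); i++) {
                int cmp = Integer.compare(a[i] & 0xFF, b[i] & 0xFF);
                if (cmp != 0) {
                    return cmp;
                }
            }
            return Integer.compare(a.length, b.length);
        }
    }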
- */ -@SuppressWarnings({"rawtypes", "unchecked"}) -public class LexicoderRowSerializer - implements AccumuloRowSerializer -{ - private static final Map LEXICODER_MAP = new HashMap<>(); - private static final Map> LIST_LEXICODERS = new HashMap<>(); - private static final Map> MAP_LEXICODERS = new HashMap<>(); - - private final Map> familyQualifierColumnMap = new HashMap<>(); - private final Map columnValues = new HashMap<>(); - private final Text rowId = new Text(); - private final Text family = new Text(); - private final Text qualifier = new Text(); - private final Text value = new Text(); - - private boolean rowOnly; - private String rowIdName; - - static { - LongLexicoder longLexicoder = new LongLexicoder(); - DoubleLexicoder doubleLexicoder = new DoubleLexicoder(); - LEXICODER_MAP.put(BIGINT, longLexicoder); - LEXICODER_MAP.put(BOOLEAN, new BooleanLexicoder()); - LEXICODER_MAP.put(DATE, longLexicoder); - LEXICODER_MAP.put(DOUBLE, doubleLexicoder); - LEXICODER_MAP.put(INTEGER, longLexicoder); - LEXICODER_MAP.put(REAL, doubleLexicoder); - LEXICODER_MAP.put(SMALLINT, longLexicoder); - LEXICODER_MAP.put(TIME, longLexicoder); - LEXICODER_MAP.put(TIMESTAMP, longLexicoder); - LEXICODER_MAP.put(TINYINT, longLexicoder); - LEXICODER_MAP.put(VARBINARY, new BytesLexicoder()); - LEXICODER_MAP.put(VARCHAR, new StringLexicoder()); - } - - @Override - public void setRowIdName(String name) - { - rowIdName = name; - } - - @Override - public void setRowOnly(boolean rowOnly) - { - this.rowOnly = rowOnly; - } - - @Override - public void setMapping(String name, String family, String qualifier) - { - columnValues.put(name, null); - Map qualifierToNameMap = familyQualifierColumnMap.get(family); - if (qualifierToNameMap == null) { - qualifierToNameMap = new HashMap<>(); - familyQualifierColumnMap.put(family, qualifierToNameMap); - } - - qualifierToNameMap.put(qualifier, name); - } - - @Override - public void reset() - { - columnValues.clear(); - } - - @Override - public void deserialize(Entry entry) - { - if (!columnValues.containsKey(rowIdName)) { - entry.getKey().getRow(rowId); - columnValues.put(rowIdName, rowId.copyBytes()); - } - - if (rowOnly) { - return; - } - - entry.getKey().getColumnFamily(family); - entry.getKey().getColumnQualifier(qualifier); - - if (family.equals(ROW_ID_COLUMN) && qualifier.equals(ROW_ID_COLUMN)) { - return; - } - - value.set(entry.getValue().get()); - columnValues.put(familyQualifierColumnMap.get(family.toString()).get(qualifier.toString()), value.copyBytes()); - } - - @Override - public boolean isNull(String name) - { - return columnValues.get(name) == null; - } - - @Override - public Block getArray(String name, Type type) - { - Type elementType = Types.getElementType(type); - return AccumuloRowSerializer.getBlockFromArray(elementType, decode(type, getFieldValue(name))); - } - - @Override - public void setArray(Text text, Type type, Block block) - { - text.set(encode(type, block)); - } - - @Override - public boolean getBoolean(String name) - { - return decode(BOOLEAN, getFieldValue(name)); - } - - @Override - public void setBoolean(Text text, Boolean value) - { - text.set(encode(BOOLEAN, value)); - } - - @Override - public byte getByte(String name) - { - return ((Long) decode(TINYINT, getFieldValue(name))).byteValue(); - } - - @Override - public void setByte(Text text, Byte value) - { - text.set(encode(TINYINT, value)); - } - - @Override - public Date getDate(String name) - { - return new Date(DAYS.toMillis(decode(BIGINT, getFieldValue(name)))); - } - - @Override - public void 
setDate(Text text, Date value) - { - text.set(encode(DATE, value)); - } - - @Override - public double getDouble(String name) - { - return decode(DOUBLE, getFieldValue(name)); - } - - @Override - public void setDouble(Text text, Double value) - { - text.set(encode(DOUBLE, value)); - } - - @Override - public float getFloat(String name) - { - return ((Double) decode(REAL, getFieldValue(name))).floatValue(); - } - - @Override - public void setFloat(Text text, Float value) - { - text.set(encode(REAL, value)); - } - - @Override - public int getInt(String name) - { - return ((Long) decode(INTEGER, getFieldValue(name))).intValue(); - } - - @Override - public void setInt(Text text, Integer value) - { - text.set(encode(INTEGER, value)); - } - - @Override - public long getLong(String name) - { - return decode(BIGINT, getFieldValue(name)); - } - - @Override - public void setLong(Text text, Long value) - { - text.set(encode(BIGINT, value)); - } - - @Override - public Block getMap(String name, Type type) - { - return AccumuloRowSerializer.getBlockFromMap(type, decode(type, getFieldValue(name))); - } - - @Override - public void setMap(Text text, Type type, Block block) - { - text.set(encode(type, block)); - } - - @Override - public short getShort(String name) - { - return ((Long) decode(SMALLINT, getFieldValue(name))).shortValue(); - } - - @Override - public void setShort(Text text, Short value) - { - text.set(encode(SMALLINT, value)); - } - - @Override - public Time getTime(String name) - { - return new Time(decode(BIGINT, getFieldValue(name))); - } - - @Override - public void setTime(Text text, Time value) - { - text.set(encode(TIME, value)); - } - - @Override - public Timestamp getTimestamp(String name) - { - return new Timestamp(decode(TIMESTAMP, getFieldValue(name))); - } - - @Override - public void setTimestamp(Text text, Timestamp value) - { - text.set(encode(TIMESTAMP, value)); - } - - @Override - public byte[] getVarbinary(String name) - { - return decode(VARBINARY, getFieldValue(name)); - } - - @Override - public void setVarbinary(Text text, byte[] value) - { - text.set(encode(VARBINARY, value)); - } - - @Override - public String getVarchar(String name) - { - return decode(VARCHAR, getFieldValue(name)); - } - - @Override - public void setVarchar(Text text, String value) - { - text.set(encode(VARCHAR, value)); - } - - private byte[] getFieldValue(String name) - { - return columnValues.get(name); - } - - @Override - public byte[] encode(Type type, Object value) - { - Object toEncode; - if (Types.isArrayType(type)) { - toEncode = AccumuloRowSerializer.getArrayFromBlock(Types.getElementType(type), (Block) value); - } - else if (Types.isMapType(type)) { - toEncode = AccumuloRowSerializer.getMapFromBlock(type, (Block) value); - } - else if (type.equals(BIGINT) && value instanceof Integer) { - toEncode = ((Integer) value).longValue(); - } - else if (type.equals(DATE) && value instanceof Date) { - toEncode = MILLISECONDS.toDays(((Date) value).getTime()); - } - else if (type.equals(INTEGER) && value instanceof Integer) { - toEncode = ((Integer) value).longValue(); - } - else if (type.equals(REAL) && value instanceof Float) { - toEncode = ((Float) value).doubleValue(); - } - else if (type.equals(SMALLINT) && value instanceof Short) { - toEncode = ((Short) value).longValue(); - } - else if (type.equals(TIME) && value instanceof Time) { - toEncode = ((Time) value).getTime(); - } - else if (type.equals(TIMESTAMP) && value instanceof Timestamp) { - toEncode = ((Timestamp) value).getTime(); - } - else if 
(type.equals(TINYINT) && value instanceof Byte) { - toEncode = ((Byte) value).longValue(); - } - else if (type.equals(VARBINARY) && value instanceof Slice) { - toEncode = ((Slice) value).getBytes(); - } - else if (type instanceof VarcharType && value instanceof Slice) { - toEncode = ((Slice) value).toStringUtf8(); - } - else { - toEncode = value; - } - - return getLexicoder(type).encode(toEncode); - } - - @Override - public T decode(Type type, byte[] value) - { - return (T) getLexicoder(type).decode(value); - } - - public static Lexicoder getLexicoder(Type type) - { - if (Types.isArrayType(type)) { - return getListLexicoder(type); - } - else if (Types.isMapType(type)) { - return getMapLexicoder(type); - } - else if (type instanceof VarcharType) { - return LEXICODER_MAP.get(VARCHAR); - } - else { - Lexicoder lexicoder = LEXICODER_MAP.get(type); - if (lexicoder == null) { - throw new PrestoException(NOT_SUPPORTED, "No lexicoder for type " + type); - } - return lexicoder; - } - } - - private static ListLexicoder getListLexicoder(Type elementType) - { - ListLexicoder listLexicoder = LIST_LEXICODERS.get(elementType); - if (listLexicoder == null) { - listLexicoder = new ListLexicoder(getLexicoder(Types.getElementType(elementType))); - LIST_LEXICODERS.put(elementType, listLexicoder); - } - return listLexicoder; - } - - private static MapLexicoder getMapLexicoder(Type type) - { - MapLexicoder mapLexicoder = MAP_LEXICODERS.get(type); - if (mapLexicoder == null) { - mapLexicoder = new MapLexicoder( - getLexicoder(Types.getKeyType(type)), - getLexicoder(Types.getValueType(type))); - MAP_LEXICODERS.put(type, mapLexicoder); - } - return mapLexicoder; - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/MapLexicoder.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/MapLexicoder.java deleted file mode 100644 index ee4b6b3500af..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/MapLexicoder.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.serializers; - -import org.apache.accumulo.core.client.lexicoder.Lexicoder; - -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; - -import static org.apache.accumulo.core.client.lexicoder.impl.ByteUtils.concat; -import static org.apache.accumulo.core.client.lexicoder.impl.ByteUtils.escape; -import static org.apache.accumulo.core.client.lexicoder.impl.ByteUtils.split; -import static org.apache.accumulo.core.client.lexicoder.impl.ByteUtils.unescape; - -/** - * Accumulo lexicoder for encoding a Java Map - * - * @param Key data type - * @param Value data type - */ -public class MapLexicoder - implements Lexicoder> -{ - private final Lexicoder keyLexicoder; - private final Lexicoder valueLexicoder; - - public MapLexicoder( - Lexicoder keyLexicoder, - Lexicoder valueLexicoder) - { - this.keyLexicoder = keyLexicoder; - this.valueLexicoder = valueLexicoder; - } - - @Override - public byte[] encode(Map v) - { - byte[][] elements = new byte[v.size() * 2][]; - int index = 0; - for (Entry entry : v.entrySet()) { - elements[index++] = escape(keyLexicoder.encode(entry.getKey())); - elements[index++] = escape(valueLexicoder.encode(entry.getValue())); - } - - return concat(elements); - } - - @Override - public Map decode(byte[] b) - { - byte[][] escapedElements = split(b); - Map decodedMap = new HashMap<>(); - for (int i = 0; i < escapedElements.length; i += 2) { - K key = keyLexicoder.decode(unescape(escapedElements[i])); - V value = valueLexicoder.decode(unescape(escapedElements[i + 1])); - decodedMap.put(key, value); - } - - return decodedMap; - } -} diff --git a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/StringRowSerializer.java b/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/StringRowSerializer.java deleted file mode 100644 index b19d4741cc30..000000000000 --- a/presto-accumulo/src/main/java/io/prestosql/plugin/accumulo/serializers/StringRowSerializer.java +++ /dev/null @@ -1,408 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.serializers; - -import io.airlift.slice.Slice; -import io.prestosql.plugin.accumulo.Types; -import io.prestosql.spi.PrestoException; -import io.prestosql.spi.block.Block; -import io.prestosql.spi.type.Type; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Value; -import org.apache.hadoop.io.Text; - -import java.sql.Date; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.HashMap; -import java.util.Map; -import java.util.Map.Entry; - -import static io.prestosql.plugin.accumulo.io.AccumuloPageSink.ROW_ID_COLUMN; -import static io.prestosql.spi.StandardErrorCode.NOT_SUPPORTED; -import static io.prestosql.spi.type.BigintType.BIGINT; -import static io.prestosql.spi.type.BooleanType.BOOLEAN; -import static io.prestosql.spi.type.DateType.DATE; -import static io.prestosql.spi.type.DoubleType.DOUBLE; -import static io.prestosql.spi.type.IntegerType.INTEGER; -import static io.prestosql.spi.type.RealType.REAL; -import static io.prestosql.spi.type.SmallintType.SMALLINT; -import static io.prestosql.spi.type.TimeType.TIME; -import static io.prestosql.spi.type.TimestampType.TIMESTAMP; -import static io.prestosql.spi.type.TinyintType.TINYINT; -import static io.prestosql.spi.type.VarbinaryType.VARBINARY; -import static io.prestosql.spi.type.VarcharType.VARCHAR; -import static java.lang.String.format; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.concurrent.TimeUnit.DAYS; -import static java.util.concurrent.TimeUnit.MILLISECONDS; - -/** - * Implementation of {@link StringRowSerializer} that encodes and decodes Presto column values as human-readable String objects. - */ -public class StringRowSerializer - implements AccumuloRowSerializer -{ - private final Map> familyQualifierColumnMap = new HashMap<>(); - private final Map columnValues = new HashMap<>(); - private final Text rowId = new Text(); - private final Text family = new Text(); - private final Text qualifier = new Text(); - private final Text value = new Text(); - - private boolean rowOnly; - private String rowIdName; - - @Override - public void setRowIdName(String name) - { - this.rowIdName = name; - } - - @Override - public void setRowOnly(boolean rowOnly) - { - this.rowOnly = rowOnly; - } - - @Override - public void setMapping(String name, String family, String qualifier) - { - columnValues.put(name, null); - Map qualifierColumnMap = familyQualifierColumnMap.get(family); - if (qualifierColumnMap == null) { - qualifierColumnMap = new HashMap<>(); - familyQualifierColumnMap.put(family, qualifierColumnMap); - } - - qualifierColumnMap.put(qualifier, name); - } - - @Override - public void reset() - { - columnValues.clear(); - } - - @Override - public void deserialize(Entry entry) - { - if (!columnValues.containsKey(rowIdName)) { - entry.getKey().getRow(rowId); - columnValues.put(rowIdName, rowId.toString()); - } - - if (rowOnly) { - return; - } - - entry.getKey().getColumnFamily(family); - entry.getKey().getColumnQualifier(qualifier); - - if (family.equals(ROW_ID_COLUMN) && qualifier.equals(ROW_ID_COLUMN)) { - return; - } - - value.set(entry.getValue().get()); - columnValues.put(familyQualifierColumnMap.get(family.toString()).get(qualifier.toString()), value.toString()); - } - - @Override - public boolean isNull(String name) - { - return columnValues.get(name) == null; - } - - @Override - public Block getArray(String name, Type type) - { - throw new PrestoException(NOT_SUPPORTED, "arrays are not (yet?) 
supported for StringRowSerializer"); - } - - @Override - public void setArray(Text text, Type type, Block block) - { - throw new PrestoException(NOT_SUPPORTED, "arrays are not (yet?) supported for StringRowSerializer"); - } - - @Override - public boolean getBoolean(String name) - { - return Boolean.parseBoolean(getFieldValue(name)); - } - - @Override - public void setBoolean(Text text, Boolean value) - { - text.set(value.toString().getBytes(UTF_8)); - } - - @Override - public byte getByte(String name) - { - return Byte.parseByte(getFieldValue(name)); - } - - @Override - public void setByte(Text text, Byte value) - { - text.set(value.toString().getBytes(UTF_8)); - } - - @Override - public Date getDate(String name) - { - return new Date(DAYS.toMillis(Long.parseLong(getFieldValue(name)))); - } - - @Override - public void setDate(Text text, Date value) - { - text.set(Long.toString(MILLISECONDS.toDays(value.getTime())).getBytes(UTF_8)); - } - - @Override - public double getDouble(String name) - { - return Double.parseDouble(getFieldValue(name)); - } - - @Override - public void setDouble(Text text, Double value) - { - text.set(value.toString().getBytes(UTF_8)); - } - - @Override - public float getFloat(String name) - { - return Float.parseFloat(getFieldValue(name)); - } - - @Override - public void setFloat(Text text, Float value) - { - text.set(value.toString().getBytes(UTF_8)); - } - - @Override - public int getInt(String name) - { - return Integer.parseInt(getFieldValue(name)); - } - - @Override - public void setInt(Text text, Integer value) - { - text.set(value.toString().getBytes(UTF_8)); - } - - @Override - public long getLong(String name) - { - return Long.parseLong(getFieldValue(name)); - } - - @Override - public void setLong(Text text, Long value) - { - text.set(value.toString().getBytes(UTF_8)); - } - - @Override - public Block getMap(String name, Type type) - { - throw new PrestoException(NOT_SUPPORTED, "maps are not (yet?) supported for StringRowSerializer"); - } - - @Override - public void setMap(Text text, Type type, Block block) - { - throw new PrestoException(NOT_SUPPORTED, "maps are not (yet?) 
supported for StringRowSerializer"); - } - - @Override - public short getShort(String name) - { - return Short.parseShort(getFieldValue(name)); - } - - @Override - public void setShort(Text text, Short value) - { - text.set(value.toString().getBytes(UTF_8)); - } - - @Override - public Time getTime(String name) - { - return new Time(Long.parseLong(getFieldValue(name))); - } - - @Override - public void setTime(Text text, Time value) - { - text.set(Long.toString(value.getTime()).getBytes(UTF_8)); - } - - @Override - public Timestamp getTimestamp(String name) - { - return new Timestamp(Long.parseLong(getFieldValue(name))); - } - - @Override - public void setTimestamp(Text text, Timestamp value) - { - text.set(Long.toString(value.getTime()).getBytes(UTF_8)); - } - - @Override - public byte[] getVarbinary(String name) - { - return getFieldValue(name).getBytes(UTF_8); - } - - @Override - public void setVarbinary(Text text, byte[] value) - { - text.set(value); - } - - @Override - public String getVarchar(String name) - { - return getFieldValue(name); - } - - @Override - public void setVarchar(Text text, String value) - { - text.set(value.getBytes(UTF_8)); - } - - private String getFieldValue(String name) - { - return columnValues.get(name).toString(); - } - - @Override - public byte[] encode(Type type, Object value) - { - Text text = new Text(); - if (Types.isArrayType(type)) { - throw new PrestoException(NOT_SUPPORTED, "arrays are not (yet?) supported for StringRowSerializer"); - } - if (Types.isMapType(type)) { - throw new PrestoException(NOT_SUPPORTED, "maps are not (yet?) supported for StringRowSerializer"); - } - if (type.equals(BIGINT) && value instanceof Integer) { - setLong(text, ((Integer) value).longValue()); - } - else if (type.equals(BIGINT) && value instanceof Long) { - setLong(text, (Long) value); - } - else if (type.equals(BOOLEAN)) { - setBoolean(text, value.equals(Boolean.TRUE)); - } - else if (type.equals(DATE)) { - setDate(text, (Date) value); - } - else if (type.equals(DOUBLE)) { - setDouble(text, (Double) value); - } - else if (type.equals(INTEGER) && value instanceof Integer) { - setInt(text, (Integer) value); - } - else if (type.equals(INTEGER) && value instanceof Long) { - setInt(text, ((Long) value).intValue()); - } - else if (type.equals(REAL)) { - setFloat(text, (Float) value); - } - else if (type.equals(SMALLINT)) { - setShort(text, (Short) value); - } - else if (type.equals(TIME)) { - setTime(text, (Time) value); - } - else if (type.equals(TIMESTAMP)) { - setTimestamp(text, (Timestamp) value); - } - else if (type.equals(TINYINT)) { - setByte(text, (Byte) value); - } - else if (type.equals(VARBINARY) && value instanceof byte[]) { - setVarbinary(text, (byte[]) value); - } - else if (type.equals(VARBINARY) && value instanceof Slice) { - setVarbinary(text, ((Slice) value).getBytes()); - } - else if (type.equals(VARCHAR) && value instanceof String) { - setVarchar(text, ((String) value)); - } - else if (type.equals(VARCHAR) && value instanceof Slice) { - setVarchar(text, ((Slice) value).toStringUtf8()); - } - else { - throw new PrestoException(NOT_SUPPORTED, format("StringLexicoder does not support encoding type %s, object class is %s", type, value.getClass())); - } - - return text.copyBytes(); - } - - @SuppressWarnings("unchecked") - @Override - public T decode(Type type, byte[] value) - { - String strValue = new String(value, UTF_8); - if (type.equals(BIGINT)) { - return (T) (Long) Long.parseLong(strValue); - } - if (type.equals(BOOLEAN)) { - return (T) (Boolean) 
Boolean.parseBoolean(strValue); - } - if (type.equals(DATE)) { - return (T) (Long) Long.parseLong(strValue); - } - if (type.equals(DOUBLE)) { - return (T) (Double) Double.parseDouble(strValue); - } - if (type.equals(INTEGER)) { - return (T) (Long) ((Integer) Integer.parseInt(strValue)).longValue(); - } - if (type.equals(REAL)) { - return (T) (Double) ((Float) Float.parseFloat(strValue)).doubleValue(); - } - if (type.equals(SMALLINT)) { - return (T) (Long) ((Short) Short.parseShort(strValue)).longValue(); - } - if (type.equals(TIME)) { - return (T) (Long) Long.parseLong(strValue); - } - if (type.equals(TIMESTAMP)) { - return (T) (Long) Long.parseLong(strValue); - } - if (type.equals(TINYINT)) { - return (T) (Long) ((Byte) Byte.parseByte(strValue)).longValue(); - } - if (type.equals(VARBINARY)) { - return (T) value; - } - if (type.equals(VARCHAR)) { - return (T) strValue; - } - throw new PrestoException(NOT_SUPPORTED, "Unsupported type " + type); - } -} diff --git a/presto-accumulo/src/main/java/org/apache/log4j/JulAppender.java b/presto-accumulo/src/main/java/org/apache/log4j/JulAppender.java deleted file mode 100644 index b12599345a97..000000000000 --- a/presto-accumulo/src/main/java/org/apache/log4j/JulAppender.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.log4j; - -import org.apache.log4j.helpers.LogLog; -import org.apache.log4j.spi.LocationInfo; -import org.apache.log4j.spi.LoggingEvent; - -import java.util.logging.LogRecord; - -import static java.lang.String.format; - -/** - * We are unable to completely remove the log4j dependency due to ZooKeeper, which explicitly requires - * an internal class of the log4j package and is unavailable in the slf4j-over-log4j artifact. - *
- * This JUL appender is a workaround for this issue, appending all log4j events to JUL. - */ -public class JulAppender - extends AppenderSkeleton -{ - /** - * Append a log event at the appropriate JUL level, depending on the log4j level. - */ - @Override - protected void append(LoggingEvent loggingEvent) - { - java.util.logging.Logger logger = java.util.logging.Logger.getLogger(loggingEvent.getLoggerName()); - if (logger == null) { - LogLog.warn(format("Cannot obtain JUL %s. Verify that this appender is used while an appropriate LogManager is active.", loggingEvent.getLoggerName())); - return; - } - - Level level = loggingEvent.getLevel(); - java.util.logging.Level julLevel = convertLog4jLevel(level); - - LogRecord record = new LogRecord(julLevel, loggingEvent.getRenderedMessage()); - record.setMillis(loggingEvent.getTimeStamp()); - LocationInfo location = loggingEvent.getLocationInformation(); - if (location != null) { - record.setSourceClassName(location.getClassName()); - record.setSourceMethodName(location.getMethodName()); - } - - logger.log(record); - } - - @Override - public boolean requiresLayout() - { - return true; - } - - @Override - public void close() {} - - private static java.util.logging.Level convertLog4jLevel(Level log4jLevel) - { - if (log4jLevel.equals(Level.TRACE)) { - return java.util.logging.Level.FINEST; - } - - if (log4jLevel.equals(Level.DEBUG)) { - return java.util.logging.Level.FINER; - } - - if (log4jLevel.equals(Level.INFO)) { - return java.util.logging.Level.INFO; - } - - if (log4jLevel.equals(Level.WARN)) { - return java.util.logging.Level.WARNING; - } - - if (log4jLevel.equals(Level.ERROR)) { - return java.util.logging.Level.SEVERE; - } - - if (log4jLevel.equals(Level.FATAL)) { - return java.util.logging.Level.SEVERE; - } - - if (log4jLevel.equals(Level.ALL)) { - return java.util.logging.Level.ALL; - } - - if (log4jLevel.equals(Level.OFF)) { - return java.util.logging.Level.OFF; - } - - return java.util.logging.Level.FINE; - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/AccumuloQueryRunner.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/AccumuloQueryRunner.java deleted file mode 100644 index 5d2950b36ef9..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/AccumuloQueryRunner.java +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
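To round out the JulAppender above: wiring it in is a one-time registration on the log4j root logger (editorial sketch using log4j 1.x APIs, not part of the deleted sources):

    import org.apache.log4j.JulAppender;
    import org.apache.log4j.Logger;
    import org.apache.log4j.SimpleLayout;

    public class InstallJulAppender
    {
        public static void main(String[] args)
        {
            JulAppender appender = new JulAppender();
            appender.setLayout(new SimpleLayout()); // requiresLayout() returns true
            Logger.getRootLogger().addAppender(appender);

            // This log4j 1.x event is now forwarded to java.util.logging.
            Logger.getLogger(InstallJulAppender.class).info("routed to JUL");
        }
    }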
- */ -package io.prestosql.plugin.accumulo; - -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSortedSet; -import io.airlift.log.Logger; -import io.airlift.log.Logging; -import io.airlift.tpch.TpchTable; -import io.prestosql.Session; -import io.prestosql.metadata.QualifiedObjectName; -import io.prestosql.plugin.accumulo.conf.AccumuloConfig; -import io.prestosql.plugin.accumulo.serializers.LexicoderRowSerializer; -import io.prestosql.plugin.tpch.TpchPlugin; -import io.prestosql.spi.PrestoException; -import io.prestosql.testing.DistributedQueryRunner; -import io.prestosql.testing.QueryRunner; -import org.apache.accumulo.core.client.AccumuloException; -import org.apache.accumulo.core.client.AccumuloSecurityException; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.Instance; -import org.apache.accumulo.core.client.ZooKeeperInstance; -import org.apache.accumulo.core.client.security.tokens.PasswordToken; -import org.apache.accumulo.minicluster.MiniAccumuloCluster; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.io.Text; -import org.intellij.lang.annotations.Language; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.util.Map; - -import static io.airlift.units.Duration.nanosSince; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.MINI_ACCUMULO; -import static io.prestosql.plugin.accumulo.AccumuloErrorCode.UNEXPECTED_ACCUMULO_ERROR; -import static io.prestosql.plugin.accumulo.MiniAccumuloConfigUtil.setConfigClassPath; -import static io.prestosql.plugin.tpch.TpchMetadata.TINY_SCHEMA_NAME; -import static io.prestosql.spi.type.BigintType.BIGINT; -import static io.prestosql.testing.TestingSession.testSessionBuilder; -import static java.lang.String.format; -import static java.util.concurrent.TimeUnit.SECONDS; -import static org.apache.accumulo.minicluster.MemoryUnit.MEGABYTE; - -public final class AccumuloQueryRunner -{ - private static final Logger LOG = Logger.get(AccumuloQueryRunner.class); - private static final String MAC_PASSWORD = "secret"; - private static final String MAC_USER = "root"; - - private static boolean tpchLoaded; - private static Connector connector = getAccumuloConnector(); - - private AccumuloQueryRunner() {} - - public static synchronized DistributedQueryRunner createAccumuloQueryRunner(Map extraProperties) - throws Exception - { - DistributedQueryRunner queryRunner = DistributedQueryRunner.builder(createSession()) - .setNodeCount(4) - .setExtraProperties(extraProperties) - .build(); - - queryRunner.installPlugin(new TpchPlugin()); - queryRunner.createCatalog("tpch", "tpch"); - - queryRunner.installPlugin(new AccumuloPlugin()); - Map accumuloProperties = - ImmutableMap.builder() - .put(AccumuloConfig.INSTANCE, connector.getInstance().getInstanceName()) - .put(AccumuloConfig.ZOOKEEPERS, connector.getInstance().getZooKeepers()) - .put(AccumuloConfig.USERNAME, MAC_USER) - .put(AccumuloConfig.PASSWORD, MAC_PASSWORD) - .put(AccumuloConfig.ZOOKEEPER_METADATA_ROOT, "/presto-accumulo-test") - .build(); - - queryRunner.createCatalog("accumulo", "accumulo", accumuloProperties); - - if (!tpchLoaded) { - copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, createSession(), TpchTable.getTables()); - connector.tableOperations().addSplits("tpch.orders", ImmutableSortedSet.of(new Text(new LexicoderRowSerializer().encode(BIGINT, 7500L)))); - tpchLoaded = true; - } - - return queryRunner; - } - - private static void copyTpchTables( - 
QueryRunner queryRunner, - String sourceCatalog, - String sourceSchema, - Session session, - Iterable> tables) - { - LOG.info("Loading data from %s.%s...", sourceCatalog, sourceSchema); - long startTime = System.nanoTime(); - for (TpchTable table : tables) { - copyTable(queryRunner, sourceCatalog, session, sourceSchema, table); - } - LOG.info("Loading from %s.%s complete in %s", sourceCatalog, sourceSchema, nanosSince(startTime).toString(SECONDS)); - } - - private static void copyTable( - QueryRunner queryRunner, - String catalog, - Session session, - String schema, - TpchTable table) - { - QualifiedObjectName source = new QualifiedObjectName(catalog, schema, table.getTableName()); - String target = table.getTableName(); - - @Language("SQL") - String sql; - switch (target) { - case "customer": - sql = format("CREATE TABLE %s WITH (index_columns = 'mktsegment') AS SELECT * FROM %s", target, source); - break; - case "lineitem": - sql = format("CREATE TABLE %s WITH (index_columns = 'quantity,discount,returnflag,shipdate,receiptdate,shipinstruct,shipmode') AS SELECT cast(uuid() AS varchar) AS uuid, * FROM %s", target, source); - break; - case "orders": - sql = format("CREATE TABLE %s WITH (index_columns = 'orderdate') AS SELECT * FROM %s", target, source); - break; - case "part": - sql = format("CREATE TABLE %s WITH (index_columns = 'brand,type,size,container') AS SELECT * FROM %s", target, source); - break; - case "partsupp": - sql = format("CREATE TABLE %s WITH (index_columns = 'partkey') AS SELECT cast(uuid() AS varchar) AS uuid, * FROM %s", target, source); - break; - case "supplier": - sql = format("CREATE TABLE %s WITH (index_columns = 'name') AS SELECT * FROM %s", target, source); - break; - default: - sql = format("CREATE TABLE %s AS SELECT * FROM %s", target, source); - break; - } - - LOG.info("Running import for %s", target, sql); - LOG.info("%s", sql); - long start = System.nanoTime(); - long rows = queryRunner.execute(session, sql).getUpdateCount().getAsLong(); - LOG.info("Imported %s rows for %s in %s", rows, target, nanosSince(start)); - } - - public static Session createSession() - { - return testSessionBuilder().setCatalog("accumulo").setSchema("tpch").build(); - } - - /** - * Gets the AccumuloConnector singleton, starting the MiniAccumuloCluster on initialization. - * This singleton instance is required so all test cases access the same MiniAccumuloCluster. - * - * @return Accumulo connector - */ - public static Connector getAccumuloConnector() - { - if (connector != null) { - return connector; - } - - try { - MiniAccumuloCluster accumulo = createMiniAccumuloCluster(); - Instance instance = new ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers()); - connector = instance.getConnector(MAC_USER, new PasswordToken(MAC_PASSWORD)); - LOG.info("Connection to MAC instance %s at %s established, user %s password %s", accumulo.getInstanceName(), accumulo.getZooKeepers(), MAC_USER, MAC_PASSWORD); - return connector; - } - catch (AccumuloException | AccumuloSecurityException | InterruptedException | IOException e) { - throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Failed to get connector to Accumulo", e); - } - } - - /** - * Creates and starts an instance of MiniAccumuloCluster, returning the new instance. 
- * - * @return New MiniAccumuloCluster - */ - private static MiniAccumuloCluster createMiniAccumuloCluster() - throws IOException, InterruptedException - { - // Create MAC directory - File macDir = Files.createTempDirectory("mac-").toFile(); - LOG.info("MAC is enabled, starting MiniAccumuloCluster at %s", macDir); - - // Start MAC and connect to it - MiniAccumuloCluster accumulo = new MiniAccumuloCluster(macDir, MAC_PASSWORD); - accumulo.getConfig().setDefaultMemory(512, MEGABYTE); - setConfigClassPath(accumulo.getConfig()); - accumulo.start(); - - // Add shutdown hook to stop MAC and cleanup temporary files - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - try { - LOG.info("Shutting down MAC"); - accumulo.stop(); - } - catch (IOException | InterruptedException e) { - Thread.currentThread().interrupt(); - throw new PrestoException(MINI_ACCUMULO, "Failed to shut down MAC instance", e); - } - - try { - LOG.info("Cleaning up MAC directory"); - FileUtils.forceDelete(macDir); - } - catch (IOException e) { - throw new PrestoException(MINI_ACCUMULO, "Failed to clean up MAC directory", e); - } - })); - - return accumulo; - } - - public static void main(String[] args) - throws Exception - { - Logging.initialize(); - DistributedQueryRunner queryRunner = createAccumuloQueryRunner(ImmutableMap.of("http-server.http.port", "8080")); - Thread.sleep(10); - Logger log = Logger.get(AccumuloQueryRunner.class); - log.info("======== SERVER STARTED ========"); - log.info("\n====\n%s\n====", queryRunner.getCoordinator().getBaseUrl()); - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/MiniAccumuloConfigUtil.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/MiniAccumuloConfigUtil.java deleted file mode 100644 index a501005b45db..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/MiniAccumuloConfigUtil.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo; - -import com.google.common.base.Splitter; -import org.apache.accumulo.minicluster.MiniAccumuloConfig; -import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl; - -import java.io.File; -import java.lang.reflect.Field; -import java.util.List; - -import static java.lang.management.ManagementFactory.getRuntimeMXBean; - -public final class MiniAccumuloConfigUtil -{ - private MiniAccumuloConfigUtil() {} - - /** - * MiniAccumuloClusterImpl will build the class path itself if not set, - * but the code fails on Java 9 due to assumptions about URLClassLoader. 
- */ - public static void setConfigClassPath(MiniAccumuloConfig config) - { - List items = Splitter.on(File.pathSeparatorChar) - .splitToList(getRuntimeMXBean().getClassPath()); - getConfigImpl(config).setClasspathItems(items.toArray(new String[0])); - } - - private static MiniAccumuloConfigImpl getConfigImpl(MiniAccumuloConfig config) - { - try { - Field field = MiniAccumuloConfig.class.getDeclaredField("impl"); - field.setAccessible(true); - return (MiniAccumuloConfigImpl) field.get(config); - } - catch (ReflectiveOperationException e) { - throw new AssertionError(e); - } - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloClient.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloClient.java deleted file mode 100644 index e4ec3e8f609f..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloClient.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo; - -import com.google.common.collect.ImmutableList; -import io.prestosql.plugin.accumulo.conf.AccumuloConfig; -import io.prestosql.plugin.accumulo.conf.AccumuloTableProperties; -import io.prestosql.plugin.accumulo.index.ColumnCardinalityCache; -import io.prestosql.plugin.accumulo.index.IndexLookup; -import io.prestosql.plugin.accumulo.metadata.AccumuloTable; -import io.prestosql.plugin.accumulo.metadata.ZooKeeperMetadataManager; -import io.prestosql.spi.connector.ColumnMetadata; -import io.prestosql.spi.connector.ConnectorTableMetadata; -import io.prestosql.spi.connector.SchemaTableName; -import io.prestosql.type.InternalTypeManager; -import org.apache.accumulo.core.client.Connector; -import org.testng.annotations.Test; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static io.prestosql.metadata.MetadataManager.createTestMetadataManager; -import static io.prestosql.spi.type.BigintType.BIGINT; -import static org.testng.Assert.assertNotNull; - -public class TestAccumuloClient -{ - private final AccumuloClient client; - private final ZooKeeperMetadataManager zooKeeperMetadataManager; - - public TestAccumuloClient() - throws Exception - { - AccumuloConfig config = new AccumuloConfig() - .setUsername("root") - .setPassword("secret"); - - Connector connector = AccumuloQueryRunner.getAccumuloConnector(); - config.setZooKeepers(connector.getInstance().getZooKeepers()); - zooKeeperMetadataManager = new ZooKeeperMetadataManager(config, new InternalTypeManager(createTestMetadataManager())); - client = new AccumuloClient(connector, config, zooKeeperMetadataManager, new AccumuloTableManager(connector), new IndexLookup(connector, new ColumnCardinalityCache(connector, config))); - } - - @Test - public void testCreateTableEmptyAccumuloColumn() - { - SchemaTableName tableName = new SchemaTableName("default", "test_create_table_empty_accumulo_column"); - - try { - List columns = ImmutableList.of( - new ColumnMetadata("id", BIGINT), - new 
ColumnMetadata("a", BIGINT), - new ColumnMetadata("b", BIGINT), - new ColumnMetadata("c", BIGINT), - new ColumnMetadata("d", BIGINT)); - - Map properties = new HashMap<>(); - new AccumuloTableProperties().getTableProperties().forEach(meta -> properties.put(meta.getName(), meta.getDefaultValue())); - properties.put("external", true); - properties.put("column_mapping", "a:a:a,b::b,c:c:,d::"); - client.createTable(new ConnectorTableMetadata(tableName, columns, properties)); - assertNotNull(client.getTable(tableName)); - } - finally { - AccumuloTable table = zooKeeperMetadataManager.getTable(tableName); - if (table != null) { - client.dropTable(table); - } - } - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloDistributedQueries.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloDistributedQueries.java deleted file mode 100644 index 08addf0d8658..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloDistributedQueries.java +++ /dev/null @@ -1,347 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import io.prestosql.testing.AbstractTestDistributedQueries; -import io.prestosql.testing.MaterializedResult; -import io.prestosql.testing.QueryRunner; -import org.intellij.lang.annotations.Language; -import org.testng.annotations.Test; - -import static io.prestosql.plugin.accumulo.AccumuloQueryRunner.createAccumuloQueryRunner; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertFalse; -import static org.testng.Assert.assertTrue; - -/** - * Accumulo requires a unique identifier for the rows. - * Any row that has a duplicate row ID is effectively an update, - * overwriting existing values of the row with whatever the new values are. - * For the lineitem and partsupp tables, there is no unique identifier, - * so a generated UUID is used in order to prevent overwriting rows of data. - * This is the same for any test cases that were creating tables with duplicate rows, - * so some test cases are overridden from the base class and slightly modified to add an additional UUID column. 
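The UUID workaround described here is the same one the TPCH loader above applies to lineitem and partsupp; in miniature (editorial sketch, table name illustrative):

    public class UuidRowIdSketch
    {
        // Without a natural key, a synthetic unique row ID keeps duplicate source
        // rows distinct; otherwise a duplicate row ID is an update that overwrites
        // the existing row's cells.
        static final String CREATE_LINEITEM_COPY =
                "CREATE TABLE lineitem_copy AS "
                + "SELECT cast(uuid() AS varchar) AS uuid, * FROM tpch.tiny.lineitem";
    }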
- */ -public class TestAccumuloDistributedQueries - extends AbstractTestDistributedQueries -{ - @Override - protected QueryRunner createQueryRunner() - throws Exception - { - return createAccumuloQueryRunner(ImmutableMap.of()); - } - - @Override - public void testAddColumn() - { - // Adding columns via SQL are not supported until adding columns with comments are supported - } - - @Override - public void testDropColumn() - { - // Dropping columns are not supported by the connector - } - - @Override - public void testCreateTableAsSelect() - { - // This test is overridden due to Function "UUID" not found errors - // Some test cases from the base class are removed - - // TODO some test cases from overridden method succeed to create table, but with wrong number or rows. - - assertUpdate("CREATE TABLE test_create_table_as_if_not_exists (a bigint, b double)"); - assertTrue(getQueryRunner().tableExists(getSession(), "test_create_table_as_if_not_exists")); - assertTableColumnNames("test_create_table_as_if_not_exists", "a", "b"); - - assertUpdate("CREATE TABLE IF NOT EXISTS test_create_table_as_if_not_exists AS SELECT cast(uuid() AS uuid) AS uuid, orderkey, discount FROM lineitem", 0); - assertTrue(getQueryRunner().tableExists(getSession(), "test_create_table_as_if_not_exists")); - assertTableColumnNames("test_create_table_as_if_not_exists", "a", "b"); - - assertUpdate("DROP TABLE test_create_table_as_if_not_exists"); - assertFalse(getQueryRunner().tableExists(getSession(), "test_create_table_as_if_not_exists")); - - this.assertCreateTableAsSelect( - "test_group", - "SELECT orderstatus, sum(totalprice) x FROM orders GROUP BY orderstatus", - "SELECT count(DISTINCT orderstatus) FROM orders"); - - this.assertCreateTableAsSelect( - "test_with_data", - "SELECT * FROM orders WITH DATA", - "SELECT * FROM orders", - "SELECT count(*) FROM orders"); - - this.assertCreateTableAsSelect( - "test_with_no_data", - "SELECT * FROM orders WITH NO DATA", - "SELECT * FROM orders LIMIT 0", - "SELECT 0"); - } - - @Override - public void testDelete() - { - // Deletes are not supported by the connector - } - - @Override - public void testInsert() - { - @Language("SQL") String query = "SELECT cast(uuid() AS varchar) AS uuid, orderdate, orderkey FROM orders"; - - assertUpdate("CREATE TABLE test_insert AS " + query + " WITH NO DATA", 0); - assertQuery("SELECT count(*) FROM test_insert", "SELECT 0"); - - assertUpdate("INSERT INTO test_insert " + query, "SELECT count(*) FROM orders"); - - assertQuery("SELECT orderdate, orderkey FROM test_insert", "SELECT orderdate, orderkey FROM orders"); - // Override because base class error: Cannot insert null row ID - assertUpdate("INSERT INTO test_insert (uuid, orderkey) VALUES ('000000', -1)", 1); - assertUpdate("INSERT INTO test_insert (uuid, orderdate) VALUES ('000001', DATE '2001-01-01')", 1); - assertUpdate("INSERT INTO test_insert (uuid, orderkey, orderdate) VALUES ('000002', -2, DATE '2001-01-02')", 1); - assertUpdate("INSERT INTO test_insert (uuid, orderdate, orderkey) VALUES ('000003', DATE '2001-01-03', -3)", 1); - - assertQuery("SELECT orderdate, orderkey FROM test_insert", - "SELECT orderdate, orderkey FROM orders" - + " UNION ALL SELECT null, -1" - + " UNION ALL SELECT DATE '2001-01-01', null" - + " UNION ALL SELECT DATE '2001-01-02', -2" - + " UNION ALL SELECT DATE '2001-01-03', -3"); - - // UNION query produces columns in the opposite order - // of how they are declared in the table schema - assertUpdate( - "INSERT INTO test_insert (uuid, orderkey, orderdate) " + - "SELECT 
cast(uuid() AS varchar) AS uuid, orderkey, orderdate FROM orders " + - "UNION ALL " + - "SELECT cast(uuid() AS varchar) AS uuid, orderkey, orderdate FROM orders", - "SELECT 2 * count(*) FROM orders"); - - assertUpdate("DROP TABLE test_insert"); - } - - @Override - public void testInsertWithCoercion() - { - // Override because of non-canonical varchar mapping - } - - @Override // Overridden because we currently do not support arrays with null elements - public void testInsertArray() - { - assertUpdate("CREATE TABLE test_insert_array (a ARRAY, b ARRAY)"); - - // assertUpdate("INSERT INTO test_insert_array (a) VALUES (ARRAY[null])", 1); TODO support ARRAY with null elements - - assertUpdate("INSERT INTO test_insert_array (a, b) VALUES (ARRAY[1.23E1], ARRAY[1.23E1])", 1); - assertQuery("SELECT a[1], b[1] FROM test_insert_array", "VALUES (12.3, 12)"); - - assertUpdate("DROP TABLE test_insert_array"); - } - - @Test - public void testInsertDuplicateRows() - { - // This test case tests the Accumulo connectors override capabilities - // When a row is inserted into a table where a row with the same row ID already exists, - // the cells of the existing row are overwritten with the new values - try { - assertUpdate("CREATE TABLE test_insert_duplicate AS SELECT 1 a, 2 b, '3' c", 1); - assertQuery("SELECT a, b, c FROM test_insert_duplicate", "SELECT 1, 2, '3'"); - assertUpdate("INSERT INTO test_insert_duplicate (a, c) VALUES (1, '4')", 1); - assertUpdate("INSERT INTO test_insert_duplicate (a, b) VALUES (1, 3)", 1); - assertQuery("SELECT a, b, c FROM test_insert_duplicate", "SELECT 1, 3, '4'"); - } - finally { - assertUpdate("DROP TABLE test_insert_duplicate"); - } - } - - @Override - public void testScalarSubquery() - { - // Override because of extra UUID column in lineitem table, cannot SELECT * - - // nested - assertQuery("SELECT (SELECT (SELECT (SELECT 1)))"); - - // aggregation - assertQuery("SELECT " - + "orderkey, partkey, suppkey, linenumber, quantity, " - + "extendedprice, discount, tax, returnflag, linestatus, " - + "shipdate, commitdate, receiptdate, shipinstruct, shipmode, comment " - + "FROM lineitem WHERE orderkey = \n" - + "(SELECT max(orderkey) FROM orders)"); - - // no output - assertQuery("SELECT " - + "orderkey, partkey, suppkey, linenumber, quantity, " - + "extendedprice, discount, tax, returnflag, linestatus, " - + "shipdate, commitdate, receiptdate, shipinstruct, shipmode, comment " - + "FROM lineitem WHERE orderkey = \n" - + "(SELECT orderkey FROM orders WHERE 0=1)"); - - // no output matching with null test - assertQuery("SELECT " - + "orderkey, partkey, suppkey, linenumber, quantity, " - + "extendedprice, discount, tax, returnflag, linestatus, " - + "shipdate, commitdate, receiptdate, shipinstruct, shipmode, comment " - + "FROM lineitem WHERE \n" - + "(SELECT orderkey FROM orders WHERE 0=1) " - + "is null"); - assertQuery("SELECT " - + "orderkey, partkey, suppkey, linenumber, quantity, " - + "extendedprice, discount, tax, returnflag, linestatus, " - + "shipdate, commitdate, receiptdate, shipinstruct, shipmode, comment " - + "FROM lineitem WHERE \n" - + "(SELECT orderkey FROM orders WHERE 0=1) " - + "is not null"); - - // subquery results and in in-predicate - assertQuery("SELECT (SELECT 1) IN (1, 2, 3)"); - assertQuery("SELECT (SELECT 1) IN ( 2, 3)"); - - // multiple subqueries - assertQuery("SELECT (SELECT 1) = (SELECT 3)"); - assertQuery("SELECT (SELECT 1) < (SELECT 3)"); - assertQuery("SELECT COUNT(*) FROM lineitem WHERE " + - "(SELECT min(orderkey) FROM orders)" + - "<" + - 
"(SELECT max(orderkey) FROM orders)"); - - // distinct - assertQuery("SELECT DISTINCT orderkey FROM lineitem " + - "WHERE orderkey BETWEEN" + - " (SELECT avg(orderkey) FROM orders) - 10 " + - " AND" + - " (SELECT avg(orderkey) FROM orders) + 10"); - - // subqueries with joins - for (String joinType : ImmutableList.of("INNER", "LEFT OUTER")) { - assertQuery("SELECT l.orderkey, COUNT(*) " + - "FROM lineitem l " + joinType + " JOIN orders o ON l.orderkey = o.orderkey " + - "WHERE l.orderkey BETWEEN" + - " (SELECT avg(orderkey) FROM orders) - 10 " + - " AND" + - " (SELECT avg(orderkey) FROM orders) + 10 " + - "GROUP BY l.orderkey"); - } - - // subqueries with ORDER BY - assertQuery("SELECT orderkey, totalprice FROM orders ORDER BY (SELECT 2)"); - - // subquery returns multiple rows - String multipleRowsErrorMsg = "Scalar sub-query has returned multiple rows"; - assertQueryFails("SELECT " - + "orderkey, partkey, suppkey, linenumber, quantity, " - + "extendedprice, discount, tax, returnflag, linestatus, " - + "shipdate, commitdate, receiptdate, shipinstruct, shipmode, comment " - + "FROM lineitem WHERE orderkey = (\n" - + "SELECT orderkey FROM orders ORDER BY totalprice)", - multipleRowsErrorMsg); - assertQueryFails("SELECT orderkey, totalprice FROM orders ORDER BY (VALUES 1, 2)", - multipleRowsErrorMsg); - - // exposes a bug in optimize hash generation because EnforceSingleNode does not - // support more than one column from the underlying query - assertQuery("SELECT custkey, (SELECT DISTINCT custkey FROM orders ORDER BY custkey LIMIT 1) FROM orders"); - } - - @Override - public void testShowColumns() - { - // Override base class because table descriptions for Accumulo connector include comments - MaterializedResult actual = computeActual("SHOW COLUMNS FROM orders"); - - assertEquals(actual.getMaterializedRows().get(0).getField(0), "orderkey"); - assertEquals(actual.getMaterializedRows().get(0).getField(1), "bigint"); - assertEquals(actual.getMaterializedRows().get(1).getField(0), "custkey"); - assertEquals(actual.getMaterializedRows().get(1).getField(1), "bigint"); - assertEquals(actual.getMaterializedRows().get(2).getField(0), "orderstatus"); - assertEquals(actual.getMaterializedRows().get(2).getField(1), "varchar(1)"); - assertEquals(actual.getMaterializedRows().get(3).getField(0), "totalprice"); - assertEquals(actual.getMaterializedRows().get(3).getField(1), "double"); - assertEquals(actual.getMaterializedRows().get(4).getField(0), "orderdate"); - assertEquals(actual.getMaterializedRows().get(4).getField(1), "date"); - assertEquals(actual.getMaterializedRows().get(5).getField(0), "orderpriority"); - assertEquals(actual.getMaterializedRows().get(5).getField(1), "varchar(15)"); - assertEquals(actual.getMaterializedRows().get(6).getField(0), "clerk"); - assertEquals(actual.getMaterializedRows().get(6).getField(1), "varchar(15)"); - assertEquals(actual.getMaterializedRows().get(7).getField(0), "shippriority"); - assertEquals(actual.getMaterializedRows().get(7).getField(1), "integer"); - assertEquals(actual.getMaterializedRows().get(8).getField(0), "comment"); - assertEquals(actual.getMaterializedRows().get(8).getField(1), "varchar(79)"); - } - - @Test - public void testMultiInBelowCardinality() - { - assertQuery("SELECT COUNT(*) FROM partsupp WHERE partkey = 1", "SELECT 4"); - assertQuery("SELECT COUNT(*) FROM partsupp WHERE partkey = 2", "SELECT 4"); - assertQuery("SELECT COUNT(*) FROM partsupp WHERE partkey IN (1, 2)", "SELECT 8"); - } - - @Test - public void testSelectNullValue() - { - try { 
- assertUpdate("CREATE TABLE test_select_null_value AS SELECT 1 a, 2 b, CAST(NULL AS BIGINT) c", 1); - assertQuery("SELECT * FROM test_select_null_value", "SELECT 1, 2, NULL"); - assertQuery("SELECT a, c FROM test_select_null_value", "SELECT 1, NULL"); - } - finally { - assertUpdate("DROP TABLE test_select_null_value"); - } - } - - @Test - public void testCreateTableEmptyColumns() - { - try { - assertUpdate("CREATE TABLE test_create_table_empty_columns WITH (column_mapping = 'a:a:a,b::b,c:c:,d::', index_columns='a,b,c,d') AS SELECT 1 id, 2 a, 3 b, 4 c, 5 d", 1); - assertQuery("SELECT * FROM test_create_table_empty_columns", "SELECT 1, 2, 3, 4, 5"); - assertQuery("SELECT * FROM test_create_table_empty_columns WHERE a = 2", "SELECT 1, 2, 3, 4, 5"); - assertQuery("SELECT * FROM test_create_table_empty_columns WHERE b = 3", "SELECT 1, 2, 3, 4, 5"); - assertQuery("SELECT * FROM test_create_table_empty_columns WHERE c = 4", "SELECT 1, 2, 3, 4, 5"); - assertQuery("SELECT * FROM test_create_table_empty_columns WHERE d = 5", "SELECT 1, 2, 3, 4, 5"); - } - finally { - assertUpdate("DROP TABLE test_create_table_empty_columns"); - } - } - - @Override - public void testDescribeOutput() - { - // this connector uses a non-canonical type for varchar columns in tpch - } - - @Override - public void testDescribeOutputNamedAndUnnamed() - { - // this connector uses a non-canonical type for varchar columns in tpch - } - - @Override - public void testCommentTable() - { - // Accumulo connector currently does not support comment on table - assertQueryFails("COMMENT ON TABLE orders IS 'hello'", "This connector does not support setting table comments"); - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloIntegrationSmokeTest.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloIntegrationSmokeTest.java deleted file mode 100644 index fefbc0b6fa0f..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/TestAccumuloIntegrationSmokeTest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo; - -import com.google.common.collect.ImmutableMap; -import io.prestosql.testing.AbstractTestIntegrationSmokeTest; -import io.prestosql.testing.MaterializedResult; - -import static org.testng.Assert.assertEquals; - -public class TestAccumuloIntegrationSmokeTest - extends AbstractTestIntegrationSmokeTest -{ - public TestAccumuloIntegrationSmokeTest() - { - super(() -> AccumuloQueryRunner.createAccumuloQueryRunner(ImmutableMap.of())); - } - - @Override - protected boolean canCreateSchema() - { - return false; - } - - @Override - protected boolean canDropSchema() - { - return false; - } - - @Override - public void testDescribeTable() - { - // Override base class because table descriptions for Accumulo connector include comments - MaterializedResult actual = computeActual("DESC ORDERS").toTestTypes(); - assertEquals(actual.getMaterializedRows().get(0).getField(0), "orderkey"); - assertEquals(actual.getMaterializedRows().get(0).getField(1), "bigint"); - assertEquals(actual.getMaterializedRows().get(1).getField(0), "custkey"); - assertEquals(actual.getMaterializedRows().get(1).getField(1), "bigint"); - assertEquals(actual.getMaterializedRows().get(2).getField(0), "orderstatus"); - assertEquals(actual.getMaterializedRows().get(2).getField(1), "varchar(1)"); - assertEquals(actual.getMaterializedRows().get(3).getField(0), "totalprice"); - assertEquals(actual.getMaterializedRows().get(3).getField(1), "double"); - assertEquals(actual.getMaterializedRows().get(4).getField(0), "orderdate"); - assertEquals(actual.getMaterializedRows().get(4).getField(1), "date"); - assertEquals(actual.getMaterializedRows().get(5).getField(0), "orderpriority"); - assertEquals(actual.getMaterializedRows().get(5).getField(1), "varchar(15)"); - assertEquals(actual.getMaterializedRows().get(6).getField(0), "clerk"); - assertEquals(actual.getMaterializedRows().get(6).getField(1), "varchar(15)"); - assertEquals(actual.getMaterializedRows().get(7).getField(0), "shippriority"); - assertEquals(actual.getMaterializedRows().get(7).getField(1), "integer"); - assertEquals(actual.getMaterializedRows().get(8).getField(0), "comment"); - assertEquals(actual.getMaterializedRows().get(8).getField(1), "varchar(79)"); - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/index/TestIndexer.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/index/TestIndexer.java deleted file mode 100644 index f62202296a82..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/index/TestIndexer.java +++ /dev/null @@ -1,333 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.index; - -import com.google.common.collect.ImmutableList; -import io.prestosql.plugin.accumulo.metadata.AccumuloTable; -import io.prestosql.plugin.accumulo.model.AccumuloColumnHandle; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.plugin.accumulo.serializers.LexicoderRowSerializer; -import io.prestosql.spi.type.ArrayType; -import io.prestosql.spi.type.Type; -import org.apache.accumulo.core.client.BatchWriterConfig; -import org.apache.accumulo.core.client.Connector; -import org.apache.accumulo.core.client.Instance; -import org.apache.accumulo.core.client.IteratorSetting; -import org.apache.accumulo.core.client.Scanner; -import org.apache.accumulo.core.client.mock.MockInstance; -import org.apache.accumulo.core.client.security.tokens.PasswordToken; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Mutation; -import org.apache.accumulo.core.data.Range; -import org.apache.accumulo.core.data.Value; -import org.apache.accumulo.core.security.Authorizations; -import org.apache.accumulo.core.security.ColumnVisibility; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.Test; - -import java.util.Iterator; -import java.util.Map.Entry; -import java.util.Optional; - -import static io.prestosql.spi.type.BigintType.BIGINT; -import static io.prestosql.spi.type.VarcharType.VARCHAR; -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.testng.Assert.assertEquals; -import static org.testng.Assert.assertFalse; - -public class TestIndexer -{ - private static final LexicoderRowSerializer SERIALIZER = new LexicoderRowSerializer(); - - private static byte[] encode(Type type, Object v) - { - return SERIALIZER.encode(type, v); - } - - private static final byte[] AGE = bytes("age"); - private static final byte[] CF = bytes("cf"); - private static final byte[] FIRSTNAME = bytes("firstname"); - private static final byte[] SENDERS = bytes("arr"); - - private static final byte[] M1_ROWID = encode(VARCHAR, "row1"); - private static final byte[] AGE_VALUE = encode(BIGINT, 27L); - private static final byte[] M1_FNAME_VALUE = encode(VARCHAR, "alice"); - private static final byte[] M1_ARR_VALUE = encode(new ArrayType(VARCHAR), AccumuloRowSerializer.getBlockFromArray(VARCHAR, ImmutableList.of("abc", "def", "ghi"))); - - private static final byte[] M2_ROWID = encode(VARCHAR, "row2"); - private static final byte[] M2_FNAME_VALUE = encode(VARCHAR, "bob"); - private static final byte[] M2_ARR_VALUE = encode(new ArrayType(VARCHAR), AccumuloRowSerializer.getBlockFromArray(VARCHAR, ImmutableList.of("ghi", "mno", "abc"))); - - private Mutation m1; - private Mutation m2; - private Mutation m1v; - private Mutation m2v; - private AccumuloTable table; - - @BeforeClass - public void setupClass() - { - AccumuloColumnHandle c1 = new AccumuloColumnHandle("id", Optional.empty(), Optional.empty(), VARCHAR, 0, "", false); - AccumuloColumnHandle c2 = new AccumuloColumnHandle("age", Optional.of("cf"), Optional.of("age"), BIGINT, 1, "", true); - AccumuloColumnHandle c3 = new AccumuloColumnHandle("firstname", Optional.of("cf"), Optional.of("firstname"), VARCHAR, 2, "", true); - AccumuloColumnHandle c4 = new AccumuloColumnHandle("arr", Optional.of("cf"), Optional.of("arr"), new ArrayType(VARCHAR), 3, "", true); - - table = new AccumuloTable("default", "index_test_table", ImmutableList.of(c1, c2, c3, c4), "id", true, LexicoderRowSerializer.class.getCanonicalName(), null); - - m1 = new 
Mutation(M1_ROWID); - m1.put(CF, AGE, AGE_VALUE); - m1.put(CF, FIRSTNAME, M1_FNAME_VALUE); - m1.put(CF, SENDERS, M1_ARR_VALUE); - - m2 = new Mutation(M2_ROWID); - m2.put(CF, AGE, AGE_VALUE); - m2.put(CF, FIRSTNAME, M2_FNAME_VALUE); - m2.put(CF, SENDERS, M2_ARR_VALUE); - - ColumnVisibility visibility1 = new ColumnVisibility("private"); - ColumnVisibility visibility2 = new ColumnVisibility("moreprivate"); - m1v = new Mutation(M1_ROWID); - m1v.put(CF, AGE, visibility1, AGE_VALUE); - m1v.put(CF, FIRSTNAME, visibility1, M1_FNAME_VALUE); - m1v.put(CF, SENDERS, visibility2, M1_ARR_VALUE); - - m2v = new Mutation(M2_ROWID); - m2v.put(CF, AGE, visibility1, AGE_VALUE); - m2v.put(CF, FIRSTNAME, visibility2, M2_FNAME_VALUE); - m2v.put(CF, SENDERS, visibility2, M2_ARR_VALUE); - } - - @Test - public void testMutationIndex() - throws Exception - { - Instance inst = new MockInstance(); - Connector conn = inst.getConnector("root", new PasswordToken("")); - conn.tableOperations().create(table.getFullTableName()); - conn.tableOperations().create(table.getIndexTableName()); - conn.tableOperations().create(table.getMetricsTableName()); - - for (IteratorSetting s : Indexer.getMetricIterators(table)) { - conn.tableOperations().attachIterator(table.getMetricsTableName(), s); - } - - Indexer indexer = new Indexer(conn, new Authorizations(), table, new BatchWriterConfig()); - indexer.index(m1); - indexer.flush(); - - Scanner scan = conn.createScanner(table.getIndexTableName(), new Authorizations()); - scan.setRange(new Range()); - - Iterator<Entry<Key, Value>> iter = scan.iterator(); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row1", ""); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row1", ""); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "row1", ""); - assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "row1", ""); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row1", ""); - assertFalse(iter.hasNext()); - - scan.close(); - - scan = conn.createScanner(table.getMetricsTableName(), new Authorizations()); - scan.setRange(new Range()); - - iter = scan.iterator(); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "___card___", "1"); - assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___card___", "1"); - assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___first_row___", "row1"); - assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___last_row___", "row1"); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "___card___", "1"); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "___card___", "1"); - assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "___card___", "1"); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "___card___", "1"); - assertFalse(iter.hasNext()); - - scan.close(); - - indexer.index(m2); - indexer.close(); - - scan = conn.createScanner(table.getIndexTableName(), new Authorizations()); - scan.setRange(new Range()); - iter = scan.iterator(); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row1", ""); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row2", ""); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row1", ""); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row2", ""); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "row1", ""); - assertKeyValuePair(iter.next(), M2_FNAME_VALUE, "cf_firstname", "row2", ""); - assertKeyValuePair(iter.next(), bytes("def"),
"cf_arr", "row1", ""); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row1", ""); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row2", ""); - assertKeyValuePair(iter.next(), bytes("mno"), "cf_arr", "row2", ""); - assertFalse(iter.hasNext()); - - scan.close(); - - scan = conn.createScanner(table.getMetricsTableName(), new Authorizations()); - scan.setRange(new Range()); - - iter = scan.iterator(); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "___card___", "2"); - assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___card___", "2"); - assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___first_row___", "row1"); - assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___last_row___", "row2"); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "___card___", "2"); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "___card___", "1"); - assertKeyValuePair(iter.next(), M2_FNAME_VALUE, "cf_firstname", "___card___", "1"); - assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "___card___", "1"); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "___card___", "2"); - assertKeyValuePair(iter.next(), bytes("mno"), "cf_arr", "___card___", "1"); - assertFalse(iter.hasNext()); - - scan.close(); - } - - @Test - public void testMutationIndexWithVisibilities() - throws Exception - { - Instance inst = new MockInstance(); - Connector conn = inst.getConnector("root", new PasswordToken("")); - conn.tableOperations().create(table.getFullTableName()); - conn.tableOperations().create(table.getIndexTableName()); - conn.tableOperations().create(table.getMetricsTableName()); - - for (IteratorSetting s : Indexer.getMetricIterators(table)) { - conn.tableOperations().attachIterator(table.getMetricsTableName(), s); - } - - Indexer indexer = new Indexer(conn, new Authorizations(), table, new BatchWriterConfig()); - indexer.index(m1); - indexer.index(m1v); - indexer.flush(); - - Scanner scan = conn.createScanner(table.getIndexTableName(), new Authorizations("private", "moreprivate")); - scan.setRange(new Range()); - - Iterator<Entry<Key, Value>> iter = scan.iterator(); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row1", ""); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row1", "private", ""); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row1", ""); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row1", "moreprivate", ""); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "row1", ""); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "row1", "private", ""); - assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "row1", ""); - assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "row1", "moreprivate", ""); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row1", ""); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row1", "moreprivate", ""); - assertFalse(iter.hasNext()); - - scan.close(); - - scan = conn.createScanner(table.getMetricsTableName(), new Authorizations("private", "moreprivate")); - scan.setRange(new Range()); - - iter = scan.iterator(); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "___card___", "1"); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "___card___", "private", "1"); - assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___card___", "2"); - assertKeyValuePair(iter.next(),
Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___first_row___", "row1"); - assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___last_row___", "row1"); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "___card___", "1"); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "___card___", "moreprivate", "1"); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "___card___", "1"); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "___card___", "private", "1"); - assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "___card___", "1"); - assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "___card___", "moreprivate", "1"); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "___card___", "1"); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "___card___", "moreprivate", "1"); - assertFalse(iter.hasNext()); - - scan.close(); - - indexer.index(m2); - indexer.index(m2v); - indexer.close(); - - scan = conn.createScanner(table.getIndexTableName(), new Authorizations("private", "moreprivate")); - scan.setRange(new Range()); - iter = scan.iterator(); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row1", ""); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row1", "private", ""); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row2", ""); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "row2", "private", ""); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row1", ""); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row1", "moreprivate", ""); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row2", ""); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "row2", "moreprivate", ""); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "row1", ""); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "row1", "private", ""); - assertKeyValuePair(iter.next(), M2_FNAME_VALUE, "cf_firstname", "row2", ""); - assertKeyValuePair(iter.next(), M2_FNAME_VALUE, "cf_firstname", "row2", "moreprivate", ""); - assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "row1", ""); - assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "row1", "moreprivate", ""); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row1", ""); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row1", "moreprivate", ""); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row2", ""); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "row2", "moreprivate", ""); - assertKeyValuePair(iter.next(), bytes("mno"), "cf_arr", "row2", ""); - assertKeyValuePair(iter.next(), bytes("mno"), "cf_arr", "row2", "moreprivate", ""); - assertFalse(iter.hasNext()); - - scan.close(); - - scan = conn.createScanner(table.getMetricsTableName(), new Authorizations("private", "moreprivate")); - scan.setRange(new Range()); - - iter = scan.iterator(); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "___card___", "2"); - assertKeyValuePair(iter.next(), AGE_VALUE, "cf_age", "___card___", "private", "2"); - assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___card___", "4"); - assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___first_row___", "row1"); - assertKeyValuePair(iter.next(), Indexer.METRICS_TABLE_ROW_ID.array(), "___rows___", "___last_row___", "row2"); - assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "___card___", "2"); - 
assertKeyValuePair(iter.next(), bytes("abc"), "cf_arr", "___card___", "moreprivate", "2"); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "___card___", "1"); - assertKeyValuePair(iter.next(), M1_FNAME_VALUE, "cf_firstname", "___card___", "private", "1"); - assertKeyValuePair(iter.next(), M2_FNAME_VALUE, "cf_firstname", "___card___", "1"); - assertKeyValuePair(iter.next(), M2_FNAME_VALUE, "cf_firstname", "___card___", "moreprivate", "1"); - assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "___card___", "1"); - assertKeyValuePair(iter.next(), bytes("def"), "cf_arr", "___card___", "moreprivate", "1"); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "___card___", "2"); - assertKeyValuePair(iter.next(), bytes("ghi"), "cf_arr", "___card___", "moreprivate", "2"); - assertKeyValuePair(iter.next(), bytes("mno"), "cf_arr", "___card___", "1"); - assertKeyValuePair(iter.next(), bytes("mno"), "cf_arr", "___card___", "moreprivate", "1"); - assertFalse(iter.hasNext()); - - scan.close(); - } - - private static void assertKeyValuePair(Entry<Key, Value> e, byte[] row, String cf, String cq, String value) - { - assertEquals(row, e.getKey().getRow().copyBytes()); - assertEquals(cf, e.getKey().getColumnFamily().toString()); - assertEquals(cq, e.getKey().getColumnQualifier().toString()); - assertEquals(value, e.getValue().toString()); - } - - private static void assertKeyValuePair(Entry<Key, Value> e, byte[] row, String cf, String cq, String cv, String value) - { - assertEquals(row, e.getKey().getRow().copyBytes()); - assertEquals(cf, e.getKey().getColumnFamily().toString()); - assertEquals(cq, e.getKey().getColumnQualifier().toString()); - assertEquals(cv, e.getKey().getColumnVisibility().toString()); - assertEquals(value, e.getValue().toString()); - } - - private static byte[] bytes(String s) - { - return s.getBytes(UTF_8); - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestAccumuloSplit.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestAccumuloSplit.java deleted file mode 100644 index 788971b035a4..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestAccumuloSplit.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package io.prestosql.plugin.accumulo.model; - -import com.google.common.collect.ImmutableList; -import io.airlift.json.JsonCodec; -import org.apache.accumulo.core.data.Range; -import org.testng.annotations.Test; - -import java.util.Optional; -import java.util.stream.Collectors; - -import static org.testng.Assert.assertEquals; - -public class TestAccumuloSplit -{ - private final JsonCodec<AccumuloSplit> codec = JsonCodec.jsonCodec(AccumuloSplit.class); - - @Test - public void testJsonRoundTrip() - { - AccumuloSplit expected = new AccumuloSplit( - ImmutableList.of(new Range(), new Range("bar", "foo"), new Range("bar", false, "baz", false)).stream().map(WrappedRange::new).collect(Collectors.toList()), - Optional.of("localhost:9000")); - - String json = codec.toJson(expected); - AccumuloSplit actual = codec.fromJson(json); - assertSplit(actual, expected); - } - - @Test - public void testJsonRoundTripEmptyThings() - { - AccumuloSplit expected = new AccumuloSplit( - ImmutableList.of(), - Optional.empty()); - - String json = codec.toJson(expected); - AccumuloSplit actual = codec.fromJson(json); - assertSplit(actual, expected); - } - - private static void assertSplit(AccumuloSplit actual, AccumuloSplit expected) - { - assertEquals(actual.getAddresses(), expected.getAddresses()); - assertEquals(actual.getHostPort(), expected.getHostPort()); - assertEquals(actual.getRanges(), expected.getRanges()); - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestField.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestField.java deleted file mode 100644 index 081e99d0132a..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestField.java +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package io.prestosql.plugin.accumulo.model; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.block.Block; -import io.prestosql.spi.type.ArrayType; -import io.prestosql.spi.type.StandardTypes; -import io.prestosql.spi.type.Type; -import io.prestosql.spi.type.TypeSignatureParameter; -import org.testng.annotations.Test; - -import java.sql.Date; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.GregorianCalendar; - -import static io.prestosql.metadata.MetadataManager.createTestMetadataManager; -import static io.prestosql.spi.type.BigintType.BIGINT; -import static io.prestosql.spi.type.BooleanType.BOOLEAN; -import static io.prestosql.spi.type.DateType.DATE; -import static io.prestosql.spi.type.DoubleType.DOUBLE; -import static io.prestosql.spi.type.IntegerType.INTEGER; -import static io.prestosql.spi.type.RealType.REAL; -import static io.prestosql.spi.type.SmallintType.SMALLINT; -import static io.prestosql.spi.type.TimeType.TIME; -import static io.prestosql.spi.type.TimestampType.TIMESTAMP; -import static io.prestosql.spi.type.TinyintType.TINYINT; -import static io.prestosql.spi.type.VarbinaryType.VARBINARY; -import static io.prestosql.spi.type.VarcharType.VARCHAR; -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.testng.Assert.assertEquals; - -public class TestField -{ - @Test(expectedExceptions = NullPointerException.class, expectedExceptionsMessageRegExp = "type is null") - public void testTypeIsNull() - { - new Field(null, null); - } - - @Test - public void testArray() - { - Type type = new ArrayType(VARCHAR); - Block expected = AccumuloRowSerializer.getBlockFromArray(VARCHAR, ImmutableList.of("a", "b", "c")); - Field f1 = new Field(expected, type); - assertEquals(f1.getArray(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "ARRAY ['a','b','c']"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testBoolean() - { - Type type = BOOLEAN; - Field f1 = new Field(true, type); - assertEquals(f1.getBoolean().booleanValue(), true); - assertEquals(f1.getObject(), true); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "true"); - - f1 = new Field(false, type); - assertEquals(f1.getBoolean().booleanValue(), false); - assertEquals(f1.getObject(), false); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "false"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testDate() - { - Type type = DATE; - Date expected = new Date(new GregorianCalendar(1999, 0, 1).getTime().getTime()); - Field f1 = new Field(expected, type); - assertEquals(f1.getDate(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "DATE '1999-01-01'"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testDouble() - { - Type type = DOUBLE; - Double expected = 123.45678; - Field f1 = new Field(expected, type); - assertEquals(f1.getDouble(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "123.45678"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testFloat() - { - Type type = REAL; - Float expected = 123.45678f; - Field f1 = new Field(expected, type); - 
assertEquals(f1.getFloat(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "123.45678"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testInt() - { - Type type = INTEGER; - Integer expected = 12345678; - Field f1 = new Field(expected, type); - assertEquals(f1.getInt(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "12345678"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testLong() - { - Type type = BIGINT; - Long expected = 12345678L; - Field f1 = new Field(expected, type); - assertEquals(f1.getLong(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "12345678"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testMap() - { - Type type = createTestMetadataManager().getParameterizedType(StandardTypes.MAP, ImmutableList.of( - TypeSignatureParameter.typeParameter(VARCHAR.getTypeSignature()), - TypeSignatureParameter.typeParameter(BIGINT.getTypeSignature()))); - Block expected = AccumuloRowSerializer.getBlockFromMap(type, ImmutableMap.of("a", 1L, "b", 2L, "c", 3L)); - Field f1 = new Field(expected, type); - assertEquals(f1.getMap(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "MAP(ARRAY ['a','b','c'], ARRAY [1,2,3])"); - } - - @Test - public void testSmallInt() - { - Type type = SMALLINT; - Short expected = 12345; - Field f1 = new Field(expected, type); - assertEquals(f1.getShort(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "12345"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testTime() - { - Type type = TIME; - Time expected = new Time(new GregorianCalendar(1999, 0, 1, 12, 30, 0).getTime().getTime()); - Field f1 = new Field(expected, type); - assertEquals(f1.getTime(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "TIME '12:30:00'"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testTimestamp() - { - Type type = TIMESTAMP; - Timestamp expected = new Timestamp(new GregorianCalendar(1999, 0, 1, 12, 30, 0).getTime().getTime()); - Field f1 = new Field(expected, type); - assertEquals(f1.getTimestamp(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "TIMESTAMP '1999-01-01 12:30:00.0'"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testTinyInt() - { - Type type = TINYINT; - Byte expected = 123; - Field f1 = new Field(expected, type); - assertEquals(f1.getByte(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "123"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testVarbinary() - { - Type type = VARBINARY; - byte[] expected = "O'Leary".getBytes(UTF_8); - Field f1 = new Field(expected, type); - assertEquals(f1.getVarbinary(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "CAST('O''Leary' AS VARBINARY)"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } - - @Test - public void testVarchar() - { - 
Type type = VARCHAR; - String expected = "O'Leary"; - Field f1 = new Field(expected, type); - assertEquals(f1.getVarchar(), expected); - assertEquals(f1.getObject(), expected); - assertEquals(f1.getType(), type); - assertEquals(f1.toString(), "'O''Leary'"); - - Field f2 = new Field(f1); - assertEquals(f2, f1); - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestRow.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestRow.java deleted file mode 100644 index 276c00a4fd63..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestRow.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.model; - -import com.google.common.collect.ImmutableList; -import io.prestosql.plugin.accumulo.serializers.AccumuloRowSerializer; -import io.prestosql.spi.type.ArrayType; -import org.testng.annotations.Test; - -import java.sql.Date; -import java.sql.Timestamp; -import java.util.GregorianCalendar; -import java.util.Optional; - -import static io.prestosql.spi.type.BigintType.BIGINT; -import static io.prestosql.spi.type.BooleanType.BOOLEAN; -import static io.prestosql.spi.type.DateType.DATE; -import static io.prestosql.spi.type.DoubleType.DOUBLE; -import static io.prestosql.spi.type.IntegerType.INTEGER; -import static io.prestosql.spi.type.RealType.REAL; -import static io.prestosql.spi.type.SmallintType.SMALLINT; -import static io.prestosql.spi.type.TimeType.TIME; -import static io.prestosql.spi.type.TimestampType.TIMESTAMP; -import static io.prestosql.spi.type.TinyintType.TINYINT; -import static io.prestosql.spi.type.VarbinaryType.VARBINARY; -import static io.prestosql.spi.type.VarcharType.VARCHAR; -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.testng.Assert.assertEquals; - -public class TestRow -{ - @Test - public void testRow() - { - Row r1 = new Row(); - r1.addField(new Field(AccumuloRowSerializer.getBlockFromArray(VARCHAR, ImmutableList.of("a", "b", "c")), new ArrayType(VARCHAR))); - r1.addField(true, BOOLEAN); - r1.addField(new Field(new Date(new GregorianCalendar(1999, 0, 1).getTime().getTime()), DATE)); - r1.addField(123.45678, DOUBLE); - r1.addField(new Field(123.45678f, REAL)); - r1.addField(12345678, INTEGER); - r1.addField(new Field(12345678L, BIGINT)); - r1.addField(new Field((short) 12345, SMALLINT)); - r1.addField(new GregorianCalendar(1999, 0, 1, 12, 30, 0).getTime().getTime(), TIME); - r1.addField(new Field(new Timestamp(new GregorianCalendar(1999, 0, 1, 12, 30, 0).getTime().getTime()), TIMESTAMP)); - r1.addField((byte) 123, TINYINT); - r1.addField(new Field("O'Leary".getBytes(UTF_8), VARBINARY)); - r1.addField("O'Leary", VARCHAR); - r1.addField(null, VARCHAR); - - assertEquals(r1.length(), 14); - assertEquals(r1.toString(), "(ARRAY ['a','b','c'],true,DATE '1999-01-01',123.45678,123.45678,12345678,12345678,12345,TIME '12:30:00',TIMESTAMP '1999-01-01 12:30:00.0',123,CAST('O''Leary' AS 
VARBINARY),'O''Leary',null)"); - - Row r2 = new Row(r1); - assertEquals(r2, r1); - } - - @Test(expectedExceptions = NullPointerException.class, expectedExceptionsMessageRegExp = "type is null") - public void testRowTypeIsNull() - { - Row r1 = new Row(); - r1.addField(VARCHAR, null); - } - - @Test - public void testRowFromString() - { - Row expected = new Row(); - expected.addField(new Field(AccumuloRowSerializer.getBlockFromArray(VARCHAR, ImmutableList.of("a", "b", "c")), new ArrayType(VARCHAR))); - expected.addField(true, BOOLEAN); - expected.addField(new Field(new Date(new GregorianCalendar(1999, 0, 1).getTime().getTime()), DATE)); - expected.addField(123.45678, DOUBLE); - expected.addField(new Field(123.45678f, REAL)); - expected.addField(12345678, INTEGER); - expected.addField(new Field(12345678L, BIGINT)); - expected.addField(new Field((short) 12345, SMALLINT)); - expected.addField(new GregorianCalendar(1999, 0, 1, 12, 30, 0).getTime().getTime(), TIME); - expected.addField(new Field(new Timestamp(new GregorianCalendar(1999, 0, 1, 12, 30, 0).getTime().getTime()), TIMESTAMP)); - expected.addField((byte) 123, TINYINT); - expected.addField(new Field("O'Leary".getBytes(UTF_8), VARBINARY)); - expected.addField("O'Leary", VARCHAR); - expected.addField(null, VARCHAR); - - RowSchema schema = new RowSchema(); - schema.addRowId("a", new ArrayType(VARCHAR)); - schema.addColumn("b", Optional.of("b"), Optional.of("b"), BOOLEAN); - schema.addColumn("c", Optional.of("c"), Optional.of("c"), DATE); - schema.addColumn("d", Optional.of("d"), Optional.of("d"), DOUBLE); - schema.addColumn("e", Optional.of("e"), Optional.of("e"), REAL); - schema.addColumn("f", Optional.of("f"), Optional.of("f"), INTEGER); - schema.addColumn("g", Optional.of("g"), Optional.of("g"), BIGINT); - schema.addColumn("h", Optional.of("h"), Optional.of("h"), SMALLINT); - schema.addColumn("i", Optional.of("i"), Optional.of("i"), TIME); - schema.addColumn("j", Optional.of("j"), Optional.of("j"), TIMESTAMP); - schema.addColumn("k", Optional.of("k"), Optional.of("k"), TINYINT); - schema.addColumn("l", Optional.of("l"), Optional.of("l"), VARBINARY); - schema.addColumn("m", Optional.of("m"), Optional.of("m"), VARCHAR); - schema.addColumn("n", Optional.of("n"), Optional.of("n"), VARCHAR); - - Row actual = Row.fromString(schema, "a,b,c|true|1999-01-01|123.45678|123.45678|12345678|12345678|12345|12:30:00|1999-01-01 12:30:00.0|123|O'Leary|O'Leary|", '|'); - assertEquals(actual, expected); - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestWrappedRange.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestWrappedRange.java deleted file mode 100644 index 900ab44b9d9d..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/model/TestWrappedRange.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.model; - -import org.apache.accumulo.core.data.Range; -import org.testng.annotations.Test; - -import static org.testng.Assert.assertEquals; - -public class TestWrappedRange -{ - @Test - public void testJsonRoundTrip() - throws Exception - { - Range exact = new Range("foo"); - Range range = new Range("bar", "foo"); - Range exclusiveRange = new Range("asiago", false, "bagel", false); - - assertEquals(WrappedRange.fromBytes(new WrappedRange(exact).toBytes()).getRange(), exact); - assertEquals(WrappedRange.fromBytes(new WrappedRange(range).toBytes()).getRange(), range); - assertEquals(WrappedRange.fromBytes(new WrappedRange(exclusiveRange).toBytes()).getRange(), exclusiveRange); - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/AbstractTestAccumuloRowSerializer.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/AbstractTestAccumuloRowSerializer.java deleted file mode 100644 index aeb639d8750d..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/AbstractTestAccumuloRowSerializer.java +++ /dev/null @@ -1,316 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.serializers; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import io.prestosql.spi.type.ArrayType; -import io.prestosql.spi.type.StandardTypes; -import io.prestosql.spi.type.Type; -import io.prestosql.spi.type.TypeSignatureParameter; -import org.apache.accumulo.core.data.Key; -import org.apache.accumulo.core.data.Mutation; -import org.apache.accumulo.core.data.Value; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.testng.annotations.Test; - -import java.sql.Date; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.AbstractMap.SimpleImmutableEntry; -import java.util.List; -import java.util.Map; -import java.util.UUID; - -import static io.prestosql.metadata.MetadataManager.createTestMetadataManager; -import static io.prestosql.spi.type.BigintType.BIGINT; -import static io.prestosql.spi.type.BooleanType.BOOLEAN; -import static io.prestosql.spi.type.DateType.DATE; -import static io.prestosql.spi.type.DoubleType.DOUBLE; -import static io.prestosql.spi.type.IntegerType.INTEGER; -import static io.prestosql.spi.type.RealType.REAL; -import static io.prestosql.spi.type.SmallintType.SMALLINT; -import static io.prestosql.spi.type.TimeType.TIME; -import static io.prestosql.spi.type.TimestampType.TIMESTAMP; -import static io.prestosql.spi.type.TinyintType.TINYINT; -import static io.prestosql.spi.type.VarbinaryType.VARBINARY; -import static io.prestosql.spi.type.VarcharType.VARCHAR; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.testng.Assert.assertEquals; - -public abstract class AbstractTestAccumuloRowSerializer -{ - private final Class<? extends AccumuloRowSerializer> serializerClass; - private
static final String COLUMN_NAME = "foo"; - - protected AbstractTestAccumuloRowSerializer(Class<? extends AccumuloRowSerializer> serializerClass) - { - this.serializerClass = serializerClass; - } - - @Test - public void testArray() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = new ArrayType(VARCHAR); - List<Object> expected = ImmutableList.of("a", "b", "c"); - byte[] data = serializer.encode(type, AccumuloRowSerializer.getBlockFromArray(VARCHAR, expected)); - List<Object> actual = serializer.decode(type, data); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = AccumuloRowSerializer.getArrayFromBlock(VARCHAR, serializer.getArray(COLUMN_NAME, type)); - assertEquals(actual, expected); - } - - @Test - public void testBoolean() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = BOOLEAN; - byte[] data = serializer.encode(type, true); - boolean actual = serializer.decode(type, data); - assertEquals(actual, true); - - deserializeData(serializer, data); - actual = serializer.getBoolean(COLUMN_NAME); - assertEquals(actual, true); - - data = serializer.encode(type, false); - actual = serializer.decode(type, data); - assertEquals(actual, false); - - deserializeData(serializer, data); - actual = serializer.getBoolean(COLUMN_NAME); - assertEquals(actual, false); - } - - @Test - public void testDate() - throws Exception - { - Date expected = new Date(new DateTime(2001, 2, 3, 4, 5, 6, DateTimeZone.UTC).getMillis()); - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - byte[] data = serializer.encode(DATE, expected); - - deserializeData(serializer, data); - Date actual = serializer.getDate(COLUMN_NAME); - - // Convert milliseconds to days so they can be compared regardless of the time of day - assertEquals(MILLISECONDS.toDays(actual.getTime()), MILLISECONDS.toDays(expected.getTime())); - } - - @Test - public void testDouble() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = DOUBLE; - Double expected = 123.45678; - byte[] data = serializer.encode(type, expected); - Double actual = serializer.decode(type, data); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = serializer.getDouble(COLUMN_NAME); - assertEquals(actual, expected); - } - - @Test - public void testFloat() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = REAL; - Float expected = 123.45678f; - byte[] data = serializer.encode(type, expected); - Float actual = ((Double) serializer.decode(type, data)).floatValue(); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = serializer.getFloat(COLUMN_NAME); - assertEquals(actual, expected); - } - - @Test - public void testInt() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = INTEGER; - Integer expected = 123456; - byte[] data = serializer.encode(type, expected); - @SuppressWarnings("unchecked") - Integer actual = ((Long) serializer.decode(type, data)).intValue(); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = serializer.getInt(COLUMN_NAME); - assertEquals(actual, expected); - } - - @Test - public void testLong() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type
= BIGINT; - Long expected = 123456L; - byte[] data = serializer.encode(type, expected); - Long actual = serializer.decode(type, data); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = serializer.getLong(COLUMN_NAME); - assertEquals(actual, expected); - } - - @Test - public void testMap() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = createTestMetadataManager().getParameterizedType(StandardTypes.MAP, ImmutableList.of( - TypeSignatureParameter.typeParameter(VARCHAR.getTypeSignature()), - TypeSignatureParameter.typeParameter(BIGINT.getTypeSignature()))); - Map<Object, Object> expected = ImmutableMap.of("a", 1L, "b", 2L, "3", 3L); - byte[] data = serializer.encode(type, AccumuloRowSerializer.getBlockFromMap(type, expected)); - Map<Object, Object> actual = serializer.decode(type, data); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = AccumuloRowSerializer.getMapFromBlock(type, serializer.getMap(COLUMN_NAME, type)); - assertEquals(actual, expected); - } - - @Test - public void testSmallInt() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = SMALLINT; - Short expected = 12345; - byte[] data = serializer.encode(type, expected); - Short actual = ((Long) serializer.decode(type, data)).shortValue(); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = serializer.getShort(COLUMN_NAME); - assertEquals(actual, expected); - } - - @Test - public void testTime() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = TIME; - Time expected = new Time(new java.util.Date().getTime()); - byte[] data = serializer.encode(type, expected); - Time actual = new Time(serializer.decode(type, data)); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = serializer.getTime(COLUMN_NAME); - assertEquals(actual, expected); - } - - @Test - public void testTimestamp() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = TIMESTAMP; - Timestamp expected = new Timestamp(new java.util.Date().getTime()); - byte[] data = serializer.encode(type, expected); - Timestamp actual = new Timestamp(serializer.decode(type, data)); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = serializer.getTimestamp(COLUMN_NAME); - assertEquals(actual, expected); - } - - @Test - public void testTinyInt() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = TINYINT; - Byte expected = 123; - byte[] data = serializer.encode(type, expected); - Byte actual = ((Long) serializer.decode(type, data)).byteValue(); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = serializer.getByte(COLUMN_NAME); - assertEquals(actual, expected); - } - - @Test - public void testVarbinary() - throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = VARBINARY; - byte[] expected = b(UUID.randomUUID().toString()); - byte[] data = serializer.encode(type, expected); - byte[] actual = serializer.decode(type, data); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = serializer.getVarbinary(COLUMN_NAME); - assertEquals(actual, expected); - } - - @Test - public void testVarchar() -
throws Exception - { - AccumuloRowSerializer serializer = serializerClass.getConstructor().newInstance(); - Type type = VARCHAR; - String expected = UUID.randomUUID().toString(); - byte[] data = serializer.encode(type, expected); - String actual = serializer.decode(type, data); - assertEquals(actual, expected); - - deserializeData(serializer, data); - actual = serializer.getVarchar(COLUMN_NAME); - assertEquals(actual, expected); - } - - protected void deserializeData(AccumuloRowSerializer serializer, byte[] data) - { - Mutation m = new Mutation("row"); - m.put(b("a"), b("a"), data); - Key key = new Key(b("row"), b("a"), b("b"), b(), 0, false); - Value value = new Value(data); - serializer.setMapping(COLUMN_NAME, "a", "b"); - serializer.deserialize(new SimpleImmutableEntry<>(key, value)); - } - - protected static byte[] b(String str) - { - return str.getBytes(UTF_8); - } - - protected static byte[] b() - { - return new byte[0]; - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/TestLexicoderRowSerializer.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/TestLexicoderRowSerializer.java deleted file mode 100644 index b2e0b9424f79..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/TestLexicoderRowSerializer.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.prestosql.plugin.accumulo.serializers; - -public class TestLexicoderRowSerializer - extends AbstractTestAccumuloRowSerializer -{ - public TestLexicoderRowSerializer() - { - super(LexicoderRowSerializer.class); - } -} diff --git a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/TestStringRowSerializer.java b/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/TestStringRowSerializer.java deleted file mode 100644 index 1cf5cb93e832..000000000000 --- a/presto-accumulo/src/test/java/io/prestosql/plugin/accumulo/serializers/TestStringRowSerializer.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.prestosql.plugin.accumulo.serializers; - -public class TestStringRowSerializer - extends AbstractTestAccumuloRowSerializer -{ - public TestStringRowSerializer() - { - super(StringRowSerializer.class); - } - - @Override - public void testArray() - { - // Arrays are not supported by StringRowSerializer - } - - @Override - public void testMap() - { - // Maps are not supported by StringRowSerializer - } -} diff --git a/presto-server/src/main/provisio/presto.xml b/presto-server/src/main/provisio/presto.xml index 93104ef0e231..b55429a20c7f 100644 --- a/presto-server/src/main/provisio/presto.xml +++ b/presto-server/src/main/provisio/presto.xml @@ -194,12 +194,6 @@ - <artifactSet to="plugin/accumulo"> - <artifact id="${project.groupId}:presto-accumulo:zip:${project.version}"> - <unpack /> - </artifact> - </artifactSet> -