@@ -35,25 +35,27 @@ default boolean hasBuiltInCaching()
      */
     HiveMetastore createMetastore(Optional<ConnectorIdentity> identity);
 
-    static HiveMetastoreFactory ofInstance(HiveMetastore metastore)
+    static HiveMetastoreFactory ofInstance(HiveMetastore metastore, boolean impersonationEnabled)
     {
-        return new StaticHiveMetastoreFactory(metastore);
+        return new StaticHiveMetastoreFactory(metastore, impersonationEnabled);
     }
 
     class StaticHiveMetastoreFactory
             implements HiveMetastoreFactory
     {
         private final HiveMetastore metastore;
+        private final boolean impersonationEnabled;
 
-        private StaticHiveMetastoreFactory(HiveMetastore metastore)
+        private StaticHiveMetastoreFactory(HiveMetastore metastore, boolean impersonationEnabled)
         {
             this.metastore = requireNonNull(metastore, "metastore is null");
+            this.impersonationEnabled = impersonationEnabled;
         }
 
         @Override
         public boolean isImpersonationEnabled()
         {
-            return false;
+            return impersonationEnabled;
         }
 
         @Override
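
Note: callers migrate by passing the extra flag explicitly; false reproduces the previously hard-coded behavior. A minimal sketch, assuming any existing HiveMetastore instance named metastore (illustrative, not part of this change):

    // Before this change: HiveMetastoreFactory.ofInstance(metastore)
    HiveMetastoreFactory factory = HiveMetastoreFactory.ofInstance(metastore, false);
    // The flag is now surfaced through the factory instead of always returning false:
    boolean impersonation = factory.isImpersonationEnabled(); // false here
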
@@ -221,7 +221,7 @@ public Stream<AddFileEntry> getActiveFiles(
                 newDirectExecutorService());
 
         TransactionLogReaderFactory transactionLogReaderFactory = new FileSystemTransactionLogReaderFactory(fileSystemFactory);
-        HiveMetastoreFactory hiveMetastoreFactory = HiveMetastoreFactory.ofInstance(createTestingFileHiveMetastore(new MemoryFileSystemFactory(), Location.of("memory:///")));
+        HiveMetastoreFactory hiveMetastoreFactory = HiveMetastoreFactory.ofInstance(createTestingFileHiveMetastore(new MemoryFileSystemFactory(), Location.of("memory:///")), false);
         DeltaLakeMetadataFactory metadataFactory = new DeltaLakeMetadataFactory(
                 hiveMetastoreFactory,
                 new DefaultDeltaLakeFileSystemFactory(hdfsFileSystemFactory, new NoOpVendedCredentialsProvider()),

@@ -41,7 +41,7 @@ public TestingDeltaLakeMetastoreModule(HiveMetastore metastore)
     @Override
     public void setup(Binder binder)
     {
-        binder.bind(HiveMetastoreFactory.class).annotatedWith(RawHiveMetastoreFactory.class).toInstance(HiveMetastoreFactory.ofInstance(metastore));
+        binder.bind(HiveMetastoreFactory.class).annotatedWith(RawHiveMetastoreFactory.class).toInstance(HiveMetastoreFactory.ofInstance(metastore, false));
         install(new CachingHiveMetastoreModule());
         binder.bind(DeltaLakeTableOperationsProvider.class).to(DeltaLakeFileMetastoreTableOperationsProvider.class).in(Scopes.SINGLETON);
 
@@ -71,7 +71,7 @@ public String getName()
     public Connector create(String catalogName, Map<String, String> config, ConnectorContext context)
     {
         checkStrictSpiVersionMatch(context, this);
-        return createConnector(catalogName, config, context, EMPTY_MODULE, Optional.empty(), Optional.empty());
+        return createConnector(catalogName, config, context, EMPTY_MODULE, Optional.empty(), false, Optional.empty());
     }
 
     public static Connector createConnector(
@@ -80,6 +80,7 @@ public static Connector createConnector(
             ConnectorContext context,
             Module module,
             Optional<HiveMetastore> metastore,
+            boolean metastoreImpersonationEnabled,
             Optional<TrinoFileSystemFactory> fileSystemFactory)
     {
         ClassLoader classLoader = HiveConnectorFactory.class.getClassLoader();
@@ -91,7 +92,7 @@ public static Connector createConnector(
                 new JsonModule(),
                 new TypeDeserializerModule(),
                 new HiveModule(),
-                new HiveMetastoreModule(metastore),
+                new HiveMetastoreModule(metastore, metastoreImpersonationEnabled),
                 new HiveSecurityModule(),
                 fileSystemFactory
                         .map(factory -> (Module) binder -> binder.bind(TrinoFileSystemFactory.class).toInstance(factory))

@@ -30,17 +30,19 @@ public class HiveMetastoreModule
         extends AbstractConfigurationAwareModule
 {
     private final Optional<HiveMetastore> metastore;
+    private final boolean impersonationEnabled;
 
-    public HiveMetastoreModule(Optional<HiveMetastore> metastore)
+    public HiveMetastoreModule(Optional<HiveMetastore> metastore, boolean impersonationEnabled)
     {
         this.metastore = metastore;
+        this.impersonationEnabled = impersonationEnabled;
     }
 
     @Override
     protected void setup(Binder binder)
     {
         if (metastore.isPresent()) {
-            binder.bind(HiveMetastoreFactory.class).annotatedWith(RawHiveMetastoreFactory.class).toInstance(HiveMetastoreFactory.ofInstance(metastore.get()));
+            binder.bind(HiveMetastoreFactory.class).annotatedWith(RawHiveMetastoreFactory.class).toInstance(HiveMetastoreFactory.ofInstance(metastore.get(), impersonationEnabled));
             binder.bind(Key.get(boolean.class, AllowHiveTableRename.class)).toInstance(true);
         }
         else {

@@ -21,6 +21,7 @@
 import io.trino.metastore.HiveMetastoreFactory;
 import io.trino.metastore.Table;
 import io.trino.metastore.cache.CachingHiveMetastore;
+import io.trino.metastore.cache.SharedHiveMetastoreCache.ImpersonationCachingHiveMetastoreFactory;
 import io.trino.plugin.hive.HiveErrorCode;
 import io.trino.plugin.hive.fs.DirectoryLister;
 import io.trino.plugin.hive.metastore.glue.GlueCache;
@@ -133,6 +134,18 @@ public void flushMetadataCache(
     }
 
     private void doFlushMetadataCache(ConnectorSession session, Optional<String> schemaName, Optional<String> tableName, List<String> partitionColumns, List<String> partitionValues)
+    {
+        if (hiveMetadataFactory instanceof ImpersonationCachingHiveMetastoreFactory impersonationCachingHiveMetastoreFactory) {
+            checkState(cachingHiveMetastore.isEmpty(), "CachingHiveMetastore should not be set when using ImpersonationCachingHiveMetastoreFactory");
+            Optional<CachingHiveMetastore> impersonationCachingHiveMetastore = Optional.of((CachingHiveMetastore) impersonationCachingHiveMetastoreFactory.createMetastore(Optional.of(session.getIdentity())));
+            doFlushMetadataCache(session, impersonationCachingHiveMetastore, schemaName, tableName, partitionColumns, partitionValues);
+        }
+        else {
+            doFlushMetadataCache(session, cachingHiveMetastore, schemaName, tableName, partitionColumns, partitionValues);
+        }
+    }
+
+    private void doFlushMetadataCache(ConnectorSession session, Optional<CachingHiveMetastore> cachingHiveMetastore, Optional<String> schemaName, Optional<String> tableName, List<String> partitionColumns, List<String> partitionValues)
     {
         if (cachingHiveMetastore.isEmpty() && glueCache.isEmpty()) {
             // TODO this currently does not work. CachingHiveMetastore is always bound for metastores other than Glue, even when caching is disabled,
@@ -156,13 +169,13 @@ else if (schemaName.isPresent() && tableName.isPresent()) {
             List<String> partitions;
 
             if (!partitionColumns.isEmpty()) {
-                cachingHiveMetastore.ifPresent(cachingHiveMetastore -> cachingHiveMetastore.flushPartitionCache(schemaName.get(), tableName.get(), partitionColumns, partitionValues));
+                cachingHiveMetastore.ifPresent(hiveMetastore -> hiveMetastore.flushPartitionCache(schemaName.get(), tableName.get(), partitionColumns, partitionValues));
                 glueCache.ifPresent(glueCache -> glueCache.invalidatePartition(schemaName.get(), tableName.get(), new PartitionName(partitionValues)));
 
                 partitions = ImmutableList.of(makePartName(partitionColumns, partitionValues));
             }
             else {
-                cachingHiveMetastore.ifPresent(cachingHiveMetastore -> cachingHiveMetastore.invalidateTable(schemaName.get(), tableName.get()));
+                cachingHiveMetastore.ifPresent(hiveMetastore -> hiveMetastore.invalidateTable(schemaName.get(), tableName.get()));
                 glueCache.ifPresent(glueCache -> glueCache.invalidateTable(schemaName.get(), tableName.get(), true));
 
                 List<String> partitionColumnNames = table.getPartitionColumns().stream()
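
Note: the instanceof dispatch above exists because, with impersonation enabled, the shared cache is replaced by one cached metastore per user, so a flush has to target the cache instance belonging to the calling identity. A minimal sketch of that per-user keying, with hypothetical names (PerUserMetastoreCache and createCacheFor are illustrative, not Trino's actual SharedHiveMetastoreCache internals):

    import io.trino.metastore.cache.CachingHiveMetastore;
    import io.trino.spi.security.ConnectorIdentity;

    import java.util.Map;
    import java.util.Optional;
    import java.util.concurrent.ConcurrentHashMap;

    // Hypothetical sketch only: one CachingHiveMetastore per user name.
    final class PerUserMetastoreCache
    {
        private final Map<String, CachingHiveMetastore> cachesByUser = new ConcurrentHashMap<>();

        CachingHiveMetastore metastoreFor(Optional<ConnectorIdentity> identity)
        {
            // Every identity maps to its own cache, so flushing alice's entries
            // leaves bob's cached metadata untouched -- which is why
            // doFlushMetadataCache() above resolves the cache through the
            // calling session's identity.
            String user = identity.orElseThrow().getUser();
            return cachesByUser.computeIfAbsent(user, ignored -> createCacheFor(user));
        }

        private CachingHiveMetastore createCacheFor(String user)
        {
            throw new UnsupportedOperationException("illustrative sketch only");
        }
    }
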
@@ -47,6 +47,7 @@
 
 import static io.airlift.log.Level.WARN;
 import static io.airlift.units.Duration.nanosSince;
+import static io.trino.plugin.hive.HiveTestUtils.SESSION;
 import static io.trino.plugin.hive.TestingHiveUtils.getConnectorService;
 import static io.trino.plugin.tpch.ColumnNaming.SIMPLIFIED;
 import static io.trino.plugin.tpch.DecimalTypeMapping.DOUBLE;
@@ -101,6 +102,7 @@ public static class Builder<SELF extends Builder<?>>
         private List<TpchTable<?>> initialTables = ImmutableList.of();
         private Optional<String> initialSchemasLocationBase = Optional.empty();
         private Optional<Function<DistributedQueryRunner, HiveMetastore>> metastore = Optional.empty();
+        private boolean metastoreImpersonationEnabled;
         private boolean tpcdsCatalogEnabled;
         private boolean tpchBucketedCatalogEnabled;
         private boolean createTpchSchemas = true;
@@ -161,6 +163,13 @@ public SELF setMetastore(Function<DistributedQueryRunner, HiveMetastore> metasto
             return self();
         }
 
+        @CanIgnoreReturnValue
+        public SELF setMetastoreImpersonationEnabled(boolean metastoreImpersonationEnabled)
+        {
+            this.metastoreImpersonationEnabled = metastoreImpersonationEnabled;
+            return self();
+        }
+
         @CanIgnoreReturnValue
         public SELF setTpcdsCatalogEnabled(boolean tpcdsCatalogEnabled)
         {
@@ -236,7 +245,7 @@ public DistributedQueryRunner build()
                 hiveProperties.put("fs.hadoop.enabled", "true");
             }
 
-            queryRunner.installPlugin(new TestingHivePlugin(dataDir, metastore, decryptionKeyRetriever));
+            queryRunner.installPlugin(new TestingHivePlugin(dataDir, metastore, metastoreImpersonationEnabled, decryptionKeyRetriever));
 
             Map<String, String> hiveProperties = new HashMap<>();
             if (!skipTimezoneSetup) {
@@ -280,7 +289,7 @@ public DistributedQueryRunner build()
     private void populateData(QueryRunner queryRunner)
     {
         HiveMetastore metastore = getConnectorService(queryRunner, HiveMetastoreFactory.class)
-                .createMetastore(Optional.empty());
+                .createMetastore(Optional.of(SESSION.getIdentity()));
         if (metastore.getDatabase(TPCH_SCHEMA).isEmpty()) {
             metastore.createDatabase(createDatabaseMetastoreObject(TPCH_SCHEMA, initialSchemasLocationBase));
             copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, initialTables);
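
Note: a hypothetical use of the new builder knob (assuming this builder belongs to HiveQueryRunner; the runner class name is an assumption, not shown in this diff):

    QueryRunner queryRunner = HiveQueryRunner.builder()
            .setMetastoreImpersonationEnabled(true)
            .build();

The flag travels from the builder through TestingHivePlugin and TestingHiveConnectorFactory into HiveMetastoreModule, so the installed catalog's HiveMetastoreFactory reports isImpersonationEnabled() accordingly.
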
@@ -394,7 +394,7 @@ private static ConnectorPageSink createPageSink(
                 getDefaultHiveFileWriterFactories(config, fileSystemFactory),
                 HDFS_FILE_SYSTEM_FACTORY,
                 PAGE_SORTER,
-                HiveMetastoreFactory.ofInstance(metastore),
+                HiveMetastoreFactory.ofInstance(metastore, false),
                 new GroupByHashPageIndexerFactory(new FlatHashStrategyCompiler(new TypeOperators())),
                 TESTING_TYPE_MANAGER,
                 config,

@@ -0,0 +1,73 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.trino.plugin.hive;
+
+import com.google.common.collect.ImmutableMap;
+import io.trino.Session;
+import io.trino.plugin.hive.containers.Hive3MinioDataLake;
+import io.trino.plugin.hive.containers.HiveMinioDataLake;
+import io.trino.plugin.hive.s3.S3HiveQueryRunner;
+import io.trino.spi.security.Identity;
+import io.trino.testing.AbstractTestQueryFramework;
+import io.trino.testing.QueryRunner;
+import io.trino.testing.sql.TestTable;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+
+import java.util.List;
+
+import static io.trino.plugin.hive.containers.HiveHadoop.HIVE3_IMAGE;
+import static io.trino.testing.TestingNames.randomNameSuffix;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS;
+
+@TestInstance(PER_CLASS)
+final class TestThriftMetastoreImpersonation
+        extends AbstractTestQueryFramework
+{
+    @Override
+    protected QueryRunner createQueryRunner()
+            throws Exception
+    {
+        HiveMinioDataLake hiveMinioDataLake = closeAfterClass(new Hive3MinioDataLake("test-thrift-impersonation-" + randomNameSuffix(), HIVE3_IMAGE));
+        hiveMinioDataLake.start();
+        return S3HiveQueryRunner.builder(hiveMinioDataLake)
+                .setMetastoreImpersonationEnabled(true)
+                .setHiveProperties(ImmutableMap.<String, String>builder()
+                        .put("hive.security", "allow-all")
+                        .put("hive.metastore-cache-ttl", "1d")
+                        .put("hive.user-metastore-cache-ttl", "1d")
+                        .buildOrThrow())
+                .build();
+    }
+
+    @Test
+    void testFlushMetadataCache()
+    {
+        Session alice = Session.builder(getSession()).setIdentity(Identity.ofUser("alice")).build();
+
+        try (TestTable table = newTrinoTable("test_partition", "(id int, part int) WITH (partitioned_by = ARRAY['part'])", List.of("1, 10"))) {
+            assertThat(computeScalar(alice, "SELECT count(1) FROM \"" + table.getName() + "$partitions\""))
+                    .isEqualTo(1L);
+
+            assertUpdate("INSERT INTO " + table.getName() + " VALUES (2, 20)", 1);
+            assertThat(computeScalar(alice, "SELECT count(1) FROM \"" + table.getName() + "$partitions\""))
+                    .isEqualTo(1L);
+
+            assertUpdate(alice, "CALL system.flush_metadata_cache(schema_name => CURRENT_SCHEMA, table_name => '" + table.getName() + "')");
+            assertThat(computeScalar(alice, "SELECT count(1) FROM \"" + table.getName() + "$partitions\""))
+                    .isEqualTo(2L);
+        }
+    }
+}
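
Note: both cache TTLs are set to a full day so the stale partition count seen by alice can only be corrected by the explicit flush_metadata_cache call against her per-user cache, never by cache expiry during the test run.
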
@@ -38,17 +38,23 @@ public class TestingHiveConnectorFactory
         implements ConnectorFactory
 {
     private final Optional<HiveMetastore> metastore;
+    private final boolean metastoreImpersonationEnabled;
     private final Module module;
 
     public TestingHiveConnectorFactory(Path localFileSystemRootPath)
     {
-        this(localFileSystemRootPath, Optional.empty(), Optional.empty());
+        this(localFileSystemRootPath, Optional.empty(), false, Optional.empty());
     }
 
     @Deprecated
-    public TestingHiveConnectorFactory(Path localFileSystemRootPath, Optional<HiveMetastore> metastore, Optional<DecryptionKeyRetriever> decryptionKeyRetriever)
+    public TestingHiveConnectorFactory(
+            Path localFileSystemRootPath,
+            Optional<HiveMetastore> metastore,
+            boolean metastoreImpersonationEnabled,
+            Optional<DecryptionKeyRetriever> decryptionKeyRetriever)
     {
         this.metastore = requireNonNull(metastore, "metastore is null");
+        this.metastoreImpersonationEnabled = metastoreImpersonationEnabled;
 
         boolean ignored = localFileSystemRootPath.toFile().mkdirs();
         this.module = binder -> {
@@ -79,6 +85,6 @@ public Connector create(String catalogName, Map<String, String> config, Connecto
         if (metastore.isEmpty() && !config.containsKey("hive.metastore")) {
             configBuilder.put("hive.metastore", "file");
         }
-        return createConnector(catalogName, configBuilder.buildOrThrow(), context, module, metastore, Optional.empty());
+        return createConnector(catalogName, configBuilder.buildOrThrow(), context, module, metastore, metastoreImpersonationEnabled, Optional.empty());
     }
 }

@@ -29,30 +29,36 @@ public class TestingHivePlugin
 {
     private final Path localFileSystemRootPath;
     private final Optional<HiveMetastore> metastore;
+    private final boolean metastoreImpersonationEnabled;
     private final Optional<DecryptionKeyRetriever> decryptionKeyRetriever;
 
     public TestingHivePlugin(Path localFileSystemRootPath)
     {
-        this(localFileSystemRootPath, Optional.empty(), Optional.empty());
+        this(localFileSystemRootPath, Optional.empty(), false, Optional.empty());
     }
 
     @Deprecated
     public TestingHivePlugin(Path localFileSystemRootPath, HiveMetastore metastore)
     {
-        this(localFileSystemRootPath, Optional.of(metastore), Optional.empty());
+        this(localFileSystemRootPath, Optional.of(metastore), false, Optional.empty());
     }
 
     @Deprecated
-    public TestingHivePlugin(Path localFileSystemRootPath, Optional<HiveMetastore> metastore, Optional<DecryptionKeyRetriever> decryptionKeyRetriever)
+    public TestingHivePlugin(
+            Path localFileSystemRootPath,
+            Optional<HiveMetastore> metastore,
+            boolean metastoreImpersonationEnabled,
+            Optional<DecryptionKeyRetriever> decryptionKeyRetriever)
     {
         this.localFileSystemRootPath = requireNonNull(localFileSystemRootPath, "localFileSystemRootPath is null");
         this.metastore = requireNonNull(metastore, "metastore is null");
+        this.metastoreImpersonationEnabled = metastoreImpersonationEnabled;
        this.decryptionKeyRetriever = requireNonNull(decryptionKeyRetriever, "decryptionKeyRetriever is null");
     }
 
     @Override
     public Iterable<ConnectorFactory> getConnectorFactories()
     {
-        return ImmutableList.of(new TestingHiveConnectorFactory(localFileSystemRootPath, metastore, decryptionKeyRetriever));
+        return ImmutableList.of(new TestingHiveConnectorFactory(localFileSystemRootPath, metastore, metastoreImpersonationEnabled, decryptionKeyRetriever));
     }
 }

@@ -35,6 +35,7 @@
 import static io.trino.plugin.base.security.UserNameProvider.SIMPLE_USER_NAME_PROVIDER;
 import static io.trino.plugin.hive.HiveTestUtils.HDFS_ENVIRONMENT;
 import static io.trino.plugin.hive.HiveTestUtils.HDFS_FILE_SYSTEM_STATS;
+import static io.trino.plugin.hive.HiveTestUtils.SESSION;
 import static io.trino.plugin.hive.metastore.thrift.TestingTokenAwareMetastoreClientFactory.TIMEOUT;
 import static java.util.Objects.requireNonNull;
 import static java.util.concurrent.Executors.newFixedThreadPool;
@@ -108,6 +109,6 @@ public ThriftMetastore build(Consumer<AutoCloseable> registerResource)
                 thriftMetastoreConfig,
                 fileSystemFactory,
                 executorService);
-        return metastoreFactory.createMetastore(Optional.empty());
+        return metastoreFactory.createMetastore(Optional.of(SESSION.getIdentity()));
     }
 }

@@ -73,7 +73,7 @@ public static Connector createConnector(
                 new MBeanModule(),
                 new JsonModule(),
                 new HudiModule(),
-                new HiveMetastoreModule(Optional.empty()),
+                new HiveMetastoreModule(Optional.empty(), false),
                 new FileSystemModule(catalogName, context, false),
                 new MBeanServerModule(),
                 module.orElse(EMPTY_MODULE),