@@ -87,7 +87,6 @@
 import org.apache.hadoop.hive.common.StatsSetupConst.StatDB;
 import org.apache.hadoop.hive.common.StringInternUtils;
 import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
 import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.conf.Constants;
@@ -363,7 +362,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
   private final Map<TableScanOperator, Map<String, ExprNodeDesc>> opToPartToSkewedPruner;
   private Map<SelectOperator, Table> viewProjectToTableSchema;
   private Operator<? extends OperatorDesc> sinkOp;
-  private final CacheTableHelper cacheTableHelper = new CacheTableHelper();

   /**
    * a map for the split sampling, from alias to an instance of SplitSample
@@ -2883,8 +2881,7 @@ private void replaceViewReferenceWithDefinition(QB qb, Table tab,
     String viewText = tab.getViewExpandedText();
     TableMask viewMask = new TableMask(this, conf, false);
     viewTree = ParseUtils.parse(viewText, ctx, tab.getCompleteName());
-    cacheTableHelper.populateCacheForView(ctx.getParsedTables(), conf,
-        getTxnMgr(), tab.getDbName(), tab.getTableName());
+
     if (viewMask.isEnabled() && analyzeRewrite == null) {
       ParseResult parseResult = rewriteASTWithMaskAndFilter(viewMask, viewTree,
           ctx.getViewTokenRewriteStream(viewFullyQualifiedName),
@@ -13163,15 +13160,21 @@ void analyzeInternal(ASTNode ast, Supplier<PlannerContext> pcf) throws SemanticException {
     perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.GENERATE_RESOLVED_PARSETREE);
     // 1. Generate Resolved Parse tree from syntax tree
     boolean needsTransform = needsTransform();
-    //change the location of position alias process here
+    // change the location of position alias process here
     processPositionAlias(ast);
-    cacheTableHelper.populateCache(ctx.getParsedTables(), conf, getTxnMgr());
     PlannerContext plannerCtx = pcf.get();
     if (!genResolvedParseTree(ast, plannerCtx)) {
       return;
     }
-    if (tablesFromReadEntities(inputs).stream().anyMatch(AcidUtils::isTransactionalTable)) {
-      queryState.getValidTxnList();
+
+    if (queryState.getHMSCache() != null) {
+      // This step primes the cache containing the validTxnWriteIdList. It fetches
+      // all the tables into the MetaStore client cache with one HMS call.
+      getQueryValidTxnWriteIdList();
+    } else {
+      if (tablesFromReadEntities(inputs).stream().anyMatch(AcidUtils::isTransactionalTable)) {
+        queryState.getValidTxnList();
+      }
     }

     if (HiveConf.getBoolVar(conf, ConfVars.HIVE_REMOVE_ORDERBY_IN_SUBQUERY)) {
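Note on the new branch above: the primed cache holds a `ValidTxnWriteIdList`, which maps each fully qualified table name to its `ValidWriteIdList`. A minimal sketch of how downstream code can consume such a snapshot; the table name `db.tbl` and write ID `42` are hypothetical values, not part of this patch:

```java
import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

// Sketch: reading one table's write-id snapshot out of the ValidTxnWriteIdList
// returned by getQueryValidTxnWriteIdList(). "db.tbl" and writeId 42 are
// illustrative values only.
public class WriteIdSnapshotSketch {
  static boolean isWriteVisible(ValidTxnWriteIdList txnWriteIds) {
    ValidWriteIdList tableWriteIds = txnWriteIds.getTableValidWriteIdList("db.tbl");
    if (tableWriteIds == null) {
      return false; // table not covered by this snapshot
    }
    return tableWriteIds.isWriteIdValid(42L);
  }
}
```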
@@ -15166,30 +15169,25 @@ private String getQueryStringForCache(ASTNode ast) {

   private ValidTxnWriteIdList getQueryValidTxnWriteIdList() throws SemanticException {
     // TODO: Once HIVE-18948 is in, should be able to retrieve writeIdList from the conf.
-    //cachedWriteIdList = AcidUtils.getValidTxnWriteIdList(conf);
+    // cachedWriteIdList = AcidUtils.getValidTxnWriteIdList(conf);
     //
-    List<String> transactionalTables = tablesFromReadEntities(inputs)
-        .stream()
-        .filter(AcidUtils::isTransactionalTable)
-        .map(Table::getFullyQualifiedName)
-        .collect(Collectors.toList());
-
-    if (transactionalTables.size() > 0) {
-      String txnString = queryState.getValidTxnList();
-      if (txnString == null) {
-        return null;
-      }
-      try {
-        return getTxnMgr().getValidWriteIds(transactionalTables, txnString);
-      } catch (Exception err) {
-        String msg = "Error while getting the txnWriteIdList for tables " + transactionalTables
-            + " and validTxnList " + conf.get(ValidTxnList.VALID_TXNS_KEY);
-        throw new SemanticException(msg, err);
-      }
-    }
+    var transactionalTables = tablesFromReadEntities(inputs)
+        .stream()
+        .filter(AcidUtils::isTransactionalTable)
+        .map(Table::getFullyQualifiedName)
+        .toList();

-    // No transactional tables.
-    return null;
+    if (transactionalTables.isEmpty()) {
+      return null;
+    }
+    String txnString = queryState.getValidTxnList();
+    try {
+      return getTxnMgr().getValidWriteIds(transactionalTables, txnString);
+    } catch (Exception err) {
+      String msg = "Error while getting the txnWriteIdList for tables " + transactionalTables
+          + " and validTxnList " + txnString;
+      throw new SemanticException(msg, err);
+    }
   }

   private QueryResultsCache.LookupInfo createLookupInfoForQuery(ASTNode astNode) throws SemanticException {
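Side note on the refactor above: `Stream.toList()` (Java 16+) replaces `collect(Collectors.toList())` and returns an unmodifiable list, which is safe here because `transactionalTables` is only read after it is built. A self-contained sketch of the behavioral difference:

```java
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ToListSketch {
  public static void main(String[] args) {
    // collect(Collectors.toList()) makes no immutability guarantee; in
    // practice it returns a mutable ArrayList.
    List<String> mutable = Stream.of("a", "b").collect(Collectors.toList());
    mutable.add("c"); // fine

    // Stream.toList() (Java 16+) returns an unmodifiable list.
    List<String> frozen = Stream.of("a", "b").toList();
    try {
      frozen.add("c");
    } catch (UnsupportedOperationException expected) {
      System.out.println("Stream.toList() results are unmodifiable");
    }
  }
}
```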