HIVE-29191: Gracefully Handle Removed Configuration Properties in Hiv… #6073
base: master
@@ -24,6 +24,7 @@
import static org.apache.hadoop.hive.conf.SystemVariables.*;

import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
@@ -55,19 +56,30 @@ public class SetProcessor implements CommandProcessor {
private static final SessionState.LogHelper console = SessionState.getConsole();

private static final String prefix = "set: ";
private static final Set<String> removedConfigs =
private static final Set<String> removedHiveConfigs =
Review comment: I feel that maintaining all removed configs in the code base does not make much sense and defeats the purpose of deprecating and removing a property. If every time we remove a property we have to add it here and keep maintaining it to some extent, then why remove it in the first place? We could keep the property and just remove all references to it, which also doesn't make sense. Moreover, as history has shown, we don't enrich this list, so I would propose dropping it completely and not adding anything more here.
Sets.newHashSet("hive.mapred.supports.subdirectories",
"hive.enforce.sorting","hive.enforce.bucketing",
"hive.outerjoin.supports.filters",
"hive.llap.zk.sm.principal",
"hive.llap.zk.sm.keytab.file"
"hive.llap.zk.sm.keytab.file",
"hive.stats.fetch.partition.stats",
"hive.optimize.sort.dynamic.partition",
"hive.metastore.initial.metadata.count.enabled",
"hive.cli.pretty.output.num.cols",
"hive.debug.localtask",
"hive.timedout.txn.reaper.start",
"hive.repl.dumpdir.ttl",
"hive.repl.dumpdir.clean.freq",
"hive.llap.io.vrb.queue.limit.base",
"hive.llap.external.splits.order.by.force.single.split"
);
// Allow the user to set the ORC properties without getting an error.
private static final Set<String> allowOrcConfigs = new HashSet<>();
static {
for(OrcConf var: OrcConf.values()) {
String name = var.getHiveConfName();
if (name != null && name.startsWith("hive.")) {
removedConfigs.add(name);
allowOrcConfigs.add(name);
}
}
}

@@ -140,6 +152,10 @@ private boolean isHidden(String key) {
return false;
}

public Set<String> getRemovedHiveConfigs() {
return removedHiveConfigs;
}

private void dumpOption(String s) {
SessionState ss = SessionState.get();

@@ -228,6 +244,14 @@ static String setConf(SessionState ss, String varName, String key, String varVal
throws IllegalArgumentException {
String result = null;
HiveConf conf = ss.getConf();

if (removedHiveConfigs.contains(key)) {
// do not do anything. do not throw any error, just silently return
result = HiveConf.generateRemovedWarning();
LOG.warn(result);
return result;
}

String value = new VariableSubstitution(new HiveVariableSource() {
@Override
public Map<String, String> getHiveVariable() {

@@ -251,7 +275,7 @@ public Map<String, String> getHiveVariable() {
message.append("' FAILED in validation : ").append(fail).append('.');
throw new IllegalArgumentException(message.toString());
}
} else if (!removedConfigs.contains(key) && key.startsWith("hive.")) {
} else if (!allowOrcConfigs.contains(key) && key.startsWith("hive.")) {
Review comment: Going forward I see the following options (in personal preference order):
throw new IllegalArgumentException("hive configuration " + key + " does not exists.");
}
}

Review comment: There are better ways to log a warning about deprecated/removed properties. See: org.apache.hadoop.conf.Configuration#addDeprecation
Reply: I checked the addDeprecation method. It takes oldKeys and newKeys (which replace the oldKeys), and it has a null check to make sure the newKeys are not null. Since we have removed these properties, there are no newKeys in our case, making the method unusable for us. Do let me know if there is some other way to get the same functionality.
Reply: I guess we could pass an empty string or some special placeholder to approximate the desired behavior. However, since we are talking about removed properties, the deprecation API I suggested probably does not make much sense. One way or the other, entries in the deprecation context will also be removed at some point.