Hive Metastore 初始化
Hive Metastore 的初始化函数 init() 信息量非常丰富,顺着它把所有相关的调用函数看下去,大致就能理解 Metastore 的工作流程。
package org.apache.hadoop.hive.metastore;
// Metastore service handler (excerpt). init() below wires up every collaborator
// the metastore needs, in a deliberate order: init hooks, the alter handler,
// the warehouse, one-time default-DB bootstrap, metrics, event listeners,
// partition-name validation, and the periodic event-cleaner timer.
// NOTE(review): hiveConf, wh, currentUrl, LOG, expressionProxy,
// fileMetadataManager and the init*Count fields are referenced below but
// declared outside this excerpt — confirm against the full class.
public class HiveMetaStore extends ThriftHiveMetastore {
// Not referenced anywhere within this excerpt.
private ClassLoader classLoader;
// Handles alter table/partition operations; the concrete class is resolved
// reflectively in init() from the "hive.metastore.alter.impl" property.
private AlterHandler alterHandler;
// Listeners fired before metastore events; filled from
// METASTORE_PRE_EVENT_LISTENERS, with a TransactionalValidationListener
// always inserted at position 0 (it must run first).
private List<MetaStorePreEventListener> preListeners;
// Listeners fired on metastore events; filled from METASTORE_EVENT_LISTENERS
// plus built-in session/ACID (and, when metrics are on, metrics) listeners.
private List<MetaStoreEventListener> listeners;
// Populated from METASTORE_TRANSACTIONAL_EVENT_LISTENERS; presumably invoked
// inside the metastore transaction — invocation site is not in this excerpt.
private List<MetaStoreEventListener> transactionalListeners;
// Listeners fired when a metastore API call finishes; from
// METASTORE_END_FUNCTION_LISTENERS.
private List<MetaStoreEndFunctionListener> endFunctionListeners;
// One-shot hooks fired at the very start of init(), from METASTORE_INIT_HOOKS.
private List<MetaStoreInitListener> initListeners;
// Compiled whitelist regex for partition names, or null when
// METASTORE_PARTITION_NAME_WHITELIST_PATTERN is unset/empty.
private Pattern partitionValidationPattern;
// Final, so assigned in a constructor/initializer outside this excerpt.
private final boolean isInTest;
/**
 * Initializes the metastore handler. Order matters: init hooks fire first,
 * then the alter handler and warehouse are created, the default database,
 * roles and admin users are bootstrapped (once per distinct connection URL),
 * metrics and the various listener chains are wired up, and finally the
 * periodic event-cleaner timer is started.
 *
 * @throws MetaException if any initialization step fails
 */
@Override
public void init() throws MetaException {
// Fire configured init hooks before anything else is set up.
initListeners = MetaStoreUtils.getMetaStoreListeners(
MetaStoreInitListener.class, hiveConf,
hiveConf.getVar(HiveConf.ConfVars.METASTORE_INIT_HOOKS));
for (MetaStoreInitListener singleInitListener: initListeners) {
MetaStoreInitContext context = new MetaStoreInitContext();
singleInitListener.onInit(context);
}
// Resolve and instantiate the alter handler reflectively; defaults to
// HiveAlterHandler when "hive.metastore.alter.impl" is not set.
String alterHandlerName = hiveConf.get("hive.metastore.alter.impl",
HiveAlterHandler.class.getName());
alterHandler = (AlterHandler) ReflectionUtils.newInstance(MetaStoreUtils.getClass(
alterHandlerName), hiveConf);
wh = new Warehouse(hiveConf);
// Bootstrap the default database, roles and admin users exactly once per
// distinct connection URL. Guarded by a class-level lock — currentUrl is
// presumably static shared state; confirm against the full class.
synchronized (HMSHandler.class) {
if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(hiveConf))) {
createDefaultDB();
createDefaultRoles();
addAdminUsers();
currentUrl = MetaStoreInit.getConnectionURL(hiveConf);
}
}
//Start Metrics for Embedded mode
if (hiveConf.getBoolVar(ConfVars.METASTORE_METRICS)) {
try {
MetricsFactory.init(hiveConf);
} catch (Exception e) {
// log exception, but ignore inability to start
LOG.error("error in Metrics init: " + e.getClass().getName() + " "
+ e.getMessage(), e);
}
}
// Optionally compute database/table/partition counts once via
// updateMetrics() and expose them as live gauges. The init*Count fields
// the gauges read are declared outside this excerpt.
Metrics metrics = MetricsFactory.getInstance();
if (metrics != null && hiveConf.getBoolVar(ConfVars.METASTORE_INIT_METADATA_COUNT_ENABLED)) {
LOG.info("Begin calculating metadata count metrics.");
updateMetrics();
LOG.info("Finished metadata count metrics: " + initDatabaseCount + " databases, " + initTableCount +
" tables, " + initPartCount + " partitions.");
metrics.addGauge(MetricsConstant.INIT_TOTAL_DATABASES, new MetricsVariable() {
@Override
public Object getValue() {
return initDatabaseCount;
}
});
metrics.addGauge(MetricsConstant.INIT_TOTAL_TABLES, new MetricsVariable() {
@Override
public Object getValue() {
return initTableCount;
}
});
metrics.addGauge(MetricsConstant.INIT_TOTAL_PARTITIONS, new MetricsVariable() {
@Override
public Object getValue() {
return initPartCount;
}
});
}
// Wire up the listener chains. Registration order is significant: the
// TransactionalValidationListener is forced to the front of preListeners,
// and built-in listeners are appended after the configured ones.
preListeners = MetaStoreUtils.getMetaStoreListeners(MetaStorePreEventListener.class,
hiveConf,
hiveConf.getVar(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS));
preListeners.add(0, new TransactionalValidationListener(hiveConf));
listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, hiveConf,
hiveConf.getVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS));
listeners.add(new SessionPropertiesListener(hiveConf));
listeners.add(new AcidEventListener(hiveConf));
transactionalListeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class,hiveConf,
hiveConf.getVar(ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS));
// Only register the metrics listener when a metrics backend actually started.
if (metrics != null) {
listeners.add(new HMSMetricsListener(hiveConf, metrics));
}
endFunctionListeners = MetaStoreUtils.getMetaStoreListeners(
MetaStoreEndFunctionListener.class, hiveConf,
hiveConf.getVar(HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS));
// Compile the partition-name whitelist regex, if one is configured.
String partitionValidationRegex =
hiveConf.getVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN);
if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
partitionValidationPattern = Pattern.compile(partitionValidationRegex);
} else {
partitionValidationPattern = null;
}
// Schedule the periodic event cleaner; a non-positive frequency disables it.
// The Timer's second argument marks its thread as a daemon, so it won't
// block JVM shutdown.
long cleanFreq = hiveConf.getTimeVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ, TimeUnit.MILLISECONDS);
if (cleanFreq > 0) {
// In default config, there is no timer.
Timer cleaner = new Timer("Metastore Events Cleaner Thread", true);
cleaner.schedule(new EventCleanerTask(this), cleanFreq, cleanFreq);
}
// Fields assigned below are declared outside this excerpt.
expressionProxy = PartFilterExprUtil.createExpressionProxy(hiveConf);
fileMetadataManager = new FileMetadataManager((ThreadLocalRawStore)this, hiveConf);
}
}