ajantha-bhat commented on a change in pull request #3493: [CARBONDATA-3600] Fix creating mv timeseries UDF column as partition column
URL:
https://github.com/apache/carbondata/pull/3493#discussion_r356937829
##########
File path: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
##########
@@ -121,11 +123,38 @@ class CarbonEnv {
CarbonProperties.getInstance
.addNonSerializableProperty(CarbonCommonConstants.IS_DRIVER_INSTANCE, "true")
initialized = true
+ cleanChildTablesNotRegisteredInHive(sparkSession)
}
}
Profiler.initialize(sparkSession.sparkContext)
LOGGER.info("Initialize CarbonEnv completed...")
}
+
+ private def cleanChildTablesNotRegisteredInHive(sparkSession: SparkSession): Unit = {
+ // If in case JDBC application is killed/stopped, when create datamap was in progress, datamap
+ table was created and the datamap schema was saved to the system, but the table was not registered to
+ // metastore. So, when we restart JDBC application, we need to clean up
+ stale tables and datamap schemas.
+ val dataMapSchemas = DataMapStoreManager.getInstance().getAllDataMapSchemas
+ dataMapSchemas.asScala.foreach {
+ dataMapSchema =>
+ if (null != dataMapSchema.getRelationIdentifier &&
+ !dataMapSchema.isIndexDataMap) {
+ if (!sparkSession.sessionState
+ .catalog
+ .tableExists(TableIdentifier(dataMapSchema.getRelationIdentifier.getTableName,
+ Some(dataMapSchema.getRelationIdentifier.getDatabaseName)))) {
+ DataMapStoreManager.getInstance().dropDataMapSchema(dataMapSchema.getDataMapName)
+ DataMapStoreManager.getInstance.unRegisterDataMapCatalog(dataMapSchema)
Review comment:
Same as above: this can be surrounded by a try/catch block.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[hidden email]
With regards,
Apache Git Services