[GitHub] [carbondata] NamanRastogi commented on a change in pull request #3164: [WIP] [CARBONDATA-3331] Fix for external table in Show Metacache


GitBox
NamanRastogi commented on a change in pull request #3164: [WIP] [CARBONDATA-3331] Fix for external table in Show Metacache
URL: https://github.com/apache/carbondata/pull/3164#discussion_r269860387
 
 

 ##########
 File path: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
 ##########
 @@ -71,52 +65,68 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
         Row("ALL", "ALL", 0L, 0L, 0L),
         Row(currentDatabase, "ALL", 0L, 0L, 0L))
     } else {
-      val carbonTables = CarbonEnv.getInstance(sparkSession).carbonMetaStore
-        .listAllTables(sparkSession).filter {
-        carbonTable =>
-          carbonTable.getDatabaseName.equalsIgnoreCase(currentDatabase) &&
-          isValidTable(carbonTable, sparkSession) &&
-          !carbonTable.isChildDataMap
+      val carbonTables = sparkSession.sessionState.catalog.listTables(currentDatabase).collect {
+        case tableIdent if CarbonEnv.getInstance(sparkSession).carbonMetaStore
+          .tableExists(tableIdent)(sparkSession) =>
+          CarbonEnv.getCarbonTable(tableIdent)(sparkSession)
       }
 
       // All tables of current database
-      var (dbIndexSize, dbDatamapSize, dbDictSize) = (0L, 0L, 0L)
-      val tableList: Seq[Row] = carbonTables.map {
+      var (dbDatamapSize, dbDictSize) = (0L, 0L)
+      val tableList = carbonTables.flatMap {
         carbonTable =>
-          val tableResult = getTableCache(sparkSession, carbonTable)
-          var (indexSize, datamapSize) = (tableResult(0).getLong(1), 0L)
-          tableResult.drop(2).foreach {
-            row =>
-              indexSize += row.getLong(1)
-              datamapSize += row.getLong(2)
-          }
-          val dictSize = tableResult(1).getLong(1)
-
-          dbIndexSize += indexSize
-          dbDictSize += dictSize
-          dbDatamapSize += datamapSize
-
-          val tableName = if (!carbonTable.isTransactionalTable) {
-            carbonTable.getTableName + " (external table)"
-          }
-          else {
-            carbonTable.getTableName
+          try {
+            val tableResult = getTableCache(sparkSession, carbonTable)
+            var (indexSize, datamapSize) = (tableResult(0).getLong(1), 0L)
+            tableResult.drop(2).foreach {
+              row =>
+                indexSize += row.getLong(1)
+                datamapSize += row.getLong(2)
+            }
+            val dictSize = tableResult(1).getLong(1)
+
+            dbDictSize += dictSize
+            dbDatamapSize += datamapSize
+
+            val tableName = if (!carbonTable.isTransactionalTable) {
+              carbonTable.getTableName + " (external table)"
+            }
+            else {
+              carbonTable.getTableName
+            }
+            Seq((currentDatabase, tableName, indexSize, datamapSize, dictSize))
+          } catch {
+            case ex: UnsupportedOperationException =>
+              Seq.empty
           }
-          (currentDatabase, tableName, indexSize, datamapSize, dictSize)
       }.collect {
         case (db, table, indexSize, datamapSize, dictSize) if !((indexSize == 0) &&
                                                                 (datamapSize == 0) &&
                                                                 (dictSize == 0)) =>
           Row(db, table, indexSize, datamapSize, dictSize)
       }
 
+      val tablePaths = carbonTables.map {
+        carbonTable =>
+          carbonTable.getTablePath
+      }
+
       // Scan whole cache and fill the entries for All-Database-All-Tables
+      // and Current-Database-All-Tables
       var (allIndexSize, allDatamapSize, allDictSize) = (0L, 0L, 0L)
+      var dbIndexSize = 0L
       cache.getCacheMap.asScala.foreach {
-        case (_, cacheable) =>
+        case (key, cacheable) =>
           cacheable match {
             case _: BlockletDataMapIndexWrapper =>
               allIndexSize += cacheable.getMemorySize
+              val tablePath = tablePaths.find {
 
 Review comment:
   Done.
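
The hunk above is truncated at `tablePaths.find {`, but its stated intent (per the comment in the diff: scan the whole cache once and fill the entries for All-Database-All-Tables and Current-Database-All-Tables) can be illustrated with a minimal, dependency-free sketch. Everything below (CacheEntry, aggregateIndexSizes, cacheMap, the example paths) is an illustrative stand-in, not a CarbonData API, and prefix-matching on the table path is one plausible reading of the truncated `find`, not necessarily what the PR finally does:

    // Minimal sketch, plain Scala, no CarbonData dependencies.
    // Walk every cache entry once: add its size to the global total, and
    // also to the current database's total when the entry's key lies
    // under one of that database's table paths.
    case class CacheEntry(key: String, memorySize: Long)

    def aggregateIndexSizes(
        cacheMap: Map[String, CacheEntry],
        tablePaths: Seq[String]): (Long, Long) = {
      var allIndexSize = 0L // All-Database-All-Tables
      var dbIndexSize = 0L  // Current-Database-All-Tables
      cacheMap.values.foreach { entry =>
        allIndexSize += entry.memorySize
        // Attribute the entry to the current database when its cache key
        // starts with one of the database's table paths (assumption).
        if (tablePaths.exists(path => entry.key.startsWith(path))) {
          dbIndexSize += entry.memorySize
        }
      }
      (allIndexSize, dbIndexSize)
    }

    // Example usage with hypothetical paths:
    val cache = Map(
      "/warehouse/db1/t1/Metadata/i1" -> CacheEntry("/warehouse/db1/t1/Metadata/i1", 1024L),
      "/warehouse/db2/t9/Metadata/i7" -> CacheEntry("/warehouse/db2/t9/Metadata/i7", 2048L))
    val (all, db) = aggregateIndexSizes(cache, Seq("/warehouse/db1/t1"))
    // all == 3072, db == 1024

A single pass over the cache map, as in the diff, keeps the command at O(entries) rather than querying the cache once per table.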

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[hidden email]


With regards,
Apache Git Services