[GitHub] [carbondata] QiangCai commented on a change in pull request #3183: [CARBONDATA-3349] Show sort_columns for each segment

View mode: Classic | List | Threaded
1 message · Options
Reply | Threaded
Open this post in threaded view
|

[GitHub] [carbondata] QiangCai commented on a change in pull request #3183: [CARBONDATA-3349] Show sort_columns for each segment

GitBox
QiangCai commented on a change in pull request #3183: [CARBONDATA-3349] Show sort_columns for each segment
URL: https://github.com/apache/carbondata/pull/3183#discussion_r279175387
 
 

 ##########
 File path: integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
 ##########
 @@ -145,6 +152,208 @@ object CarbonStore {
     }
   }
 
+  def showSegments(
+      limit: Option[String],
+      carbonTable: CarbonTable,
+      hadoopConf: Configuration,
+      showHistory: Boolean): Seq[Row] = {
+    val metaFolder = CarbonTablePath.getMetadataPath(carbonTable.getTablePath)
+    val loadMetadataDetailsArray = if (showHistory) {
+      SegmentStatusManager.readLoadMetadata(metaFolder) ++
+      SegmentStatusManager.readLoadHistoryMetadata(metaFolder)
+    } else {
+      SegmentStatusManager.readLoadMetadata(metaFolder)
+    }
+
+    if (loadMetadataDetailsArray.nonEmpty) {
+      var loadMetadataDetailsSortedArray = loadMetadataDetailsArray.sortWith { (l1, l2) =>
+        java.lang.Double.parseDouble(l1.getLoadName) > java.lang.Double.parseDouble(l2.getLoadName)
+      }
+      if (!showHistory) {
+        loadMetadataDetailsSortedArray = loadMetadataDetailsSortedArray
+          .filter(_.getVisibility.equalsIgnoreCase("true"))
+      }
+      if (limit.isDefined) {
+        val limitLoads = limit.get
+        try {
+          val lim = Integer.parseInt(limitLoads)
+          loadMetadataDetailsSortedArray = loadMetadataDetailsSortedArray.slice(0, lim)
+        } catch {
+          case _: NumberFormatException =>
+            CarbonException.analysisException("Entered limit is not a valid Number")
+        }
+      }
+      val tableDataMap: TableDataMap =
+        DataMapStoreManager.getInstance.getDefaultDataMap(carbonTable)
+      val readCommitScope: ReadCommittedScope =
+        new TableStatusReadCommittedScope(
+          carbonTable.getAbsoluteTableIdentifier,
+          loadMetadataDetailsSortedArray.filter(_.getVisibility.equalsIgnoreCase("true")),
+          hadoopConf)
+      loadMetadataDetailsSortedArray
+        .map { load =>
+          val mergedTo =
+            if (load.getMergedLoadName != null) {
+              load.getMergedLoadName
+            } else {
+              "NA"
+            }
+
+          val startTime =
+            if (load.getLoadStartTime == CarbonCommonConstants.SEGMENT_LOAD_TIME_DEFAULT) {
+              "NA"
+            } else {
+              new java.sql.Timestamp(load.getLoadStartTime).toString
+            }
+
+          val endTime =
+            if (load.getLoadEndTime == CarbonCommonConstants.SEGMENT_LOAD_TIME_DEFAULT) {
+              "NA"
+            } else {
+              new java.sql.Timestamp(load.getLoadEndTime).toString
+            }
+
+          val (dataSize, indexSize) = if (load.getFileFormat == FileFormat.ROW_V1) {
+            // for streaming segment, we should get the actual size from the index file
+            // since it is continuously inserting data
+            val segmentDir =
+            CarbonTablePath.getSegmentPath(carbonTable.getTablePath, load.getLoadName)
+            val indexPath = CarbonTablePath.getCarbonStreamIndexFilePath(segmentDir)
+            val indexFile = FileFactory.getCarbonFile(indexPath)
+            if (indexFile.exists()) {
+              val indices =
+                StreamSegment.readIndexFile(indexPath, FileFactory.getFileType(indexPath))
+              (indices.asScala.map(_.getFile_size).sum, indexFile.getSize)
+            } else {
+              (-1L, -1L)
+            }
+          } else {
+            // for batch segment, we can get the data size from table status file directly
+            (if (load.getDataSize == null) -1L else load.getDataSize.toLong,
 
 Review comment:
  This is old code; it has already been changed to reuse the existing code.
   
   

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[hidden email]


With regards,
Apache Git Services