[GitHub] [carbondata] QiangCai commented on a change in pull request #3183: [CARBONDATA-3349] Show sort_columns for each segment


GitBox
QiangCai commented on a change in pull request #3183: [CARBONDATA-3349] Show sort_columns for each segment
URL: https://github.com/apache/carbondata/pull/3183#discussion_r279175423
 
 

 ##########
 File path: integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
 ##########
 @@ -145,6 +152,208 @@ object CarbonStore {
     }
   }
 
+  def showSegments(
+      limit: Option[String],
+      carbonTable: CarbonTable,
+      hadoopConf: Configuration,
+      showHistory: Boolean): Seq[Row] = {
+    val metaFolder = CarbonTablePath.getMetadataPath(carbonTable.getTablePath)
+    val loadMetadataDetailsArray = if (showHistory) {
+      SegmentStatusManager.readLoadMetadata(metaFolder) ++
+      SegmentStatusManager.readLoadHistoryMetadata(metaFolder)
+    } else {
+      SegmentStatusManager.readLoadMetadata(metaFolder)
+    }
+
+    if (loadMetadataDetailsArray.nonEmpty) {
+      var loadMetadataDetailsSortedArray = loadMetadataDetailsArray.sortWith { (l1, l2) =>
+        java.lang.Double.parseDouble(l1.getLoadName) > java.lang.Double.parseDouble(l2.getLoadName)
+      }
+      if (!showHistory) {
+        loadMetadataDetailsSortedArray = loadMetadataDetailsSortedArray
+          .filter(_.getVisibility.equalsIgnoreCase("true"))
+      }
+      if (limit.isDefined) {
+        val limitLoads = limit.get
+        try {
+          val lim = Integer.parseInt(limitLoads)
+          loadMetadataDetailsSortedArray = loadMetadataDetailsSortedArray.slice(0, lim)
+        } catch {
+          case _: NumberFormatException =>
+            CarbonException.analysisException("Entered limit is not a valid Number")
+        }
+      }
+      val tableDataMap: TableDataMap =
+        DataMapStoreManager.getInstance.getDefaultDataMap(carbonTable)
+      val readCommitScope: ReadCommittedScope =
+        new TableStatusReadCommittedScope(
+          carbonTable.getAbsoluteTableIdentifier,
+          loadMetadataDetailsSortedArray.filter(_.getVisibility.equalsIgnoreCase("true")),
+          hadoopConf)
+      loadMetadataDetailsSortedArray
+        .map { load =>
+          val mergedTo =
+            if (load.getMergedLoadName != null) {
+              load.getMergedLoadName
+            } else {
+              "NA"
+            }
+
+          val startTime =
+            if (load.getLoadStartTime == CarbonCommonConstants.SEGMENT_LOAD_TIME_DEFAULT) {
+              "NA"
+            } else {
+              new java.sql.Timestamp(load.getLoadStartTime).toString
+            }
+
+          val endTime =
+            if (load.getLoadEndTime == CarbonCommonConstants.SEGMENT_LOAD_TIME_DEFAULT) {
+              "NA"
+            } else {
+              new java.sql.Timestamp(load.getLoadEndTime).toString
+            }
+
+          val (dataSize, indexSize) = if (load.getFileFormat == FileFormat.ROW_V1) {
+            // for streaming segment, we should get the actual size from the index file
+            // since it is continuously inserting data
+            val segmentDir =
+              CarbonTablePath.getSegmentPath(carbonTable.getTablePath, load.getLoadName)
+            val indexPath = CarbonTablePath.getCarbonStreamIndexFilePath(segmentDir)
+            val indexFile = FileFactory.getCarbonFile(indexPath)
+            if (indexFile.exists()) {
+              val indices =
+                StreamSegment.readIndexFile(indexPath, FileFactory.getFileType(indexPath))
+              (indices.asScala.map(_.getFile_size).sum, indexFile.getSize)
+            } else {
+              (-1L, -1L)
+            }
+          } else {
+            // for batch segment, we can get the data size from table status file directly
+            (if (load.getDataSize == null) -1L else load.getDataSize.toLong,
+              if (load.getIndexSize == null) -1L else load.getIndexSize.toLong)
+          }
+
+          val (isSorted, sortColumns) =
+            getSortColumnsOfSegment(
+              load,
+              readCommitScope,
+              tableDataMap,
+              hadoopConf
+            )
+          if (showHistory) {
+            Row(
+              load.getLoadName,
+              load.getSegmentStatus.getMessage,
+              startTime,
+              endTime,
+              mergedTo,
+              load.getFileFormat.toString,
+              load.getVisibility,
+              Strings.formatSize(dataSize.toFloat),
+              Strings.formatSize(indexSize.toFloat),
+              isSorted,
+              sortColumns)
+          } else {
+            Row(
+              load.getLoadName,
+              load.getSegmentStatus.getMessage,
+              startTime,
+              endTime,
+              mergedTo,
+              load.getFileFormat.toString,
+              Strings.formatSize(dataSize.toFloat),
+              Strings.formatSize(indexSize.toFloat),
+              isSorted,
+              sortColumns)
+          }
+        }.toSeq
+    } else {
+      Seq.empty
+    }
+  }
+
+  private def getSortColumnsOfSegment(
+      load: LoadMetadataDetails,
+      readCommitScope: ReadCommittedScope,
+      tableDataMap: TableDataMap,
+      hadoopConf: Configuration
+  ): (String, String) = {
+    var isSorted = ""
+    var sortColumns = ""
+    if (load.getFileFormat == FileFormat.ROW_V1) {
+      isSorted = "false"
+    } else if (load.getVisibility.equalsIgnoreCase("true")) {
+      val segment = new Segment(load.getLoadName(), load.getSegmentFile(), readCommitScope)
+      val dataMapFactory = tableDataMap.getDataMapFactory().asInstanceOf[BlockletDataMapFactory]
+      val indexIdents = dataMapFactory.getTableBlockIndexUniqueIdentifiers(segment)
+      val indexIterator = indexIdents.iterator()
+      if (indexIterator.hasNext) {
+        val indexIdent = indexIterator.next()
+        var indexHeader: IndexHeader = null
+        var indexContent: Array[Byte] = null
+        val indexFilePath = indexIdent.getIndexFilePath + CarbonCommonConstants.FILE_SEPARATOR +
+          indexIdent.getIndexFileName
+        if (indexIdent.getMergeIndexFileName != null) {
+          val indexFileStore = new SegmentIndexFileStore(hadoopConf)
+          try {
+            indexFileStore.readMergeFile(indexFilePath)
+          } catch {
+            case ex: IOException =>
+              LOGGER.error(ex)
+          }
+          val iterator = indexFileStore.getCarbonIndexMap.entrySet().iterator()
+          if (iterator.hasNext) {
+            indexContent = iterator.next().getValue
+          }
+        }
+        val indexReader = new CarbonIndexFileReader()
+        try {
+          if (indexContent == null) {
+            indexReader.openThriftReader(indexFilePath)
+          } else {
+            indexReader.openThriftReader(indexContent)
+          }
+          // get the index header
+          indexHeader = indexReader.readIndexHeader()
+        } catch {
+          case ex: IOException =>
+            LOGGER.error(ex)
+        } finally {
+          indexReader.closeThriftReader()
+        }
+        if (indexHeader != null && indexHeader.isSetIs_sort) {
+          if (indexHeader.is_sort) {
 
 Review comment:
  The default is an empty string for the cases where is_sort is not set.
  When is_sort is set but false, isSorted should be "false".
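
  A minimal sketch of what this comment appears to ask for (illustrative only,
  not the PR's final code; it reuses the indexHeader and isSorted names from
  the diff above):

      if (indexHeader != null && indexHeader.isSetIs_sort) {
        // is_sort is present in the index header: report its actual value,
        // so an unsorted segment yields "false" rather than the empty default
        isSorted = indexHeader.is_sort.toString
      }
      // when is_sort is not set (e.g. a segment written by an older version),
      // isSorted keeps its default empty string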
