
[GitHub] [carbondata] vikramahuja1001 commented on a change in pull request #4072: [CARBONDATA-4110] Support clean files dry run operation and show statistics after clean files operation

Posted by GitBox on Feb 22, 2021; 5:43am
URL: http://apache-carbondata-dev-mailing-list-archive.168.s1.nabble.com/GitHub-carbondata-vikramahuja1001-opened-a-new-pull-request-4072-WIP-Clean-files-phase2-tp105322p106394.html


vikramahuja1001 commented on a change in pull request #4072:
URL: https://github.com/apache/carbondata/pull/4072#discussion_r579996248



##########
File path: integration/spark/src/main/scala/org/apache/carbondata/trash/DataTrashManager.scala
##########
@@ -121,6 +176,78 @@ object DataTrashManager {
     }
   }
 
+  /**
+   * Performs a clean files dry run on the expired segments. Returns the size that would be
+   * freed by the clean files operation, along with the remaining trash size that can be
+   * cleaned once those segments expire.
+   */
+  private def dryRunOnExpiredSegments(
+      carbonTable: CarbonTable,
+      isForceDelete: Boolean,
+      cleanStaleInProgress: Boolean): Seq[Long] = {
+    var sizeFreed: Long = 0
+    var trashSizeRemaining: Long = 0
+    val loadMetadataDetails = SegmentStatusManager.readLoadMetadata(carbonTable.getMetadataPath)
+    if (SegmentStatusManager.isLoadDeletionRequired(loadMetadataDetails)) {
+      loadMetadataDetails.foreach { oneLoad =>
+        val segmentFilePath = CarbonTablePath.getSegmentFilePath(carbonTable.getTablePath,
+          oneLoad.getSegmentFile)
+        if (DeleteLoadFolders.canDeleteThisLoad(oneLoad, isForceDelete, cleanStaleInProgress)) {
+          // No need to consider physical data for external segments, only consider metadata.
+          if (oneLoad.getPath() == null || oneLoad.getPath().equalsIgnoreCase("NA")) {
+            sizeFreed += calculateSegmentSizeForOneLoad(carbonTable, oneLoad, loadMetadataDetails)
+          }
+          sizeFreed += FileFactory.getCarbonFile(segmentFilePath).getSize
+        } else {
+          if (SegmentStatusManager.isExpiredSegment(oneLoad, carbonTable
+              .getAbsoluteTableIdentifier)) {
+            trashSizeRemaining += calculateSegmentSizeForOneLoad(carbonTable, oneLoad,
+                loadMetadataDetails)
+            trashSizeRemaining += FileFactory.getCarbonFile(segmentFilePath).getSize
+          }
+        }
+      }
+    }
+    Seq(sizeFreed, trashSizeRemaining)
+  }
+
+  /**
+   * Calculates the size of a segment from its load metadata.
+   */
+  def calculateSegmentSizeForOneLoad(carbonTable: CarbonTable, oneLoad: LoadMetadataDetails,
+        loadMetadataDetails: Array[LoadMetadataDetails]): Long = {
+    var size: Long = 0
+    if (oneLoad.getDataSize != null && !oneLoad.getDataSize.isEmpty) {

Review comment:
       Okay
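
For context, the dry run helper quoted above returns a two-element sequence, Seq(sizeFreed, trashSizeRemaining), with both values in bytes. The snippet below is a minimal, self-contained sketch of how a caller might format those two numbers for the statistics shown after a dry run; the dryRunResult value and the bytesToString helper are illustrative stand-ins, not part of this PR.

    object DryRunReportSketch {
      // Stand-in for the Seq(sizeFreed, trashSizeRemaining) returned by
      // dryRunOnExpiredSegments, both values in bytes.
      val dryRunResult: Seq[Long] = Seq(1536L * 1024 * 1024, 512L * 1024 * 1024)

      // Formats a byte count into a human-readable string for display only.
      def bytesToString(bytes: Long): String = {
        val units = Seq("B", "KB", "MB", "GB", "TB")
        var value = bytes.toDouble
        var unit = 0
        while (value >= 1024 && unit < units.length - 1) {
          value /= 1024
          unit += 1
        }
        f"$value%.2f ${units(unit)}"
      }

      def main(args: Array[String]): Unit = {
        val Seq(sizeFreed, trashSizeRemaining) = dryRunResult
        println(s"Size freed by clean files (dry run): ${bytesToString(sizeFreed)}")
        println(s"Trash size remaining: ${bytesToString(trashSizeRemaining)}")
      }
    }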

##########
File path: integration/spark/src/main/scala/org/apache/carbondata/trash/DataTrashManager.scala
##########
@@ -121,6 +176,78 @@ object DataTrashManager {
+    if (oneLoad.getDataSize != null && !oneLoad.getDataSize.isEmpty) {

Review comment:
       yeah
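
The line both replies refer to guards against null or empty data-size metadata before adding it to a running total. Below is a generic, self-contained sketch of that guard pattern; the names are illustrative and the size is modelled as a plain String rather than the actual LoadMetadataDetails API.

    object SizeGuardSketch {
      // Adds a metadata-reported size (a String that may be null or empty)
      // to a running total, skipping it when it is absent.
      def addReportedSize(total: Long, reportedSize: String): Long = {
        if (reportedSize != null && reportedSize.nonEmpty) {
          total + reportedSize.toLong
        } else {
          total
        }
      }

      def main(args: Array[String]): Unit = {
        val sizes = Seq("1024", null, "", "2048")
        val total = sizes.foldLeft(0L)(addReportedSize)
        println(s"Total reported size: $total bytes")  // prints 3072 bytes
      }
    }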




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[hidden email]