CarbonDataQA1 commented on pull request #3912 (https://github.com/apache/carbondata/pull/3912#issuecomment-718398674):

Build Failed with Spark 2.4.5, Please check CI http://121.244.95.60:12545/job/ApacheCarbon_PR_Builder_2.4.5/2963/

This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [hidden email]
CarbonDataQA1 commented on pull request #3912 (https://github.com/apache/carbondata/pull/3912#issuecomment-718399151):

Build Failed with Spark 2.3.4, Please check CI http://121.244.95.60:12545/job/ApacheCarbonPRBuilder2.3/4721/
QiangCai commented on a change in pull request #3912 (https://github.com/apache/carbondata/pull/3912#discussion_r514043536):

File path: integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala

@@ -93,6 +94,7 @@ object DataLoadProcessBuilderOnSpark {
     val convertStepRowCounter = sc.longAccumulator("Convert Processor Accumulator")
     val sortStepRowCounter = sc.longAccumulator("Sort Processor Accumulator")
     val writeStepRowCounter = sc.longAccumulator("Write Processor Accumulator")
+    val defaultMaxSplitBytes = sessionState(sparkSession).conf.filesMaxPartitionBytes

Review comment: move to line 156

@@ -227,9 +236,17 @@ object DataLoadProcessBuilderOnSpark {
     // 2. sort
     var numPartitions = CarbonDataProcessorUtil.getGlobalSortPartitions(
       configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS))
+
+    // if numPartitions user does not specify and not specified in config then dynamically calculate
+    if (numPartitions <= 0) {
+      numPartitions = Math.ceil(SizeEstimator.estimate(originRDD) / defaultMaxSplitBytes).toInt

Review comment: SizeEstimator.estimate(originRDD).toDouble and move to line 247

@@ -143,10 +145,16 @@ object DataLoadProcessBuilderOnSpark {
     var numPartitions = CarbonDataProcessorUtil.getGlobalSortPartitions(
       configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS))
+
+    // if numPartitions user does not specify and not specified in config then dynamically calculate
+    if (numPartitions == 0) {
+      numPartitions = Math.ceil(model.getTotalSize.toDouble / defaultMaxSplitBytes).toInt
+    }
+
+    // after calculation based on size if still zero then take the partition number
     if (numPartitions <= 0) {
       numPartitions = convertRDD.partitions.length

Review comment: Math.min(convertRDD.partitions.length, dynamic partition number)

@@ -227,9 +236,17 @@ object DataLoadProcessBuilderOnSpark {
     // 2. sort
     var numPartitions = CarbonDataProcessorUtil.getGlobalSortPartitions(
       configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS))
+
+    // if numPartitions user does not specify and not specified in config then dynamically calculate
+    if (numPartitions <= 0) {
+      numPartitions = Math.ceil(SizeEstimator.estimate(originRDD) / defaultMaxSplitBytes).toInt
+    }
+
+    // after calculation based on size if still zero then take the partition number
     if (numPartitions <= 0) {
       numPartitions = originRDD.partitions.length

Review comment: numPartitions = Math.min(originRDD.partitions.length, dynamic partition number)

@@ -202,6 +210,7 @@ object DataLoadProcessBuilderOnSpark {
     val partialSuccessAccum = sc.longAccumulator("Partial Success Accumulator")
     val sortStepRowCounter = sc.longAccumulator("Sort Processor Accumulator")
     val writeStepRowCounter = sc.longAccumulator("Write Processor Accumulator")
+    val defaultMaxSplitBytes = sessionState(sparkSession).conf.filesMaxPartitionBytes

Review comment: move to line 247

@@ -143,10 +145,16 @@ object DataLoadProcessBuilderOnSpark {
     var numPartitions = CarbonDataProcessorUtil.getGlobalSortPartitions(
       configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS))
+
+    // if numPartitions user does not specify and not specified in config then dynamically calculate
+    if (numPartitions == 0) {
+      numPartitions = Math.ceil(model.getTotalSize.toDouble / defaultMaxSplitBytes).toInt

Review comment: move to 156
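Taken together, these comments point at one shape for the partition calculation. Below is a minimal sketch of that logic as a standalone Scala function, assuming the values the surrounding code already has in scope (the configured partition count, model.getTotalSize, spark.sql.files.maxPartitionBytes, and the upstream RDD's partition count); the function name and parameters are illustrative, not the actual patch:

// Hypothetical helper distilling the review suggestions above; not CarbonData API.
def resolveGlobalSortPartitions(
    configuredPartitions: Int, // from LOAD_GLOBAL_SORT_PARTITIONS, <= 0 when unset
    totalSizeInBytes: Long,    // e.g. model.getTotalSize
    maxSplitBytes: Long,       // e.g. spark.sql.files.maxPartitionBytes
    upstreamPartitions: Int    // e.g. convertRDD.partitions.length
): Int = {
  if (configuredPartitions > 0) {
    // the user's explicit setting always wins
    configuredPartitions
  } else {
    // dynamic estimate: one partition per maxSplitBytes of input
    val dynamicPartitionNum = Math.ceil(totalSizeInBytes.toDouble / maxSplitBytes).toInt
    // cap at the upstream RDD's parallelism, per the Math.min suggestion,
    // and keep at least one partition so an empty load still works
    Math.max(1, Math.min(upstreamPartitions, dynamicPartitionNum))
  }
}

For example, a 1 GB load with the default 128 MB maxPartitionBytes and 100 upstream partitions resolves to Math.min(100, 8) = 8 partitions.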
QiangCai commented on a change in pull request #3912 (https://github.com/apache/carbondata/pull/3912#discussion_r514046333):

File path: integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala

@@ -143,10 +145,16 @@ object DataLoadProcessBuilderOnSpark {
     var numPartitions = CarbonDataProcessorUtil.getGlobalSortPartitions(
       configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS))
+
+    // if numPartitions user does not specify and not specified in config then dynamically calculate
+    if (numPartitions == 0) {
+      numPartitions = Math.ceil(model.getTotalSize.toDouble / defaultMaxSplitBytes).toInt
+    }
+
+    // after calculation based on size if still zero then take the partition number
     if (numPartitions <= 0) {
       numPartitions = convertRDD.partitions.length

Review comment: numPartitions = Math.min(convertRDD.partitions.length, dynamic partition number)
CarbonDataQA1 commented on pull request #3912 (https://github.com/apache/carbondata/pull/3912#issuecomment-718491747):

Build Success with Spark 2.4.5, Please check CI http://121.244.95.60:12545/job/ApacheCarbon_PR_Builder_2.4.5/2966/
CarbonDataQA1 commented on pull request #3912 (https://github.com/apache/carbondata/pull/3912#issuecomment-718493643):

Build Failed with Spark 2.3.4, Please check CI http://121.244.95.60:12545/job/ApacheCarbonPRBuilder2.3/4725/
CarbonDataQA1 commented on pull request #3912 (https://github.com/apache/carbondata/pull/3912#issuecomment-718729637):

Build Success with Spark 2.3.4, Please check CI http://121.244.95.60:12545/job/ApacheCarbonPRBuilder2.3/4731/
CarbonDataQA1 commented on pull request #3912 (https://github.com/apache/carbondata/pull/3912#issuecomment-718730475):

Build Success with Spark 2.4.5, Please check CI http://121.244.95.60:12545/job/ApacheCarbon_PR_Builder_2.4.5/2972/
QiangCai commented on a change in pull request #3912 (https://github.com/apache/carbondata/pull/3912#discussion_r514748658):

File path: integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala

@@ -227,9 +232,15 @@ object DataLoadProcessBuilderOnSpark {
     // 2. sort
     var numPartitions = CarbonDataProcessorUtil.getGlobalSortPartitions(
       configuration.getDataLoadProperty(CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS))
+
+    // if numPartitions user does not specify and not specified in config then dynamically calculate
     if (numPartitions <= 0) {
-      numPartitions = originRDD.partitions.length
+      val defaultMaxSplitBytes = sessionState(sparkSession).conf.filesMaxPartitionBytes
+      val dynamicPartitionNum = Math.ceil(SizeEstimator.estimate(originRDD).toDouble /

Review comment: does SizeEstimator.estimate work for RDD?
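The question is well taken: org.apache.spark.util.SizeEstimator.estimate walks the object graph of whatever driver-side object it is handed, so on an RDD it measures the RDD handle and everything reachable from it (lineage, context references), not the distributed rows. A small sketch to see this, assuming only a local Spark session; the printed figure is unrelated to the 100 million logical rows:

import org.apache.spark.sql.SparkSession
import org.apache.spark.util.SizeEstimator

object SizeEstimatorOnRdd {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("size-estimator-on-rdd")
      .getOrCreate()
    // A lazy RDD over 100M logical rows; no row data lives on the driver.
    val rdd = spark.range(100000000L).rdd
    // Estimates the driver-side object graph, not the dataset size.
    println(s"SizeEstimator.estimate(rdd) = ${SizeEstimator.estimate(rdd)} bytes")
    spark.stop()
  }
}

This is presumably why the other branch of the patch uses model.getTotalSize (the input file size) instead: a file-size figure is a meaningful numerator for totalSize / maxSplitBytes, while an object-graph estimate is not.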
QiangCai commented on a change in pull request #3912 (https://github.com/apache/carbondata/pull/3912#discussion_r514942737):

File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestGlobalSortDataLoad.scala

@@ -527,6 +529,73 @@ class TestGlobalSortDataLoad extends QueryTest with BeforeAndAfterEach with Befo
+  test("calculate the global sort partitions automatically when user does not give in load options ") {

Review comment: add another test case that configures a small defaultMaxSplitBytes, so the load produces multiple partitions.
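A sketch of the kind of case being asked for, assuming the sql(...) helper and sqlContext from the surrounding QueryTest suite; the table name, the inputCsvPath fixture, and the 1 KB split size are illustrative only, not the actual patch:

test("global sort partitions are calculated dynamically with a small max split size") {
  val session = sqlContext.sparkSession
  val original = session.conf.get("spark.sql.files.maxPartitionBytes")
  try {
    // Shrink the split size so totalSize / maxSplitBytes yields several partitions.
    session.conf.set("spark.sql.files.maxPartitionBytes", "1024")
    sql("DROP TABLE IF EXISTS carbon_global_sort_dynamic")
    sql(
      """
        | CREATE TABLE carbon_global_sort_dynamic(id INT, name STRING)
        | STORED AS carbondata
        | TBLPROPERTIES('SORT_SCOPE'='GLOBAL_SORT')
      """.stripMargin)
    // No GLOBAL_SORT_PARTITIONS load option, so the dynamic path is exercised.
    sql(s"LOAD DATA LOCAL INPATH '$inputCsvPath' INTO TABLE carbon_global_sort_dynamic")
    assert(sql("SELECT * FROM carbon_global_sort_dynamic").count() > 0)
  } finally {
    session.conf.set("spark.sql.files.maxPartitionBytes", original)
  }
}

Here inputCsvPath stands for a CSV fixture comfortably larger than 1 KB, so the dynamic calculation yields more than one partition.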
maheshrajus closed pull request #3912: https://github.com/apache/carbondata/pull/3912