ajantha-bhat commented on a change in pull request #3488: [CARBONDATA-3599] Support insert data from stage files written by SDK
URL: https://github.com/apache/carbondata/pull/3488#discussion_r360681806

##########
File path: integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
##########
@@ -369,6 +378,72 @@ object DataLoadProcessBuilderOnSpark {
     carbonTaskInfo.setTaskId(CarbonUtil.generateUUID())
     ThreadLocalTaskInfo.setCarbonTaskInfo(carbonTaskInfo)
   }
+
+  /**
+   * create CarbonLoadModel for global_sort
+   */
+  def createLoadModelForGlobalSort(
+      sparkSession: SparkSession,
+      carbonTable: CarbonTable
+  ): CarbonLoadModel = {
+    val conf = SparkSQLUtil.sessionState(sparkSession).newHadoopConf()
+    CarbonTableOutputFormat.setDatabaseName(conf, carbonTable.getDatabaseName)
+    CarbonTableOutputFormat.setTableName(conf, carbonTable.getTableName)
+    CarbonTableOutputFormat.setCarbonTable(conf, carbonTable)
+    val fieldList = carbonTable.getCreateOrderColumn
+      .asScala
+      .map { column =>
+        new StructField(column.getColName, column.getDataType)
+      }
+    CarbonTableOutputFormat.setInputSchema(conf, new StructType(fieldList.asJava))
+    val loadModel = CarbonTableOutputFormat.getLoadModel(conf)
+    loadModel.setSerializationNullFormat(
+      TableOptionConstant.SERIALIZATION_NULL_FORMAT.getName + ",\\N")
+    loadModel.setBadRecordsLoggerEnable(
+      TableOptionConstant.BAD_RECORDS_LOGGER_ENABLE.getName + ",false")
+    loadModel.setBadRecordsAction(
+      TableOptionConstant.BAD_RECORDS_ACTION.getName + ",force")
+    loadModel.setIsEmptyDataBadRecord(
+      DataLoadProcessorConstants.IS_EMPTY_DATA_BAD_RECORD + ",false")
+    val globalSortPartitions =
+      carbonTable.getTableInfo.getFactTable.getTableProperties.get("global_sort_partitions")
+    if (globalSortPartitions != null) {
+      loadModel.setGlobalSortPartitions(globalSortPartitions)
+    }
+    loadModel
+  }
+
+  /**
+   * create DataFrame based on the specified splits
+   */
+  def createInputDataFrame(
+      sparkSession: SparkSession,
+      carbonTable: CarbonTable,
+      splits: Seq[InputSplit]
+  ): DataFrame = {
+    val columns = carbonTable
+      .getCreateOrderColumn
+      .asScala
+      .map(_.getColName)
+      .toArray
+    val schema = SparkTypeConverter.createSparkSchema(carbonTable, columns)
+    val rdd: RDD[Row] = new CarbonScanRDD[CarbonRow](
+      sparkSession,
+      columnProjection = new CarbonProjection(columns),
+      null,
+      carbonTable.getAbsoluteTableIdentifier,
+      carbonTable.getTableInfo.serialize,
+      carbonTable.getTableInfo,
+      new CarbonInputMetrics,
+      null,
+      null,
+      classOf[CarbonRowReadSupport],
+      splits.asJava)
+      .map { row =>
+        new GenericRow(row.getData.asInstanceOf[Array[Any]])
+      }
+    sparkSession.createDataFrame(rdd, schema)

Review comment:
   @akashrn5: I will merge this once the retest passes. #3515 can rebase.
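Note for readers following the thread: the two helpers in this diff are designed to be chained by the insert-stage-files flow. Below is a minimal sketch of such a caller, assuming `stageSplits` holds the InputSplits listed from the SDK stage files and that the existing `loadDataUsingGlobalSort` entry point in the same file accepts the DataFrame-based input; neither is part of this patch, and the exact signature may differ.

    import org.apache.hadoop.mapreduce.InputSplit
    import org.apache.spark.sql.SparkSession

    import org.apache.carbondata.core.metadata.schema.table.CarbonTable
    import org.apache.carbondata.spark.load.DataLoadProcessBuilderOnSpark

    object InsertStageExample {

      // Hypothetical caller: `stageSplits` and the loadDataUsingGlobalSort
      // call below are assumptions for illustration, not part of this patch.
      def insertStageData(
          sparkSession: SparkSession,
          carbonTable: CarbonTable,
          stageSplits: Seq[InputSplit]): Unit = {
        // Wrap the stage files in a DataFrame built on the table's
        // create-order schema (helper added in this diff).
        val inputDataFrame = DataLoadProcessBuilderOnSpark
          .createInputDataFrame(sparkSession, carbonTable, stageSplits)
        // Build a load model carrying the table identity, input schema,
        // bad-record options and global_sort_partitions (also from this diff).
        val loadModel = DataLoadProcessBuilderOnSpark
          .createLoadModelForGlobalSort(sparkSession, carbonTable)
        // Hand both to the existing global-sort load path.
        DataLoadProcessBuilderOnSpark.loadDataUsingGlobalSort(
          sparkSession,
          Some(inputDataFrame),
          loadModel,
          sparkSession.sparkContext.hadoopConfiguration)
      }
    }

Keeping model creation separate from DataFrame creation presumably lets the caller own the stage-file listing, so the same global-sort load path can serve both SQL loads and SDK stage inserts.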