GitHub user jackylk opened a pull request:
https://github.com/apache/carbondata/pull/2415 [CARBONDATA-2659] Support partition table by DataFrame API Currently, partition table is only supported by SQL; it should be supported by the Spark DataFrame API also. This PR added an option to specify the partition columns when writing a DataFrame to a carbon table. For example: ``` df.write .format("carbondata") .option("tableName", "carbon_df_table") .option("partitionColumns", "c1, c2") // a list of column names .mode(SaveMode.Overwrite) .save() ``` - [X] Any interfaces changed? Added an option for DataFrame.write - [X] Any backward compatibility impacted? No - [X] Document update required? - [X] Testing done Added one test case - [X] For large changes, please consider breaking it into sub-tasks under an umbrella JIRA. NA You can merge this pull request into a Git repository by running: $ git pull https://github.com/jackylk/incubator-carbondata dataframe-partition Alternatively you can review and apply these changes as the patch at: https://github.com/apache/carbondata/pull/2415.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #2415 ---- commit 16e6c110060811d2493d014a1f21b9bb0c54ea32 Author: Jacky Li <jacky.likun@...> Date: 2018-06-26T12:27:37Z add test ---- --- |
Github user xuchuanyin commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2415#discussion_r198151382 --- Diff: integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonOption.scala --- @@ -23,51 +23,59 @@ package org.apache.carbondata.spark */ class CarbonOption(options: Map[String, String]) { - def dbName: Option[String] = options.get("dbName") + lazy val dbName: Option[String] = options.get("dbName") - def tableName: String = options.getOrElse("tableName", "default_table") + lazy val tableName: String = options.getOrElse("tableName", "default_table") - def tablePath: Option[String] = options.get("tablePath") + lazy val tablePath: Option[String] = options.get("tablePath") - def partitionCount: String = options.getOrElse("partitionCount", "1") + lazy val partitionCount: String = options.getOrElse("partitionCount", "1") - def partitionClass: String = { + lazy val partitionClass: String = { options.getOrElse("partitionClass", "org.apache.carbondata.processing.partition.impl.SampleDataPartitionerImpl") } - def tempCSV: Boolean = options.getOrElse("tempCSV", "false").toBoolean + lazy val tempCSV: Boolean = options.getOrElse("tempCSV", "false").toBoolean --- End diff -- I remember that the 'tempCsv' option has been deprecated --- |
In reply to this post by qiuchenjian-2
Github user xuchuanyin commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2415#discussion_r198152314 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala --- @@ -92,11 +89,35 @@ class CarbonDataFrameWriter(sqlContext: SQLContext, val dataFrame: DataFrame) { ).filter(_._2.isDefined) .map(property => s"'${property._1}' = '${property._2.get}'").mkString(",") + val partitionColumns: Seq[String] = if (options.partitionColumns.isDefined) { + options.partitionColumns.get.map { column => + val c = schema.fields.find(_.name.equalsIgnoreCase(column)) + if (c.isEmpty) { + throw new MalformedCarbonCommandException(s"invalid partition column: $column") --- End diff -- missing validation for duplicated column names? --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2415 Build Failed with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/5387/ --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2415 Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/6561/ --- |
In reply to this post by qiuchenjian-2
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2415#discussion_r198250318 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala --- @@ -92,11 +89,35 @@ class CarbonDataFrameWriter(sqlContext: SQLContext, val dataFrame: DataFrame) { ).filter(_._2.isDefined) .map(property => s"'${property._1}' = '${property._2.get}'").mkString(",") + val partitionColumns: Seq[String] = if (options.partitionColumns.isDefined) { + options.partitionColumns.get.map { column => + val c = schema.fields.find(_.name.equalsIgnoreCase(column)) + if (c.isEmpty) { + throw new MalformedCarbonCommandException(s"invalid partition column: $column") --- End diff -- fixed --- |
In reply to this post by qiuchenjian-2
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2415#discussion_r198250394 --- Diff: integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonOption.scala --- @@ -23,51 +23,59 @@ package org.apache.carbondata.spark */ class CarbonOption(options: Map[String, String]) { - def dbName: Option[String] = options.get("dbName") + lazy val dbName: Option[String] = options.get("dbName") - def tableName: String = options.getOrElse("tableName", "default_table") + lazy val tableName: String = options.getOrElse("tableName", "default_table") - def tablePath: Option[String] = options.get("tablePath") + lazy val tablePath: Option[String] = options.get("tablePath") - def partitionCount: String = options.getOrElse("partitionCount", "1") + lazy val partitionCount: String = options.getOrElse("partitionCount", "1") - def partitionClass: String = { + lazy val partitionClass: String = { options.getOrElse("partitionClass", "org.apache.carbondata.processing.partition.impl.SampleDataPartitionerImpl") } - def tempCSV: Boolean = options.getOrElse("tempCSV", "false").toBoolean + lazy val tempCSV: Boolean = options.getOrElse("tempCSV", "false").toBoolean --- End diff -- removed --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2415 Build Success with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/5395/ --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2415 Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/6568/ --- |
In reply to this post by qiuchenjian-2
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/2415 SDV Build Success , Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/5468/ --- |
In reply to this post by qiuchenjian-2
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/2415 SDV Build Success , Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/5471/ --- |
In reply to this post by qiuchenjian-2
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/2415 SDV Build Success , Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/5475/ --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2415 Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/7105/ --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2415 Build Success with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/5881/ --- |
In reply to this post by qiuchenjian-2
Github user ravipesala commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2415#discussion_r202909786 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala --- @@ -92,11 +89,38 @@ class CarbonDataFrameWriter(sqlContext: SQLContext, val dataFrame: DataFrame) { ).filter(_._2.isDefined) .map(property => s"'${property._1}' = '${property._2.get}'").mkString(",") + val partition: Seq[String] = if (options.partitionColumns.isDefined) { + if (options.partitionColumns.get.toSet.size != options.partitionColumns.get.length) { + throw new MalformedCarbonCommandException(s"repeated partition column") + } + options.partitionColumns.get.map { column => + val field = schema.fields.find(_.name.equalsIgnoreCase(column)) + if (field.isEmpty) { + throw new MalformedCarbonCommandException(s"invalid partition column: $column") + } + s"$column ${field.get.dataType.typeName}" + } + } else { + Seq() + } + + val schemaWithoutPartition = if (options.partitionColumns.isDefined) { + val fields = schema.filterNot(field => options.partitionColumns.get.contains(field.name)) --- End diff -- better check `exists ` with equalsIgnoreCase inside `filterNot` instead of `contains` --- |
In reply to this post by qiuchenjian-2
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/2415 @jackylk Please rebase it --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2415 Build Failed with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/7268/ --- |
In reply to this post by qiuchenjian-2
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/2415 SDV Build Success , Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/5896/ --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2415 Build Failed with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/6037/ --- |
In reply to this post by qiuchenjian-2
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/2415 retest this please --- |
Free forum by Nabble | Edit this page |