Posted by
GitBox on
Feb 05, 2021; 10:00am
URL: http://apache-carbondata-dev-mailing-list-archive.168.s1.nabble.com/GitHub-carbondata-areyouokfreejoe-opened-a-new-pull-request-4086-CARBONDATA-4115-Successful-load-andD-tp105825p106067.html
areyouokfreejoe commented on a change in pull request #4086:
URL:
https://github.com/apache/carbondata/pull/4086#discussion_r569985309

##########
File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
##########
@@ -276,7 +280,15 @@ case class CarbonInsertIntoCommand(databaseNameOp: Option[String],
}
throw ex
}
- Seq.empty
+ if (loadResultForReturn != null && loadResultForReturn.getLoadName != null) {
+   Seq(Row(loadResultForReturn.getLoadName))
+ } else {
+   rowsForReturn
Review comment:
It's not the number of rows.
It's the segment ID, from the partition table case.
OK, a comment has been added.
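For readers of the archive, a minimal sketch (my illustration, not code from this PR) of what the change means for a caller; target_table and source_table are placeholder names:

    // After this change, CarbonInsertIntoCommand returns Seq(Row(<segment id>))
    // instead of Seq.empty, so the new segment's ID can be read off the result.
    val rows = sql("INSERT INTO TABLE target_table SELECT * FROM source_table").collect()
    val segmentId = rows.head.getString(0) // e.g. "0"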
##########
File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
##########
@@ -276,7 +280,15 @@ case class CarbonInsertIntoCommand(databaseNameOp: Option[String],
}
throw ex
}
- Seq.empty
+ if (loadResultForReturn != null && loadResultForReturn.getLoadName != null) {
Review comment:
OK, I have fixed it and re-pushed.
##########
File path: integration/spark/src/test/scala/org/apache/spark/util/CarbonCommandSuite.scala
##########
@@ -100,6 +138,56 @@ class CarbonCommandSuite extends QueryTest with BeforeAndAfterAll {
private lazy val location = CarbonProperties.getStorePath()
+ test("Return segment ID after load and insert") {
+ val tableName = "test_table"
+ val inputTableName = "csv_table"
+ val inputPath = s"$resourcesPath/data_alltypes.csv"
+ dropTable(tableName)
+ dropTable(inputTableName)
+ createAndLoadInputTable(inputTableName, inputPath)
+ createTestTable(tableName)
+ checkAnswer(sql(
+ s"""
+ | INSERT INTO TABLE $tableName
+ | SELECT shortField, intField, bigintField, doubleField, stringField,
+ | from_unixtime(unix_timestamp(timestampField,'yyyy/M/dd')) timestampField, decimalField,
+ | cast(to_date(from_unixtime(unix_timestamp(dateField,'yyyy/M/dd'))) as date), charField
+ | FROM $inputTableName
+ """.stripMargin), Seq(Row("0")))
+ checkAnswer(sql(
+ s"LOAD DATA LOCAL INPATH '$inputPath'" +
+ s" INTO TABLE $tableName" +
+ " OPTIONS('FILEHEADER'=" +
+ "'shortField,intField,bigintField,doubleField,stringField," +
+ "timestampField,decimalField,dateField,charField')"), Seq(Row("1")))
Review comment:
In the JDBC case, it will return like this:
+----------+
|Segment ID|
+----------+
|         0|
+----------+
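To illustrate, a hedged sketch of a JDBC consumer (my example, not part of the PR; it assumes a Spark Thrift Server at the usual local address and that the Hive JDBC driver surfaces this result set):

    import java.sql.DriverManager

    val conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default")
    val rs = conn.createStatement().executeQuery(
      "LOAD DATA LOCAL INPATH '/tmp/data_alltypes.csv' INTO TABLE test_table")
    if (rs.next()) {
      // Single column named "Segment ID", as shown above
      println(rs.getString("Segment ID")) // prints 0
    }
    conn.close()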
##########
File path: integration/spark/src/test/scala/org/apache/spark/util/CarbonCommandSuite.scala
##########
@@ -100,6 +138,56 @@ class CarbonCommandSuite extends QueryTest with BeforeAndAfterAll {
private lazy val location = CarbonProperties.getStorePath()
+ test("Return segment ID after load and insert") {
+ val tableName = "test_table"
+ val inputTableName = "csv_table"
+ val inputPath = s"$resourcesPath/data_alltypes.csv"
+ dropTable(tableName)
+ dropTable(inputTableName)
+ createAndLoadInputTable(inputTableName, inputPath)
+ createTestTable(tableName)
+ checkAnswer(sql(
+ s"""
+ | INSERT INTO TABLE $tableName
+ | SELECT shortField, intField, bigintField, doubleField, stringField,
+ | from_unixtime(unix_timestamp(timestampField,'yyyy/M/dd')) timestampField, decimalField,
+ | cast(to_date(from_unixtime(unix_timestamp(dateField,'yyyy/M/dd'))) as date), charField
+ | FROM $inputTableName
+ """.stripMargin), Seq(Row("0")))
+ checkAnswer(sql(
+ s"LOAD DATA LOCAL INPATH '$inputPath'" +
+ s" INTO TABLE $tableName" +
+ " OPTIONS('FILEHEADER'=" +
+ "'shortField,intField,bigintField,doubleField,stringField," +
+ "timestampField,decimalField,dateField,charField')"), Seq(Row("1")))
Review comment:
And in Spark SQL, it will return just a numeric string. The return value is simple, and I think in this case the user can easily capture it and use it as the input to the next step. I don't think many more words are needed here.
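A short sketch of that "input to the next step" usage (my example; the follow-up delete-segment-by-id DML already exists in CarbonData and is not part of this PR; tableName and inputPath are the vals from the test above):

    // Capture the segment ID string returned by LOAD DATA ...
    val segmentId = sql(
      s"LOAD DATA LOCAL INPATH '$inputPath' INTO TABLE $tableName")
      .collect().head.getString(0)
    // ... and feed it straight into a follow-up, segment-scoped command.
    sql(s"DELETE FROM TABLE $tableName WHERE SEGMENT.ID IN ($segmentId)")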
##########
File path: integration/spark/src/test/scala/org/apache/spark/util/CarbonCommandSuite.scala
##########
@@ -82,6 +83,43 @@ class CarbonCommandSuite extends QueryTest with BeforeAndAfterAll {
""".stripMargin)
}
+ protected def createTestTable(tableName: String): Unit = {
+ sql(
+ s"""
Review comment:
Can you help point out which test case I can add validation to? There are too many.
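If it helps, one possible validation (my sketch; it assumes the segment ID is the first column of SHOW SEGMENTS output) would be to check the returned ID against the table's actual segments:

    val returnedId = sql(
      s"LOAD DATA LOCAL INPATH '$inputPath' INTO TABLE $tableName")
      .collect().head.getString(0)
    // SHOW SEGMENTS lists existing segments; the returned ID should be among them.
    val segmentIds = sql(s"SHOW SEGMENTS FOR TABLE $tableName")
      .collect().map(_.getString(0))
    assert(segmentIds.contains(returnedId))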
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[hidden email]