Github user kunal642 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1861#discussion_r165271343

--- Diff: integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala ---
@@ -310,5 +310,99 @@ test("check load and select for avg double datatype") {
     checkAnswer(sql("select name,avg(salary) from maintbl group by name"), rows)
   }

+  test("create datamap with 'if not exists' after load data into mainTable and create datamap") {
--- End diff --

I think there is no need to add test cases in all the files. One test case in TestPreAggregateCreateCommand and one in TestTimeseriesCreateTable would be enough.

---
Github user kunal642 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1861#discussion_r165271557

--- Diff: integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/timeseries/TestTimeSeriesCreateTable.scala ---
@@ -319,6 +326,53 @@ class TestTimeSeriesCreateTable extends QueryTest with BeforeAndAfterAll {
     assert(e.getMessage.equals(s"$timeSeries should define time granularity"))
   }

+  test("test timeseries create table 19: should support if not exists") {
+    sql("DROP DATAMAP IF EXISTS agg1 ON TABLE mainTable")
+    try {
--- End diff --

No need for the try block. If an exception is thrown, the test case will fail.

---
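To illustrate the point, here is a minimal, self-contained sketch in the ScalaTest FunSuite style the existing suites build on. The sql helper below is a hypothetical stand-in for QueryTest's Spark-backed one, and the DMPROPERTIES values are illustrative rather than the PR's final statement:

    import org.scalatest.FunSuite

    // Sketch only: `sql` is a hypothetical stand-in so the flow can be read
    // without a Spark session; the CREATE DATAMAP options are illustrative.
    class TimeSeriesIfNotExistsSketch extends FunSuite {

      private def sql(statement: String): Unit =
        println(s"executing: $statement")

      test("timeseries datamap creation should support IF NOT EXISTS") {
        sql("DROP DATAMAP IF EXISTS agg1 ON TABLE mainTable")
        // No try/catch: if either statement throws, ScalaTest records the
        // exception and marks the test as failed on its own.
        sql(
          """CREATE DATAMAP IF NOT EXISTS agg1 ON TABLE mainTable
            |USING 'timeSeries'
            |DMPROPERTIES ('EVENT_TIME'='dataTime', 'SECOND_GRANULARITY'='1')
            |AS SELECT dataTime, SUM(age) FROM mainTable GROUP BY dataTime""".stripMargin)
      }
    }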
Github user kunal642 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1861#discussion_r165272350

--- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala ---
@@ -49,10 +52,22 @@ case class CarbonCreateDataMapCommand(
       throw new MalformedCarbonCommandException("Streaming table does not support creating datamap")
     }
     val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+    val dbName = tableIdentifier.database.getOrElse("default")
+    val tableName = tableIdentifier.table + "_" + dataMapName
-    if (dmClassName.equalsIgnoreCase(PREAGGREGATE.toString) ||
+    if (sparkSession.sessionState.catalog.listTables(dbName)
--- End diff --

sparkSession.sessionState.catalog.tableExists(tableIdentifier)

---
Github user xubo245 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1861#discussion_r165295181

--- Diff: integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/timeseries/TestTimeSeriesCreateTable.scala ---
@@ -81,29 +82,29 @@ class TestTimeSeriesCreateTable extends QueryTest with BeforeAndAfterAll {
       """.stripMargin)
   }

-  test("test timeseries create table Zero") {
+  test("test timeseries create table 1") {
--- End diff --

It's necessary and convenient to find a test case by its number when some test case fails.

---
Github user xubo245 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1861#discussion_r165296290

--- Diff: integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala ---
@@ -310,5 +310,99 @@ test("check load and select for avg double datatype") {
     checkAnswer(sql("select name,avg(salary) from maintbl group by name"), rows)
   }

+  test("create datamap with 'if not exists' after load data into mainTable and create datamap") {
--- End diff --

Not in all files, but I think we should add this test case: create a datamap when the main table already has data.

---
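A hedged sketch of the requested scenario (create the datamap only after the main table already holds data), written in the FunSuite style of the existing suites; the CSV path, datamap name, and statements are illustrative, not the PR's final test:

    import org.scalatest.FunSuite

    // Sketch only: `sql` is a hypothetical stand-in for QueryTest's Spark-backed helper.
    class PreAggregateLoadThenCreateSketch extends FunSuite {

      private def sql(statement: String): Unit =
        println(s"executing: $statement")

      test("create datamap with 'if not exists' after data is already loaded") {
        // 1. Load the main table first, so it already contains data.
        sql("LOAD DATA LOCAL INPATH '/tmp/sample.csv' INTO TABLE maintbl")
        // 2. Only then create the pre-aggregate datamap; the existing load should be
        //    reflected in the aggregate table, which is the case being asked for.
        sql(
          """CREATE DATAMAP IF NOT EXISTS agg0 ON TABLE maintbl
            |USING 'preaggregate'
            |AS SELECT name, SUM(salary) FROM maintbl GROUP BY name""".stripMargin)
        // 3. A real test would now call checkAnswer against the expected rows,
        //    as the surrounding tests in TestPreAggregateLoad do.
      }
    }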
Github user xubo245 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1861#discussion_r165296811

--- Diff: integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala ---
@@ -310,5 +310,99 @@ test("check load and select for avg double datatype") {
     checkAnswer(sql("select name,avg(salary) from maintbl group by name"), rows)
   }

+  test("create datamap with 'if not exists' after load data into mainTable and create datamap") {
--- End diff --

Another problem happens when the order of loading data into the main table and creating the datamap is different; see CARBONDATA-2085.

---
Github user xubo245 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1861#discussion_r165300927

--- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala ---
@@ -49,10 +52,22 @@ case class CarbonCreateDataMapCommand(
       throw new MalformedCarbonCommandException("Streaming table does not support creating datamap")
     }
     val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+    val dbName = tableIdentifier.database.getOrElse("default")
+    val tableName = tableIdentifier.table + "_" + dataMapName
-    if (dmClassName.equalsIgnoreCase(PREAGGREGATE.toString) ||
+    if (sparkSession.sessionState.catalog.listTables(dbName)
--- End diff --

When I change it to "sparkSession.sessionState.catalog.tableExists(tableIdentifier)", it fails like:

An exception or error caused a run to abort: Table or view 'mainTable_agg0_second' already exists in database 'default';
org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException: Table or view 'mainTable_agg0_second' already exists in database 'default';

---
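The failure reported above is consistent with the existence check needing to target the pre-aggregate child table (named mainTable_dataMapName, e.g. mainTable_agg0_second) rather than the main table's tableIdentifier. A hedged sketch of that reading follows; it is an inference from the comment, not the PR's final code:

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.catalyst.TableIdentifier

    // Sketch only: decides whether datamap creation can be skipped under IF NOT EXISTS.
    // The identifier passed to tableExists is built for the child table, because the
    // main table's own identifier exists by definition and says nothing about the datamap.
    object DataMapExistenceSketch {
      def childTableExists(sparkSession: SparkSession,
          mainTable: TableIdentifier,
          dataMapName: String): Boolean = {
        val dbName = mainTable.database.getOrElse("default")
        val childTableName = mainTable.table + "_" + dataMapName
        sparkSession.sessionState.catalog
          .tableExists(TableIdentifier(childTableName, Some(dbName)))
      }
    }

With an identifier built this way, catalog.tableExists keeps the single-lookup form kunal642 suggested without scanning everything returned by listTables(dbName).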
Github user xubo245 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/1861#discussion_r165301060

--- Diff: integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/timeseries/TestTimeSeriesCreateTable.scala ---
@@ -319,6 +326,53 @@ class TestTimeSeriesCreateTable extends QueryTest with BeforeAndAfterAll {
     assert(e.getMessage.equals(s"$timeSeries should define time granularity"))
   }

+  test("test timeseries create table 19: should support if not exists") {
+    sql("DROP DATAMAP IF EXISTS agg1 ON TABLE mainTable")
+    try {
--- End diff --

ok, done

---
Github user kunal642 commented on the issue:
https://github.com/apache/carbondata/pull/1861

LGTM

---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/1861

Build Success with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/2173/

---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/1861

Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/3415/

---
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/1861

SDV Build Success, Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/3286/

---
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/1861

SDV Build Success, Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/3294/

---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/1861

Build Success with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/2188/

---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/1861

Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/3427/

---