Github user xuchuanyin commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2357#discussion_r192418665 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala --- @@ -171,19 +170,25 @@ class CarbonSession(@transient val sc: SparkContext, */ private def trySearchMode(qe: QueryExecution, sse: SQLStart): DataFrame = { val analyzed = qe.analyzed + val LOG: LogService = LogServiceFactory.getLogService(classOf[CarbonSession].getName) analyzed match { case _@Project(columns, _@Filter(expr, s: SubqueryAlias)) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => + LOG.info(String.format("Search service started and supports: %s", sse.sqlText)) --- End diff -- It is in scala here, why still use String.format? --- |
In reply to this post by qiuchenjian-2
Github user xuchuanyin commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2357#discussion_r192420491 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala --- @@ -171,19 +170,25 @@ class CarbonSession(@transient val sc: SparkContext, */ private def trySearchMode(qe: QueryExecution, sse: SQLStart): DataFrame = { val analyzed = qe.analyzed + val LOG: LogService = LogServiceFactory.getLogService(classOf[CarbonSession].getName) analyzed match { case _@Project(columns, _@Filter(expr, s: SubqueryAlias)) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => + LOG.info(String.format("Search service started and supports: %s", sse.sqlText)) runSearch(analyzed, columns, expr, s.child.asInstanceOf[LogicalRelation]) case gl@GlobalLimit(_, ll@LocalLimit(_, p@Project(columns, _@Filter(expr, s: SubqueryAlias)))) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => val logicalRelation = s.child.asInstanceOf[LogicalRelation] + LOG.info(String.format("Search service started and supports: %s", sse.sqlText)) runSearch(analyzed, columns, expr, logicalRelation, gl.maxRows, ll.maxRows) case _ => + LOG.info(String.format( --- End diff -- the level is `warn` or `info`? --- |
In reply to this post by qiuchenjian-2
Github user xubo245 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2357#discussion_r192551319 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala --- @@ -101,8 +100,8 @@ class CarbonSession(@transient val sc: SparkContext, } catch { case e: Exception => logError(String.format( --- End diff -- ok, I will change --- |
In reply to this post by qiuchenjian-2
Github user xubo245 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2357#discussion_r192551326 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala --- @@ -171,19 +170,25 @@ class CarbonSession(@transient val sc: SparkContext, */ private def trySearchMode(qe: QueryExecution, sse: SQLStart): DataFrame = { val analyzed = qe.analyzed + val LOG: LogService = LogServiceFactory.getLogService(classOf[CarbonSession].getName) analyzed match { case _@Project(columns, _@Filter(expr, s: SubqueryAlias)) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => + LOG.info(String.format("Search service started and supports: %s", sse.sqlText)) --- End diff -- ok, thanks, changed --- |
In reply to this post by qiuchenjian-2
Github user xubo245 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2357#discussion_r192551338 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala --- @@ -171,19 +170,25 @@ class CarbonSession(@transient val sc: SparkContext, */ private def trySearchMode(qe: QueryExecution, sse: SQLStart): DataFrame = { val analyzed = qe.analyzed + val LOG: LogService = LogServiceFactory.getLogService(classOf[CarbonSession].getName) analyzed match { case _@Project(columns, _@Filter(expr, s: SubqueryAlias)) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => + LOG.info(String.format("Search service started and supports: %s", sse.sqlText)) runSearch(analyzed, columns, expr, s.child.asInstanceOf[LogicalRelation]) case gl@GlobalLimit(_, ll@LocalLimit(_, p@Project(columns, _@Filter(expr, s: SubqueryAlias)))) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => val logicalRelation = s.child.asInstanceOf[LogicalRelation] + LOG.info(String.format("Search service started and supports: %s", sse.sqlText)) runSearch(analyzed, columns, expr, logicalRelation, gl.maxRows, ll.maxRows) case _ => + LOG.info(String.format( --- End diff -- done, info, other don't support now --- |
In reply to this post by qiuchenjian-2
Github user xubo245 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2357#discussion_r192551344 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala --- @@ -171,19 +170,25 @@ class CarbonSession(@transient val sc: SparkContext, */ private def trySearchMode(qe: QueryExecution, sse: SQLStart): DataFrame = { val analyzed = qe.analyzed + val LOG: LogService = LogServiceFactory.getLogService(classOf[CarbonSession].getName) analyzed match { case _@Project(columns, _@Filter(expr, s: SubqueryAlias)) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => + LOG.info(String.format("Search service started and supports: %s", sse.sqlText)) runSearch(analyzed, columns, expr, s.child.asInstanceOf[LogicalRelation]) case gl@GlobalLimit(_, ll@LocalLimit(_, p@Project(columns, _@Filter(expr, s: SubqueryAlias)))) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => val logicalRelation = s.child.asInstanceOf[LogicalRelation] + LOG.info(String.format("Search service started and supports: %s", sse.sqlText)) runSearch(analyzed, columns, expr, logicalRelation, gl.maxRows, ll.maxRows) case _ => + LOG.info(String.format( + "Search service started, but don't support: %s, and running it with SparkSQL", --- End diff -- ok, done --- |
In reply to this post by qiuchenjian-2
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/2357 SDV Build Success , Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/5209/ --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2357 Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/6239/ --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2357 Build Success with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/5077/ --- |
In reply to this post by qiuchenjian-2
Github user xubo245 commented on the issue:
https://github.com/apache/carbondata/pull/2357 @xuchuanyin CI passed, please check again. --- |
In reply to this post by qiuchenjian-2
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2357#discussion_r192742562 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala --- @@ -171,19 +170,24 @@ class CarbonSession(@transient val sc: SparkContext, */ private def trySearchMode(qe: QueryExecution, sse: SQLStart): DataFrame = { val analyzed = qe.analyzed + val LOG: LogService = LogServiceFactory.getLogService(classOf[CarbonSession].getName) --- End diff -- change `classOf[CarbonSession]` to `this.getClass` --- |
In reply to this post by qiuchenjian-2
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2357#discussion_r192742793 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala --- @@ -171,19 +170,24 @@ class CarbonSession(@transient val sc: SparkContext, */ private def trySearchMode(qe: QueryExecution, sse: SQLStart): DataFrame = { val analyzed = qe.analyzed + val LOG: LogService = LogServiceFactory.getLogService(classOf[CarbonSession].getName) analyzed match { case _@Project(columns, _@Filter(expr, s: SubqueryAlias)) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => + LOG.info(s"Search service started and supports: ${sse.sqlText}") runSearch(analyzed, columns, expr, s.child.asInstanceOf[LogicalRelation]) case gl@GlobalLimit(_, ll@LocalLimit(_, p@Project(columns, _@Filter(expr, s: SubqueryAlias)))) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => val logicalRelation = s.child.asInstanceOf[LogicalRelation] + LOG.info(s"Search service started and supports: ${sse.sqlText}") --- End diff -- put this log into `runSearch` --- |
In reply to this post by qiuchenjian-2
Github user xubo245 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2357#discussion_r192971763 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala --- @@ -171,19 +170,24 @@ class CarbonSession(@transient val sc: SparkContext, */ private def trySearchMode(qe: QueryExecution, sse: SQLStart): DataFrame = { val analyzed = qe.analyzed + val LOG: LogService = LogServiceFactory.getLogService(classOf[CarbonSession].getName) --- End diff -- ok, done --- |
In reply to this post by qiuchenjian-2
Github user xubo245 commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2357#discussion_r192974805 --- Diff: integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala --- @@ -171,19 +170,24 @@ class CarbonSession(@transient val sc: SparkContext, */ private def trySearchMode(qe: QueryExecution, sse: SQLStart): DataFrame = { val analyzed = qe.analyzed + val LOG: LogService = LogServiceFactory.getLogService(classOf[CarbonSession].getName) analyzed match { case _@Project(columns, _@Filter(expr, s: SubqueryAlias)) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => + LOG.info(s"Search service started and supports: ${sse.sqlText}") runSearch(analyzed, columns, expr, s.child.asInstanceOf[LogicalRelation]) case gl@GlobalLimit(_, ll@LocalLimit(_, p@Project(columns, _@Filter(expr, s: SubqueryAlias)))) if s.child.isInstanceOf[LogicalRelation] && s.child.asInstanceOf[LogicalRelation].relation .isInstanceOf[CarbonDatasourceHadoopRelation] => val logicalRelation = s.child.asInstanceOf[LogicalRelation] + LOG.info(s"Search service started and supports: ${sse.sqlText}") --- End diff -- It can't access sse.sqlText if we move the log into runSearch, unless we change the runSearch parameters. I think it's better to keep the log in trySearchMode and add something to distinguish the filter and limit cases. --- |
In reply to this post by qiuchenjian-2
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/2357 SDV Build Success , Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/5224/ --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2357 Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/6254/ --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2357 Build Success with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/5092/ --- |
In reply to this post by qiuchenjian-2
Github user xubo245 commented on the issue:
https://github.com/apache/carbondata/pull/2357 @jackylk changed, CI passed, please check it again. --- |
In reply to this post by qiuchenjian-2
Github user xubo245 commented on the issue:
https://github.com/apache/carbondata/pull/2357 retest this please --- |
In reply to this post by qiuchenjian-2
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2357 Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/6275/ --- |
Free forum by Nabble | Edit this page |