     [ https://issues.apache.org/jira/browse/CARBONDATA-1705?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Kunal Kapoor updated CARBONDATA-1705:
-------------------------------------
    Summary: NullPointerException is thrown while loading data into table with pre-aggregate table  (was: NullPointerException is thrown while loading data into table)

> NullPointerException is thrown while loading data into table with pre-aggregate table
> --------------------------------------------------------------------------------------
>
>                 Key: CARBONDATA-1705
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-1705
>             Project: CarbonData
>          Issue Type: Bug
>            Reporter: Kunal Kapoor
>            Assignee: Kunal Kapoor
>
> *Steps:*
> 1. CREATE TABLE maintable(id int, name string, city string, age int) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('dictionary_include'='id')
> 2. create table maintable_preagg_avg stored BY 'carbondata' tblproperties('parent'='maintable') as select id,avg(age) from maintable group by id
> 3. LOAD DATA LOCAL INPATH 'sample.csv' into table maintable
> *Exception:*
> 17/11/12 00:04:17 ERROR TaskContextImpl: Error in TaskCompletionListener
> java.lang.NullPointerException
> at org.apache.spark.TaskContextImpl$$anonfun$markTaskCompleted$1.apply(TaskContextImpl.scala:97)
> at org.apache.spark.TaskContextImpl$$anonfun$markTaskCompleted$1.apply(TaskContextImpl.scala:95)
> at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
> at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
> at org.apache.spark.TaskContextImpl.markTaskCompleted(TaskContextImpl.scala:95)
> at org.apache.spark.scheduler.Task.run(Task.scala:112)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:282)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
> at java.lang.Thread.run(Thread.java:745)
> 17/11/12 00:04:17 ERROR Executor: Exception in task 0.0 in stage 5163.0 (TID 65678)
> org.apache.spark.util.TaskCompletionListenerException
> at org.apache.spark.TaskContextImpl.markTaskCompleted(TaskContextImpl.scala:105)
> at org.apache.spark.scheduler.Task.run(Task.scala:112)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:282)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
> at java.lang.Thread.run(Thread.java:745)
> 17/11/12 00:04:17 ERROR TaskSetManager: Task 0 in stage 5163.0 failed 1 times; aborting job
> 17/11/12 00:04:17 ERROR CarbonDataRDDFactory$: ScalaTest-main-running-TestPreAggregateLoad load data frame failed
> org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 5163.0 failed 1 times, most recent failure: Lost task 0.0 in stage 5163.0 (TID 65678, localhost, executor driver): org.apache.spark.util.TaskCompletionListenerException
> at org.apache.spark.TaskContextImpl.markTaskCompleted(TaskContextImpl.scala:105)
> at org.apache.spark.scheduler.Task.run(Task.scala:112)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:282)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
> at java.lang.Thread.run(Thread.java:745)
> Driver stacktrace:
> at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1435)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1423)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1422)
> at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
> at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
> at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1422)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
> at scala.Option.foreach(Option.scala:257)
> at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:802)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1650)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1605)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1594)
> at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
> at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:628)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:1918)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:1931)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:1944)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:1958)
> at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:935)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> at org.apache.spark.rdd.RDD.withScope(RDD.scala:362)
> at org.apache.spark.rdd.RDD.collect(RDD.scala:934)
> at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadDataFrame(CarbonDataRDDFactory.scala:918)
> at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadCarbonData(CarbonDataRDDFactory.scala:325)
> at org.apache.spark.sql.execution.command.management.LoadTableCommand.loadData(LoadTableCommand.scala:336)
> at org.apache.spark.sql.execution.command.management.LoadTableCommand.processData(LoadTableCommand.scala:177)
> at org.apache.spark.sql.execution.command.management.LoadTableCommand.run(LoadTableCommand.scala:60)
> at org.apache.spark.sql.execution.command.management.LoadTableByInsertCommand.processData(LoadTableByInsertCommand.scala:47)
> at org.apache.spark.sql.execution.command.management.LoadTableByInsertCommand.run(LoadTableByInsertCommand.scala:33)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:74)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:135)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:132)
> at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:113)
> at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:87)
> at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:87)
> at org.apache.spark.sql.Dataset.<init>(Dataset.scala:185)
> at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:64)
> at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:592)
> at org.apache.spark.sql.execution.command.preaaggregate.LoadPostAggregateListener$$anonfun$onEvent$4.apply(PreAggregateListeners.scala:77)
> at org.apache.spark.sql.execution.command.preaaggregate.LoadPostAggregateListener$$anonfun$onEvent$4.apply(PreAggregateListeners.scala:65)
> at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733)
> at scala.collection.Iterator$class.foreach(Iterator.scala:893)
> at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
> at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
> at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
> at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732)
> at org.apache.spark.sql.execution.command.preaaggregate.LoadPostAggregateListener$.onEvent(PreAggregateListeners.scala:65)
> at org.apache.carbondata.events.OperationListenerBus.fireEvent(OperationListenerBus.java:116)
> at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadCarbonData(CarbonDataRDDFactory.scala:486)
> at org.apache.spark.sql.execution.command.management.LoadTableCommand.loadData(LoadTableCommand.scala:336)
> at org.apache.spark.sql.execution.command.management.LoadTableCommand.processData(LoadTableCommand.scala:177)
> at org.apache.spark.sql.execution.command.management.LoadTableCommand.run(LoadTableCommand.scala:60)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:74)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:135)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:132)
> at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:113)
> at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:87)
> at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:87)
> at org.apache.spark.sql.Dataset.<init>(Dataset.scala:185)
> at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:64)
> at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:592)
> at org.apache.spark.sql.test.Spark2TestQueryExecutor.sql(Spark2TestQueryExecutor.scala:35)
> at org.apache.spark.sql.test.util.QueryTest.sql(QueryTest.scala:87)
> at org.apache.carbondata.integration.spark.testsuite.preaggregate.TestPreAggregateLoad$$anonfun$4.apply$mcV$sp(TestPreAggregateLoad.scala:129)
> at org.apache.carbondata.integration.spark.testsuite.preaggregate.TestPreAggregateLoad$$anonfun$4.apply(TestPreAggregateLoad.scala:121)
> at org.apache.carbondata.integration.spark.testsuite.preaggregate.TestPreAggregateLoad$$anonfun$4.apply(TestPreAggregateLoad.scala:121)
> at org.scalatest.Transformer$$anonfun$apply$1.apply$mcV$sp(Transformer.scala:22)
> at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
> at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
> at org.scalatest.Transformer.apply(Transformer.scala:22)
> at org.scalatest.Transformer.apply(Transformer.scala:20)
> at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:166)
> at org.apache.spark.sql.test.util.CarbonFunSuite.withFixture(CarbonFunSuite.scala:41)
> at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:163)
> at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
> at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
> at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
> at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:175)
> at org.scalatest.FunSuite.runTest(FunSuite.scala:1555)
> at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
> at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
> at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:413)
> at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:401)
> at scala.collection.immutable.List.foreach(List.scala:381)
> at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
> at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:396)
> at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:483)
> at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:208)
> at org.scalatest.FunSuite.runTests(FunSuite.scala:1555)
> at org.scalatest.Suite$class.run(Suite.scala:1424)
> at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1555)
> at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
> at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
> at org.scalatest.SuperEngine.runImpl(Engine.scala:545)
> at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:212)
> at org.apache.carbondata.integration.spark.testsuite.preaggregate.TestPreAggregateLoad.org$scalatest$BeforeAndAfterAll$$super$run(TestPreAggregateLoad.scala:24)
> at org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:257)
> at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:256)
> at org.apache.carbondata.integration.spark.testsuite.preaggregate.TestPreAggregateLoad.run(TestPreAggregateLoad.scala:24)
> at org.scalatest.Suite$class.callExecuteOnSuite$1(Suite.scala:1492)
> at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1528)
> at org.scalatest.Suite$$anonfun$runNestedSuites$1.apply(Suite.scala:1526)
> at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
> at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
> at org.scalatest.Suite$class.runNestedSuites(Suite.scala:1526)
> at org.scalatest.tools.DiscoverySuite.runNestedSuites(DiscoverySuite.scala:29)
> at org.scalatest.Suite$class.run(Suite.scala:1421)
> at org.scalatest.tools.DiscoverySuite.run(DiscoverySuite.scala:29)
> at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:55)
> at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2563)
> at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2557)
> at scala.collection.immutable.List.foreach(List.scala:381)
> at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:2557)
> at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1044)
> at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1043)
> at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:2722)
> at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1043)
> at org.scalatest.tools.Runner$.main(Runner.scala:860)
> at org.scalatest.tools.Runner.main(Runner.scala)
> Caused by: org.apache.spark.util.TaskCompletionListenerException
> at org.apache.spark.TaskContextImpl.markTaskCompleted(TaskContextImpl.scala:105)
> at org.apache.spark.scheduler.Task.run(Task.scala:112)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:282)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
> at java.lang.Thread.run(Thread.java:745)
> 17/11/12 00:04:17 ERROR CarbonDataRDDFactory$: ScalaTest-main-running-TestPreAggregateLoad
> org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 5163.0 failed 1 times, most recent failure: Lost task 0.0 in stage 5163.0 (TID 65678, localhost, executor driver): org.apache.spark.util.TaskCompletionListenerException
> at org.apache.spark.TaskContextImpl.markTaskCompleted(TaskContextImpl.scala:105)
> at org.apache.spark.scheduler.Task.run(Task.scala:112)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:282)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
> at java.lang.Thread.run(Thread.java:745)



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)
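For reference, the three reproduction steps quoted above can be run from a Spark shell or test. A minimal sketch, assuming a CarbonData-enabled SparkSession is already available as `spark` (the session name and the 'sample.csv' path are placeholders, not part of the issue):

    // Sketch of the repro steps from the issue description; assumes `spark` is a
    // CarbonData-enabled SparkSession and sample.csv matches the maintable schema.
    spark.sql(
      "CREATE TABLE maintable(id int, name string, city string, age int) " +
      "STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('dictionary_include'='id')")

    // Pre-aggregate child table on maintable, as in step 2 of the issue.
    spark.sql(
      "CREATE TABLE maintable_preagg_avg STORED BY 'carbondata' " +
      "TBLPROPERTIES('parent'='maintable') " +
      "AS SELECT id, avg(age) FROM maintable GROUP BY id")

    // Loading into the parent table triggers the child load via
    // LoadPostAggregateListener, where the NullPointerException above is reported.
    spark.sql("LOAD DATA LOCAL INPATH 'sample.csv' INTO TABLE maintable")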