     [ https://issues.apache.org/jira/browse/CARBONDATA-4117?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Kunal Kapoor resolved CARBONDATA-4117.
--------------------------------------
    Fix Version/s: 2.1.1
       Resolution: Fixed

> Test cg index query with Index server fails with NPE
> ----------------------------------------------------
>
>                 Key: CARBONDATA-4117
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-4117
>             Project: CarbonData
>          Issue Type: Bug
>            Reporter: SHREELEKHYA GAMPA
>            Priority: Minor
>             Fix For: 2.1.1
>
>          Time Spent: 4h 20m
>  Remaining Estimate: 0h
>
> Test queries to execute:
> spark-sql> CREATE TABLE index_test_cg(id INT, name STRING, city STRING, age INT) STORED AS carbondata TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT');
> spark-sql> create index cgindex on table index_test_cg (name) as 'org.apache.carbondata.spark.testsuite.index.CGIndexFactory';
> spark-sql> LOAD DATA LOCAL INPATH '$file2' INTO TABLE index_test_cg OPTIONS('header'='false');
> spark-sql> select * from index_test_cg where name='n502670';
>
> 2021-01-29 15:09:25,881 | ERROR | main | Exception occurred while getting splits using index server. Initiating Fallback to embedded mode | org.apache.carbondata.hadoop.api.CarbonInputFormat.getDistributedSplit(CarbonInputFormat.java:454)
> java.lang.reflect.UndeclaredThrowableException
> 	at com.sun.proxy.$Proxy69.getSplits(Unknown Source)
> 	at org.apache.carbondata.indexserver.DistributedIndexJob$$anonfun$1.apply(IndexJobs.scala:85)
> 	at org.apache.carbondata.indexserver.DistributedIndexJob$$anonfun$1.apply(IndexJobs.scala:59)
> 	at org.apache.carbondata.spark.util.CarbonScalaUtil$.logTime(CarbonScalaUtil.scala:769)
> 	at org.apache.carbondata.indexserver.DistributedIndexJob.execute(IndexJobs.scala:58)
> 	at org.apache.carbondata.core.index.IndexUtil.executeIndexJob(IndexUtil.java:307)
> 	at org.apache.carbondata.hadoop.api.CarbonInputFormat.getDistributedSplit(CarbonInputFormat.java:443)
> 	at org.apache.carbondata.hadoop.api.CarbonInputFormat.getPrunedBlocklets(CarbonInputFormat.java:555)
> 	at org.apache.carbondata.hadoop.api.CarbonInputFormat.getDataBlocksOfSegment(CarbonInputFormat.java:500)
> 	at org.apache.carbondata.hadoop.api.CarbonTableInputFormat.getSplits(CarbonTableInputFormat.java:357)
> 	at org.apache.carbondata.hadoop.api.CarbonTableInputFormat.getSplits(CarbonTableInputFormat.java:205)
> 	at org.apache.carbondata.spark.rdd.CarbonScanRDD.internalGetPartitions(CarbonScanRDD.scala:159)
> 	at org.apache.carbondata.spark.rdd.CarbonRDD.getPartitions(CarbonRDD.scala:68)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:273)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:269)
> 	at scala.Option.getOrElse(Option.scala:121)
> 	at org.apache.spark.rdd.RDD.partitions(RDD.scala:269)
> 	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:49)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:273)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:269)
> 	at scala.Option.getOrElse(Option.scala:121)
> 	at org.apache.spark.rdd.RDD.partitions(RDD.scala:269)
> 	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:49)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:273)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:269)
> 	at scala.Option.getOrElse(Option.scala:121)
> 	at org.apache.spark.rdd.RDD.partitions(RDD.scala:269)
> 	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2299)
> 	at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:989)
> 	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> 	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> 	at org.apache.spark.rdd.RDD.withScope(RDD.scala:384)
> 	at org.apache.spark.rdd.RDD.collect(RDD.scala:988)
> 	at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:345)
> 	at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:372)
> 	at org.apache.spark.sql.execution.QueryExecution.hiveResultString(QueryExecution.scala:127)
> 	at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver$$anonfun$run$1.apply(SparkSQLDriver.scala:66)
> 	at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver$$anonfun$run$1.apply(SparkSQLDriver.scala:66)
> 	at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1$$anonfun$apply$1.apply(SQLExecution.scala:95)
> 	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:144)
> 	at org.apache.spark.sql.execution.SQLExecution$$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:86)
> 	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:789)
> 	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:63)
> 	at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:65)
> 	at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:383)
> 	at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:406)
> 	at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:277)
> 	at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> 	at java.lang.reflect.Method.invoke(Method.java:498)
> 	at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
> 	at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:882)
> 	at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:164)
> 	at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:187)
> 	at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:89)
> 	at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:957)
> 	at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:966)
> 	at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
> Caused by: org.apache.hadoop.ipc.RemoteException(java.io.IOException): java.security.PrivilegedActionException: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 20.0 failed 4 times, most recent failure: Lost task 0.3 in stage 20.0 (TID 45, linux-29, executor 9): java.lang.NullPointerException
> 	at org.apache.carbondata.core.index.IndexStoreManager.getIndex(IndexStoreManager.java:145)
> 	at org.apache.carbondata.core.index.IndexUtil.pruneIndexes(IndexUtil.java:248)
> 	at org.apache.carbondata.core.index.IndexUtil.pruneIndexes(IndexUtil.java:226)
> 	at org.apache.carbondata.core.index.IndexInputFormat$1.initialize(IndexInputFormat.java:176)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1$$anonfun$apply$1.apply(DistributedPruneRDD.scala:141)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1$$anonfun$apply$1.apply(DistributedPruneRDD.scala:138)
> 	at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
> 	at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
> 	at scala.collection.immutable.List.foreach(List.scala:392)
> 	at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)
> 	at scala.collection.immutable.List.flatMap(List.scala:355)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1.apply(DistributedPruneRDD.scala:138)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1.apply(DistributedPruneRDD.scala:138)
> 	at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
> 	at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> 	at java.lang.Thread.run(Thread.java:748)
> Driver stacktrace:
> 	at java.security.AccessController.doPrivileged(Native Method)
> 	at javax.security.auth.Subject.doAs(Subject.java:360)
> 	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1741)
> 	at org.apache.carbondata.indexserver.IndexServer$.doAs(IndexServer.scala:116)
> 	at org.apache.carbondata.indexserver.IndexServer$.getSplits(IndexServer.scala:169)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> 	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> 	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> 	at java.lang.reflect.Method.invoke(Method.java:498)
> 	at org.apache.hadoop.ipc.WritableRpcEngine$Server$WritableRpcInvoker.call(WritableRpcEngine.java:550)
> 	at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1036)
> 	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:985)
> 	at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:913)
> 	at java.security.AccessController.doPrivileged(Native Method)
> 	at javax.security.auth.Subject.doAs(Subject.java:422)
> 	at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1761)
> 	at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2876)
> 	at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1563)
> 	at org.apache.hadoop.ipc.Client.call(Client.java:1509)
> 	at org.apache.hadoop.ipc.Client.call(Client.java:1408)
> 	at org.apache.hadoop.ipc.WritableRpcEngine$Invoker.invoke(WritableRpcEngine.java:251)
> 	... 60 more
>
> 2021-01-29 15:09:25,960 | ERROR | Executor task launch worker for task 102 | Exception in task 0.0 in stage 7.0 (TID 102) | org.apache.spark.executor.Executor.logError(Logging.scala:91)
> java.lang.NullPointerException
> 	at org.apache.carbondata.core.index.IndexStoreManager.getIndex(IndexStoreManager.java:145)
> 	at org.apache.carbondata.core.index.IndexUtil.pruneIndexes(IndexUtil.java:248)
> 	at org.apache.carbondata.core.index.IndexUtil.pruneIndexes(IndexUtil.java:226)
> 	at org.apache.carbondata.core.index.IndexInputFormat$1.initialize(IndexInputFormat.java:176)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1$$anonfun$apply$1.apply(DistributedPruneRDD.scala:141)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1$$anonfun$apply$1.apply(DistributedPruneRDD.scala:138)
> 	at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
> 	at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
> 	at scala.collection.immutable.List.foreach(List.scala:392)
> 	at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)
> 	at scala.collection.immutable.List.flatMap(List.scala:355)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1.apply(DistributedPruneRDD.scala:138)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1.apply(DistributedPruneRDD.scala:138)
> 	at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
> 	at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> 	at java.lang.Thread.run(Thread.java:748)
>
> 2021-01-29 15:09:25,962 | WARN | task-result-getter-3 | Lost task 0.0 in stage 7.0 (TID 102, localhost, executor driver): java.lang.NullPointerException
> 	at org.apache.carbondata.core.index.IndexStoreManager.getIndex(IndexStoreManager.java:145)
> 	at org.apache.carbondata.core.index.IndexUtil.pruneIndexes(IndexUtil.java:248)
> 	at org.apache.carbondata.core.index.IndexUtil.pruneIndexes(IndexUtil.java:226)
> 	at org.apache.carbondata.core.index.IndexInputFormat$1.initialize(IndexInputFormat.java:176)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1$$anonfun$apply$1.apply(DistributedPruneRDD.scala:141)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1$$anonfun$apply$1.apply(DistributedPruneRDD.scala:138)
> 	at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
> 	at scala.collection.TraversableLike$$anonfun$flatMap$1.apply(TraversableLike.scala:241)
> 	at scala.collection.immutable.List.foreach(List.scala:392)
> 	at scala.collection.TraversableLike$class.flatMap(TraversableLike.scala:241)
> 	at scala.collection.immutable.List.flatMap(List.scala:355)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1.apply(DistributedPruneRDD.scala:138)
> 	at org.apache.carbondata.indexserver.DistributedPruneRDD$$anonfun$org$apache$carbondata$indexserver$DistributedPruneRDD$$generateFuture$1.apply(DistributedPruneRDD.scala:138)
> 	at scala.concurrent.impl.Future$PromiseCompletingRunnable.liftedTree1$1(Future.scala:24)
> 	at scala.concurrent.impl.Future$PromiseCompletingRunnable.run(Future.scala:24)
> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> 	at java.lang.Thread.run(Thread.java:748)
> | org.apache.spark.scheduler.TaskSetManager.logWarning(Logging.scala:66)
> 2021-01-29 15:09:25,962 | ERROR | task-result-getter-3 | Task 0 in stage 7.0 failed 1 times; aborting job | org.apache.spark.scheduler.TaskSetManager.logError(Logging.scala:70)
> Time taken: 15.16 seconds
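For anyone scripting the reproduction above outside the spark-sql shell, here is a minimal self-contained driver sketch. It is an illustration, not part of the original report: it assumes a CarbonData 2.x build on the classpath with the org.apache.spark.sql.CarbonExtensions session extension, an index server already running and enabled via carbon.enable.index.server=true in carbon.properties, and it keeps the issue's '$file2' placeholder for the data file (the object name Carbondata4117Repro is made up for this sketch).

    import org.apache.spark.sql.SparkSession

    object Carbondata4117Repro {
      def main(args: Array[String]): Unit = {
        // Assumes CarbonData jars on the classpath; the index server is
        // enabled through carbon.enable.index.server in carbon.properties.
        val spark = SparkSession.builder()
          .appName("CARBONDATA-4117 repro")
          .config("spark.sql.extensions", "org.apache.spark.sql.CarbonExtensions")
          .enableHiveSupport()
          .getOrCreate()

        // Same four statements as in the issue, in order.
        spark.sql(
          """CREATE TABLE index_test_cg(id INT, name STRING, city STRING, age INT)
            |STORED AS carbondata
            |TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT')""".stripMargin)

        spark.sql(
          """CREATE INDEX cgindex ON TABLE index_test_cg (name)
            |AS 'org.apache.carbondata.spark.testsuite.index.CGIndexFactory'""".stripMargin)

        // '$file2' is the data-file placeholder from the issue; substitute a real path.
        spark.sql("LOAD DATA LOCAL INPATH '$file2' INTO TABLE index_test_cg OPTIONS('header'='false')")

        // Before the 2.1.1 fix, this query failed with the NullPointerException
        // in IndexStoreManager.getIndex shown in the log above.
        spark.sql("SELECT * FROM index_test_cg WHERE name='n502670'").show()
      }
    }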
--
This message was sent by Atlassian Jira
(v8.3.4#803005)