[jira] [Resolved] (CARBONDATA-3147) Preaggregate dataload fails in case of concurrent load in some cases


Akash R Nilugal (Jira)

     [ https://issues.apache.org/jira/browse/CARBONDATA-3147?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Ravindra Pesala resolved CARBONDATA-3147.
-----------------------------------------
       Resolution: Fixed
    Fix Version/s: 1.5.2

> Preaggregate dataload fails in case of concurrent load in some cases
> --------------------------------------------------------------------
>
>                 Key: CARBONDATA-3147
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-3147
>             Project: CarbonData
>          Issue Type: Bug
>            Reporter: Kunal Kapoor
>            Assignee: Kunal Kapoor
>            Priority: Major
>             Fix For: 1.5.2
>
>          Time Spent: 6h 50m
>  Remaining Estimate: 0h
>
> java.io.IOException: Entry not found to update in the table status file
> at org.apache.carbondata.processing.util.CarbonLoaderUtil.recordNewLoadMetadata(CarbonLoaderUtil.java:320)
> at org.apache.carbondata.processing.util.CarbonLoaderUtil.recordNewLoadMetadata(CarbonLoaderUtil.java:207)
> at org.apache.carbondata.processing.util.CarbonLoaderUtil.updateTableStatusForFailure(CarbonLoaderUtil.java:467)
> at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.processData(CarbonLoadDataCommand.scala:358)
> at org.apache.spark.sql.execution.command.preaaggregate.PreAggregateUtil$.startDataLoadForDataMap(PreAggregateUtil.scala:603)
> at org.apache.spark.sql.execution.command.preaaggregate.LoadPostAggregateListener$$anonfun$onEvent$10.apply(PreAggregateListeners.scala:488)
> at org.apache.spark.sql.execution.command.preaaggregate.LoadPostAggregateListener$$anonfun$onEvent$10.apply(PreAggregateListeners.scala:463)
> at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733)
> at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
> at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
> at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732)
> at org.apache.spark.sql.execution.command.preaaggregate.LoadPostAggregateListener$.onEvent(PreAggregateListeners.scala:463)
> at org.apache.carbondata.events.OperationListenerBus.fireEvent(OperationListenerBus.java:83)
> at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadCarbonData(CarbonDataRDDFactory.scala:524)
> at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.loadData(CarbonLoadDataCommand.scala:594)
> at org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand.processData(CarbonLoadDataCommand.scala:322)
> at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:147)
> at org.apache.spark.sql.execution.command.AtomicRunnableCommand$$anonfun$run$3.apply(package.scala:144)
> at org.apache.spark.sql.execution.command.Auditable$class.runWithAudit(package.scala:104)
> at org.apache.spark.sql.execution.command.AtomicRunnableCommand.runWithAudit(package.scala:140)
> at org.apache.spark.sql.execution.command.AtomicRunnableCommand.run(package.scala:144)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:59)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:57)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:75)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
> at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:135)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:132)
> at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:113)
> at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:125)
> at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:125)
> at org.apache.spark.sql.Dataset.<init>(Dataset.scala:185)
> at org.apache.spark.sql.CarbonSession$$anonfun$sql$1.apply(CarbonSession.scala:90)
> at org.apache.spark.sql.CarbonSession$$anonfun$sql$1.apply(CarbonSession.scala:89)
> at org.apache.spark.sql.CarbonSession.withProfiler(CarbonSession.scala:135)
> at org.apache.spark.sql.CarbonSession.sql(CarbonSession.scala:87)
> at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:699)
> at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:252)
> at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:183)
> at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1$$anon$2.run(SparkExecuteStatementOperation.scala:180)
> at java.security.AccessController.doPrivileged(Native Method)
> at javax.security.auth.Subject.doAs(Subject.java:422)
> at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1778)
> at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$1.run(SparkExecuteStatementOperation.scala:193)
> at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
> at java.util.concurrent.FutureTask.run(FutureTask.java:266)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:748)
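
For context, below is a minimal reproduction sketch (not taken from the ticket) of the concurrent scenario the trace above goes through: two LOAD DATA statements running in parallel on a main table that has a preaggregate datamap, so each load also triggers a child-table load via LoadPostAggregateListener. The table name, datamap name, CSV path, and session setup are placeholders/assumptions, and `carbon` stands in for an already-configured CarbonSession.

import java.util.concurrent.Executors

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration

import org.apache.spark.sql.SparkSession

object ConcurrentPreaggLoadSketch {
  def main(args: Array[String]): Unit = {
    // `carbon` stands in for an already-created CarbonSession; a plain
    // local SparkSession is built here only so the sketch is self-contained.
    val carbon: SparkSession = SparkSession.builder()
      .master("local[*]")
      .appName("concurrent-preagg-load-sketch")
      .getOrCreate()

    // Main table plus a preaggregate datamap (child table). Names and the
    // CSV path below are placeholders, not taken from the ticket.
    carbon.sql(
      "CREATE TABLE IF NOT EXISTS maintable(id INT, name STRING, city STRING, age INT) " +
        "STORED BY 'carbondata'")
    carbon.sql(
      "CREATE DATAMAP agg_city ON TABLE maintable USING 'preaggregate' " +
        "AS SELECT city, COUNT(id) FROM maintable GROUP BY city")

    implicit val ec: ExecutionContext =
      ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2))

    // Each LOAD on maintable also fires a load into the agg_city child table
    // (LoadPostAggregateListener in the trace above); running two of them at
    // once is where the table status update on the child table can race.
    val loads = (1 to 2).map { _ =>
      Future(carbon.sql("LOAD DATA INPATH '/tmp/sample.csv' INTO TABLE maintable"))
    }
    loads.foreach(Await.result(_, Duration.Inf))
    carbon.stop()
  }
}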


