Re: Using DataFrame to write a CarbonData file causes a "table not found" error
Posted by ZhuWilliam on Nov 25, 2016; 10:16am
URL: http://apache-carbondata-dev-mailing-list-archive.168.s1.nabble.com/Using-DataFrame-to-write-carbondata-file-cause-no-table-found-error-tp3203p3207.html
Here is the error:
ERROR 25-11 18:13:40,116 - Data loading failed. table not found: default.carbon1
AUDIT 25-11 18:13:40,118 - [allwefantasy][allwefantasy][Thread-98]Data loading failed. table not found: default.carbon1
INFO 25-11 18:13:40,119 - Finished job streaming job 1480068820000 ms.0 from job set of time 1480068820000 ms
INFO 25-11 18:13:40,119 - Total delay: 0.119 s for time 1480068820000 ms (execution: 0.106 s)
INFO 25-11 18:13:40,120 - Removing RDD 4 from persistence list
java.lang.RuntimeException: Data loading failed. table not found: default.carbon1
at scala.sys.package$.error(package.scala:27)
at org.apache.spark.sql.execution.command.LoadTable.run(carbonTableSchema.scala:1040)
at org.apache.carbondata.spark.CarbonDataFrameWriter.loadDataFrame(CarbonDataFrameWriter.scala:132)
at org.apache.carbondata.spark.CarbonDataFrameWriter.writeToCarbonFile(CarbonDataFrameWriter.scala:52)
at org.apache.carbondata.spark.CarbonDataFrameWriter.appendToCarbonFile(CarbonDataFrameWriter.scala:43)
at org.apache.spark.sql.CarbonSource.createRelation(CarbonDatasourceRelation.scala:112)
at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:222)
at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:148)
at streaming.core.compositor.spark.streaming.output.SQLOutputCompositor$$anonfun$result$1.apply(SQLOutputCompositor.scala:61)
at streaming.core.compositor.spark.streaming.output.SQLOutputCompositor$$anonfun$result$1.apply(SQLOutputCompositor.scala:53)
at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:661)
at org.apache.spark.streaming.dstream.DStream$$anonfun$foreachRDD$1$$anonfun$apply$mcV$sp$3.apply(DStream.scala:661)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(ForEachDStream.scala:50)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:50)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:50)
at org.apache.spark.streaming.dstream.DStream.createRDDWithLocalProperties(DStream.scala:426)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply$mcV$sp(ForEachDStream.scala:49)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:49)
at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1.apply(ForEachDStream.scala:49)
at scala.util.Try$.apply(Try.scala:161)
at org.apache.spark.streaming.scheduler.Job.run(Job.scala:39)
at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply$mcV$sp(JobScheduler.scala:224)
at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:224)
at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler$$anonfun$run$1.apply(JobScheduler.scala:224)
at scala.util.DynamicVariable.withValue(DynamicVariable.scala:57)
at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler.run(JobScheduler.scala:223)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
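
The trace shows the write going through CarbonDataFrameWriter.appendToCarbonFile, i.e. the DataFrame is being saved with SaveMode.Append, and the underlying load (LoadTable.run) then fails because the target table default.carbon1 does not exist yet. For reference, a minimal sketch of the kind of write that takes this code path; the format name "carbondata" and the "tableName" option are assumptions based on the CarbonData DataFrame writer API of that era, not the poster's exact code:

    import org.apache.spark.sql.SaveMode

    // Hedged reproduction sketch, assuming the CarbonData Spark 1.x
    // DataFrame writer (format "carbondata" with a "tableName" option).
    df.write
      .format("carbondata")
      .option("tableName", "carbon1") // resolved against the "default" database
      .mode(SaveMode.Append)          // Append routes through appendToCarbonFile,
                                      // which loads into an already-existing table
      .save()

Since Append is implemented as a load into an existing table, default.carbon1 presumably has to be created first (e.g. via CREATE TABLE ... STORED BY 'carbondata') before an append like this can succeed.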