[jira] [Updated] (CARBONDATA-1654) NullPointerException when insert overwrite table


Akash R Nilugal (Jira)

     [ https://issues.apache.org/jira/browse/CARBONDATA-1654?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

cen yuhai updated CARBONDATA-1654:
----------------------------------
    Description:
carbon.sql("insert overwrite table carbondata_table select * from hive_table where dt = '2017-10-10' ").collect
CarbonData tries to find the directory Segment_1, but only Segment_2 exists (a sketch of the suspected failure mode follows the stack trace).
{code}
[Stage 0:>                                                      (0 + 504) / 504]17/10/28 19:11:28 WARN [org.glassfish.jersey.internal.Errors(191) -- SparkUI-174]: The following warnings have been detected: WARNING: The (sub)resource method stageData in org.apache.spark.status.api.v1.OneStageResource contains empty path annotation.

17/10/28 19:25:20 ERROR [org.apache.carbondata.core.datastore.filesystem.AbstractDFSCarbonFile(141) -- main]: main Exception occurred:File does not exist: hdfs://bipcluster/user/master/carbon/store/dm_test/carbondata_table/Fact/Part0/Segment_1
17/10/28 19:25:22 ERROR [org.apache.spark.sql.execution.command.LoadTable(143) -- main]: main
java.lang.NullPointerException
        at org.apache.carbondata.core.datastore.filesystem.AbstractDFSCarbonFile.isDirectory(AbstractDFSCarbonFile.java:88)
        at org.apache.carbondata.core.util.CarbonUtil.deleteRecursive(CarbonUtil.java:364)
        at org.apache.carbondata.core.util.CarbonUtil.access$100(CarbonUtil.java:93)
        at org.apache.carbondata.core.util.CarbonUtil$2.run(CarbonUtil.java:326)
        at org.apache.carbondata.core.util.CarbonUtil$2.run(CarbonUtil.java:322)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1693)
        at org.apache.carbondata.core.util.CarbonUtil.deleteFoldersAndFiles(CarbonUtil.java:322)
        at org.apache.carbondata.spark.load.CarbonLoaderUtil.recordLoadMetadata(CarbonLoaderUtil.java:331)
        at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.updateStatus$1(CarbonDataRDDFactory.scala:595)
        at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadCarbonData(CarbonDataRDDFactory.scala:1107)
        at org.apache.spark.sql.execution.command.LoadTable.processData(carbonTableSchema.scala:1046)
        at org.apache.spark.sql.execution.command.LoadTable.run(carbonTableSchema.scala:754)
        at org.apache.spark.sql.execution.command.LoadTableByInsert.processData(carbonTableSchema.scala:651)
        at org.apache.spark.sql.execution.command.LoadTableByInsert.run(carbonTableSchema.scala:637)
        at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
        at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
        at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:67)
        at org.apache.spark.sql.Dataset.<init>(Dataset.scala:180)
        at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:65)
        at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:619)
        at $line23.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:36)
        at $line23.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:41)
        at $line23.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:43)
        at $line23.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:45)
        at $line23.$read$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:47)
        at $line23.$read$$iw$$iw$$iw$$iw$$iw.<init>(<console>:49)
        at $line23.$read$$iw$$iw$$iw$$iw.<init>(<console>:51)
        at $line23.$read$$iw$$iw$$iw.<init>(<console>:53)
        at $line23.$read$$iw$$iw.<init>(<console>:55)
        at $line23.$read$$iw.<init>(<console>:57)
        at $line23.$read.<init>(<console>:59)
        at $line23.$read$.<init>(<console>:63)
        at $line23.$read$.<clinit>(<console>)
        at $line23.$eval$.$print$lzycompute(<console>:7)
        at $line23.$eval$.$print(<console>:6)
        at $line23.$eval.$print(<console>)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:497)
        at scala.tools.nsc.interpreter.IMain$ReadEvalPrint.call(IMain.scala:786)
        at scala.tools.nsc.interpreter.IMain$Request.loadAndRun(IMain.scala:1047)
        at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:638)
        at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:637)
        at scala.reflect.internal.util.ScalaClassLoader$class.asContext(ScalaClassLoader.scala:31)
        at scala.reflect.internal.util.AbstractFileClassLoader.asContext(AbstractFileClassLoader.scala:19)
        at scala.tools.nsc.interpreter.IMain$WrappedRequest.loadAndRunReq(IMain.scala:637)
        at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:569)
        at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:565)
        at scala.tools.nsc.interpreter.ILoop.interpretStartingWith(ILoop.scala:807)
        at scala.tools.nsc.interpreter.ILoop.command(ILoop.scala:681)
        at scala.tools.nsc.interpreter.ILoop.processLine(ILoop.scala:395)
        at scala.tools.nsc.interpreter.ILoop.loop(ILoop.scala:415)
        at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply$mcZ$sp(ILoop.scala:923)
        at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
        at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
        at scala.reflect.internal.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:97)
        at scala.tools.nsc.interpreter.ILoop.process(ILoop.scala:909)
        at org.apache.spark.repl.Main$.doMain(Main.scala:69)
        at org.apache.spark.repl.Main$.main(Main.scala:52)
        at org.apache.spark.repl.Main.main(Main.scala)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:497)
        at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:743)
        at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:186)
        at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:211)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:126)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
17/10/28 19:25:22 AUDIT [org.apache.spark.sql.execution.command.LoadTable(207) -- main]: [sh-hadoop-datanode-250-104.elenet.me][master][Thread-1]Dataload failure for dm_test.carbondata_table. Please check the logs
java.lang.NullPointerException
  at org.apache.carbondata.core.datastore.filesystem.AbstractDFSCarbonFile.isDirectory(AbstractDFSCarbonFile.java:88)
  at org.apache.carbondata.core.util.CarbonUtil.deleteRecursive(CarbonUtil.java:364)
  at org.apache.carbondata.core.util.CarbonUtil.access$100(CarbonUtil.java:93)
  at org.apache.carbondata.core.util.CarbonUtil$2.run(CarbonUtil.java:326)
  at org.apache.carbondata.core.util.CarbonUtil$2.run(CarbonUtil.java:322)
  at java.security.AccessController.doPrivileged(Native Method)
  at javax.security.auth.Subject.doAs(Subject.java:422)
  at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1693)
  at org.apache.carbondata.core.util.CarbonUtil.deleteFoldersAndFiles(CarbonUtil.java:322)
  at org.apache.carbondata.spark.load.CarbonLoaderUtil.recordLoadMetadata(CarbonLoaderUtil.java:331)
  at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.updateStatus$1(CarbonDataRDDFactory.scala:595)
  at org.apache.carbondata.spark.rdd.CarbonDataRDDFactory$.loadCarbonData(CarbonDataRDDFactory.scala:1107)
  at org.apache.spark.sql.execution.command.LoadTable.processData(carbonTableSchema.scala:1046)
  at org.apache.spark.sql.execution.command.LoadTable.run(carbonTableSchema.scala:754)
  at org.apache.spark.sql.execution.command.LoadTableByInsert.processData(carbonTableSchema.scala:651)
  at org.apache.spark.sql.execution.command.LoadTableByInsert.run(carbonTableSchema.scala:637)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:67)
  at org.apache.spark.sql.Dataset.<init>(Dataset.scala:180)
  at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:65)
  at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:619)
  ... 50 elided
{code}
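For context: the ERROR at AbstractDFSCarbonFile(141) ("Exception occurred:File does not exist ... Segment_1") followed by the NPE at AbstractDFSCarbonFile.isDirectory(AbstractDFSCarbonFile.java:88) is the classic shape of a FileStatus lookup that failed, was only logged, and left a null field that is dereferenced later. Below is a minimal Scala sketch of that suspected pattern plus a defensive variant; the class and its members are hypothetical reductions, not the actual CarbonData source:
{code}
import java.io.FileNotFoundException
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}

// Hypothetical reduction of the failure mode in the trace: the constructor
// swallows FileNotFoundException (logging "File does not exist", as in the
// ERROR line above) and leaves fileStatus null, so a later isDirectory call
// throws NullPointerException.
class DfsCarbonFileSketch(filePath: String, conf: Configuration) {
  private var fileStatus: FileStatus = _
  try {
    val path = new Path(filePath)
    fileStatus = path.getFileSystem(conf).getFileStatus(path)
  } catch {
    case e: FileNotFoundException =>
      System.err.println(s"Exception occurred:${e.getMessage}")
  }

  // Buggy shape: NPEs when the path never existed.
  def isDirectoryUnsafe: Boolean = fileStatus.isDirectory

  // Defensive shape: a missing path is simply not a directory.
  def isDirectorySafe: Boolean = fileStatus != null && fileStatus.isDirectory
}
{code}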

{code}
[[hidden email] ~]$ hadoop fs -ls /user/master/carbon/store/dm_test/carbondata_table/Fact/Part0
Found 1 items
drwxr-xr-x   - master hadoop          0 2017-10-28 19:25 /user/master/carbon/store/dm_test/carbondata_table/Fact/Part0/Segment_2
{code}
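One way to see the inconsistency is to compare what is on disk with what the load metadata records. The following is a hedged diagnostic sketch: the Metadata/tablestatus location follows the conventional CarbonData 1.x store layout and is an assumption, and the paths are taken from the log lines above:
{code}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

// Table path as it appears in the error log above.
val tablePath = new Path("hdfs://bipcluster/user/master/carbon/store/dm_test/carbondata_table")
val fs: FileSystem = tablePath.getFileSystem(new Configuration())

// Segment directories that actually exist (the listing above shows only Segment_2).
val onDisk = fs.listStatus(new Path(tablePath, "Fact/Part0")).map(_.getPath.getName)
println(s"segments on disk: ${onDisk.mkString(", ")}")

// Loads the metadata claims to have (assumption: 1.x keeps them in Metadata/tablestatus).
val statusFile = new Path(tablePath, "Metadata/tablestatus")
if (fs.exists(statusFile)) {
  val in = fs.open(statusFile)
  try println(scala.io.Source.fromInputStream(in).mkString)
  finally in.close()
}
{code}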


Hive table:
{code}
create table hive_table(id int) partitioned by (dt string)
{code}
CarbonData table:
{code}
create table carbondata_table(id int, dt string)
{code}
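Putting the pieces together, here is a minimal end-to-end repro sketch for the spark-shell (spark 2.1.1 + carbondata 1.2.0). It is a hedged reconstruction: `carbon` is assumed to be a CarbonSession-backed SparkSession as in the report, the Hive-side data load is illustrative, and the exact load history that left the store at Segment_2 is not given in the report:
{code}
// Assumes a CarbonSession-backed SparkSession named `carbon`, as in the report.
carbon.sql("create table hive_table(id int) partitioned by (dt string)")
carbon.sql("insert into hive_table partition (dt='2017-10-10') select 1")

// STORED BY 'carbondata' makes this a CarbonData table; the reported DDL omits it.
carbon.sql("create table carbondata_table(id int, dt string) stored by 'carbondata'")

// Running the overwrite more than once exercises the cleanup of the previous
// segment; in the report the loader then looks for Segment_1 while only
// Segment_2 exists on disk, and the delete path NPEs as shown in the trace.
carbon.sql("insert overwrite table carbondata_table select * from hive_table where dt = '2017-10-10' ").collect
carbon.sql("insert overwrite table carbondata_table select * from hive_table where dt = '2017-10-10' ").collect
{code}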


> NullPointerException when insert overwrite table
> ------------------------------------------------
>
>                 Key: CARBONDATA-1654
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-1654
>             Project: CarbonData
>          Issue Type: Bug
>          Components: data-load
>    Affects Versions: 1.2.0
>         Environment: spark 2.1.1 carbondata 1.2.0
>            Reporter: cen yuhai
>            Priority: Critical


