Hi All, We have a Datastax/cassandra cluster and I am trying to see if I can get Carbondata working there. scala> import com.datastax.spark.connector._ scala> import org.apache.spark.sql.SaveMode scala> import org.apache.spark.sql.CarbonContext scala> import org.apache.spark.sql.types._ scala> val cc = new CarbonContext(sc, "cfs://127.0.0.1/opt/CarbonStore") scala> val df = cc.read.parquet("file:///home/cassandra/testdata-30day/cassandra/zone.parquet") scala> df.write.format("carbondata").option("tableName", "zone").option("compress", "true").option("TempCSV","false").mode(SaveMode.Overwrite).save() ============================================== java.io.FileNotFoundException: /opt/CarbonStore/default/zone/Metadata/schema (No such file or directory) at java.io.FileOutputStream.open0(Native Method) at java.io.FileOutputStream.open(FileOutputStream.java:270) at java.io.FileOutputStream.<init>(FileOutputStream.java:213) at java.io.FileOutputStream.<init>(FileOutputStream.java:133) at org.apache.carbondata.core.datastore.impl.FileFactory.getDataOutputStream(FileFactory.java:207) at org.apache.carbondata.core.writer.ThriftWriter.open(ThriftWriter.java:84) at org.apache.spark.sql.hive.CarbonMetastore.createTableFromThrift(CarbonMetastore.scala:293) at org.apache.spark.sql.execution.command.CreateTable.run(carbonTableSchema.scala:163) at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult$lzycompute(commands.scala:58) at org.apache.spark.sql.execution.ExecutedCommand.sideEffectResult(commands.scala:56) at org.apache.spark.sql.execution.ExecutedCommand.doExecute(commands.scala:70) at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:132) at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$5.apply(SparkPlan.scala:130) at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:150) at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:130) at 
org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:55) at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:55) at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:145) at org.apache.spark.sql.DataFrame.<init>(DataFrame.scala:130) at org.apache.spark.sql.CarbonContext.sql(CarbonContext.scala:139) at org.apache.carbondata.spark.CarbonDataFrameWriter.saveAsCarbonFile(CarbonDataFrameWriter.scala:39) at org.apache.spark.sql.CarbonSource.createRelation(CarbonDatasourceRelation.scala:109) at org.apache.spark.sql.execution.datasources.ResolvedDataSource$.apply(ResolvedDataSource.scala:222) at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:148) ============================================== Thanks, Sanoj carbon-datastax.txt (38K) Download Attachment |
Hi,
Currently, CarbonData does not support the CFS file system. I think we can try to add support for it. Best regards, David CaiQiang.
Best Regards
David Cai |
Free forum by Nabble | Edit this page |