
method not found issue when creating table

Posted by lionel061201 on Aug 22, 2017; 6:53am
URL: http://apache-carbondata-dev-mailing-list-archive.168.s1.nabble.com/method-not-found-issue-when-creating-table-tp20640.html

Hi dev,
When I was trying to create a table in Carbon, the error below occurred. Does
anyone know how to fix it?

Cluster: 1 driver, 6 datanodes

$SPARK_HOME = /opt/cloudera/parcels/SPARK2/lib/spark2/

Configuration in spark-defaults.conf:

#Carbondata Settings

spark.yarn.dist.files=/opt/cloudera/parcels/SPARK2/lib/spark2/conf/carbon.properties

spark.yarn.dist.archives=/opt/cloudera/parcels/SPARK2/lib/spark2/carbonlib/carbondata.tar.gz

spark.executor.extraClassPath=/opt/cloudera/parcels/SPARK2/lib/spark2/carbonlib/*

spark.driver.extraClassPath=/opt/cloudera/parcels/SPARK2/lib/spark2/carbonlib/*

Carbon jar (distributed to the driver and all datanodes):

/opt/cloudera/parcels/SPARK2/lib/spark2/carbonlib/carbondata_2.11-1.2.0-SNAPSHOT-shade-hadoop2.2.0.jar

spark2-shell --master yarn --deploy-mode client --num-executors 6 --driver-memory 20G --executor-memory 50G --executor-cores 4
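
(As a sanity check that the carbonlib classpath settings above were actually
picked up by this shell; sc.getConf here is just the session's active SparkConf:)

scala> sc.getConf.get("spark.driver.extraClassPath")

scala> sc.getConf.get("spark.executor.extraClassPath")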


scala> import org.apache.spark.sql.CarbonSession._

scala> import org.apache.spark.sql.SparkSession

scala> import org.apache.carbondata.core.util.CarbonProperties

scala> import org.apache.carbondata.core.constants.CarbonCommonConstants

scala> val cc = SparkSession.builder().appName("CL_TEST").config(sc.getConf).getOrCreateCarbonSession("hdfs://nameservice2/carbon2/carbonstore")

scala> CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy-MM-dd")

scala> cc.sql("use default")

scala> cc.sql("create table test001(vin string) stored by 'carbondata'").show()

17/08/22 14:34:35 AUDIT command.CreateTable: [****.*****.com][carbon2][Thread-1]Creating Table with Database name [default] and Table name [test001]

java.lang.NoSuchMethodError: org.apache.spark.sql.catalyst.catalog.CatalogTable.copy(Lorg/apache/spark/sql/catalyst/TableIdentifier;Lorg/apache/spark/sql/catalyst/catalog/CatalogTableType;Lorg/apache/spark/sql/catalyst/catalog/CatalogStorageFormat;Lorg/apache/spark/sql/types/StructType;Lscala/Option;Lscala/collection/Seq;Lscala/Option;Ljava/lang/String;JJLscala/collection/immutable/Map;Lscala/Option;Lscala/Option;Lscala/Option;Lscala/Option;Lscala/collection/Seq;Z)Lorg/apache/spark/sql/catalyst/catalog/CatalogTable;
  at org.apache.spark.sql.CarbonSource$.updateCatalogTableWithCarbonSchema(CarbonSource.scala:277)
  at org.apache.spark.sql.execution.command.DDLStrategy.apply(DDLStrategy.scala:135)
  at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:62)
  at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$1.apply(QueryPlanner.scala:62)
  at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:434)
  at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
  at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:439)
  at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:92)
  at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:77)
  at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2$$anonfun$apply$2.apply(QueryPlanner.scala:74)
  at scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
  at scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:157)
  at scala.collection.Iterator$class.foreach(Iterator.scala:893)
  at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
  at scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:157)
  at scala.collection.AbstractIterator.foldLeft(Iterator.scala:1336)
  at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:74)
  at org.apache.spark.sql.catalyst.planning.QueryPlanner$$anonfun$2.apply(QueryPlanner.scala:66)
  at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:434)
  at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
  at org.apache.spark.sql.catalyst.planning.QueryPlanner.plan(QueryPlanner.scala:92)
  at org.apache.spark.sql.execution.QueryExecution.sparkPlan$lzycompute(QueryExecution.scala:79)
  at org.apache.spark.sql.execution.QueryExecution.sparkPlan(QueryExecution.scala:75)
  at org.apache.spark.sql.execution.QueryExecution.executedPlan$lzycompute(QueryExecution.scala:84)
  at org.apache.spark.sql.execution.QueryExecution.executedPlan(QueryExecution.scala:84)
  at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:87)
  at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:87)
  at org.apache.spark.sql.Dataset.<init>(Dataset.scala:185)
  at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:64)
  at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:592)
  at org.apache.spark.sql.execution.command.CreateTable.processSchema(carbonTableSchema.scala:496)
  at org.apache.spark.sql.execution.command.CreateTable.run(carbonTableSchema.scala:452)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
  at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:74)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:114)
  at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:135)
  at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
  at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:132)
  at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:113)
  at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:87)
  at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:87)
  at org.apache.spark.sql.Dataset.<init>(Dataset.scala:185)
  at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:64)
  at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:592)
  ... 50 elided
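
Could this be a binary mismatch between the Spark version the CarbonData jar
was built against and the SPARK2 parcel on the cluster? The copy method of a
case class like CatalogTable changes whenever its fields change across Spark
releases, which would produce exactly this kind of NoSuchMethodError. As a
diagnostic sketch (plain reflection, runnable in the same shell), one can print
the copy signature the running Spark actually exposes and compare it with the
one in the error:

scala> spark.version

scala> classOf[org.apache.spark.sql.catalyst.catalog.CatalogTable].getDeclaredMethods.filter(_.getName == "copy").foreach(println)

If the printed signature differs from the one in the stack trace above, the
jar was compiled against a different Spark minor version.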