     [ https://issues.apache.org/jira/browse/CARBONDATA-1726?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Chetan Bhat updated CARBONDATA-1726:
------------------------------------

Description:

Steps:

// prepare csv file for batch loading
cd /srv/spark2.2Bigdata/install/hadoop/datanode/bin

// generate streamSample.csv
100000001,batch_1,city_1,0.1,school_1:school_11$20
100000002,batch_2,city_2,0.2,school_2:school_22$30
100000003,batch_3,city_3,0.3,school_3:school_33$40
100000004,batch_4,city_4,0.4,school_4:school_44$50
100000005,batch_5,city_5,0.5,school_5:school_55$60

// put to hdfs /tmp/streamSample.csv
./hadoop fs -put streamSample.csv /tmp

// spark-beeline
cd /srv/spark2.2Bigdata/install/spark/sparkJdbc
bin/spark-submit --master yarn-client --executor-memory 10G --executor-cores 5 --driver-memory 5G --num-executors 3 --class org.apache.carbondata.spark.thriftserver.CarbonThriftServer /srv/spark2.2Bigdata/install/spark/sparkJdbc/carbonlib/carbondata_2.11-1.3.0-SNAPSHOT-shade-hadoop2.7.2.jar "hdfs://hacluster/user/sparkhive/warehouse"
bin/beeline -u jdbc:hive2://10.18.98.34:23040

CREATE TABLE stream_table(
  id INT,
  name STRING,
  city STRING,
  salary FLOAT
)
STORED BY 'carbondata'
TBLPROPERTIES('streaming'='true', 'sort_columns'='name');

LOAD DATA LOCAL INPATH 'hdfs://hacluster/chetan/streamSample.csv' INTO TABLE stream_table OPTIONS('HEADER'='false');

// spark-shell
cd /srv/spark2.2Bigdata/install/spark/sparkJdbc
bin/spark-shell --master yarn-client --executor-memory 10G --executor-cores 5 --driver-memory 5G --num-executors 3 --jars /srv/spark2.2Bigdata/install/spark/sparkJdbc/carbonlib/carbondata_2.11-1.3.0-SNAPSHOT-shade-hadoop2.7.2.jar

import java.io.{File, PrintWriter}
import java.net.ServerSocket

import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.hive.CarbonRelation
import org.apache.spark.sql.streaming.{ProcessingTime, StreamingQuery}

import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}

CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")

import org.apache.spark.sql.CarbonSession._

val carbonSession = SparkSession.
  builder().
  appName("StreamExample").
  config("spark.sql.warehouse.dir", "hdfs://hacluster/user/sparkhive/warehouse").
  config("javax.jdo.option.ConnectionURL", "jdbc:mysql://10.18.98.34:3306/sparksql?characterEncoding=UTF-8").
  config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver").
  config("javax.jdo.option.ConnectionPassword", "huawei").
  config("javax.jdo.option.ConnectionUserName", "sparksql").
  getOrCreateCarbonSession()

carbonSession.sparkContext.setLogLevel("ERROR")

carbonSession.sql("select * from stream_table").show

def writeSocket(serverSocket: ServerSocket): Thread = {
  val thread = new Thread() {
    override def run(): Unit = {
      // wait for client to connection request and accept
      val clientSocket = serverSocket.accept()
      val socketWriter = new PrintWriter(clientSocket.getOutputStream())
      var index = 0
      for (_ <- 1 to 1000) {
        // write 5 records per iteration
        for (_ <- 0 to 100) {
          index = index + 1
          socketWriter.println(index.toString + ",name_" + index
            + ",city_" + index + "," + (index * 10000.00).toString +
            ",school_" + index + ":school_" + index + index + "$" + index)
        }
        socketWriter.flush()
        Thread.sleep(2000)
      }
      socketWriter.close()
      System.out.println("Socket closed")
    }
  }
  thread.start()
  thread
}

def startStreaming(spark: SparkSession, tablePath: CarbonTablePath): Thread = {
  val thread = new Thread() {
    override def run(): Unit = {
      var qry: StreamingQuery = null
      try {
        val readSocketDF = spark.readStream
          .format("socket")
          .option("host", "10.18.98.34")
          .option("port", 7071)
          .load()

        // Write data from socket stream to carbondata file
        qry = readSocketDF.writeStream
          .format("carbondata")
          .trigger(ProcessingTime("5 seconds"))
          .option("checkpointLocation", tablePath.getStreamingCheckpointDir)
          .option("tablePath", tablePath.getPath)
          .start()

        qry.awaitTermination()
      } catch {
        case _: InterruptedException =>
          println("Done reading and writing streaming data")
      } finally {
        qry.stop()
      }
    }
  }
  thread.start()
  thread
}

val streamTableName = s"stream_table"

val carbonTable = CarbonEnv.getInstance(carbonSession).carbonMetastore.
  lookupRelation(Some("default"), streamTableName)(carbonSession).asInstanceOf[CarbonRelation].
  tableMeta.carbonTable

val tablePath = CarbonStorePath.getCarbonTablePath(carbonTable.getAbsoluteTableIdentifier)

val serverSocket = new ServerSocket(7071)
val socketThread = writeSocket(serverSocket)
val streamingThread = startStreaming(carbonSession, tablePath)

*Issue: A NullPointerException is thrown when streaming is started.*

The issue still occurs when the executor and driver cores and memory are increased while launching the spark-shell.

scala> import java.io.{File, PrintWriter}
import java.io.{File, PrintWriter}

scala> import java.net.ServerSocket
import java.net.ServerSocket

scala> import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.{CarbonEnv, SparkSession}

scala> import org.apache.spark.sql.hive.CarbonRelation
import org.apache.spark.sql.hive.CarbonRelation

scala> import org.apache.spark.sql.streaming.{ProcessingTime, StreamingQuery}
import org.apache.spark.sql.streaming.{ProcessingTime, StreamingQuery}

scala> import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.constants.CarbonCommonConstants

scala> import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.CarbonProperties

scala> import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}
import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}

scala> CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
res0: org.apache.carbondata.core.util.CarbonProperties = org.apache.carbondata.core.util.CarbonProperties@7212b28e

scala> import org.apache.spark.sql.CarbonSession._
import org.apache.spark.sql.CarbonSession._

scala> val carbonSession = SparkSession.
     |   builder().
     |   appName("StreamExample").
     |   config("spark.sql.warehouse.dir", "hdfs://hacluster/user/sparkhive/warehouse").
     |   config("javax.jdo.option.ConnectionURL", "jdbc:mysql://10.18.98.34:3306/sparksql?characterEncoding=UTF-8").
     |   config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver").
     |   config("javax.jdo.option.ConnectionPassword", "huawei").
     |   config("javax.jdo.option.ConnectionUserName", "sparksql").
     |   getOrCreateCarbonSession()
carbonSession: org.apache.spark.sql.SparkSession = org.apache.spark.sql.CarbonSession@7593716d

scala> carbonSession.sparkContext.setLogLevel("ERROR")

scala> carbonSession.sql("select * from stream_table").show
+---------+-------+------+------+
|       id|   name|  city|salary|
+---------+-------+------+------+
|100000001|batch_1|city_1|   0.1|
|100000002|batch_2|city_2|   0.2|
|100000003|batch_3|city_3|   0.3|
|100000004|batch_4|city_4|   0.4|
|100000005|batch_5|city_5|   0.5|
+---------+-------+------+------+

scala> def writeSocket(serverSocket: ServerSocket): Thread = {
     |   val thread = new Thread() {
     |     override def run(): Unit = {
     |       // wait for client to connection request and accept
     |       val clientSocket = serverSocket.accept()
     |       val socketWriter = new PrintWriter(clientSocket.getOutputStream())
     |       var index = 0
     |       for (_ <- 1 to 1000) {
     |         // write 5 records per iteration
     |         for (_ <- 0 to 100) {
     |           index = index + 1
     |           socketWriter.println(index.toString + ",name_" + index
     |             + ",city_" + index + "," + (index * 10000.00).toString +
     |             ",school_" + index + ":school_" + index + index + "$" + index)
     |         }
     |         socketWriter.flush()
     |         Thread.sleep(2000)
     |       }
     |       socketWriter.close()
     |       System.out.println("Socket closed")
     |     }
     |   }
     |   thread.start()
     |   thread
     | }
writeSocket: (serverSocket: java.net.ServerSocket)Thread

scala> def startStreaming(spark: SparkSession, tablePath: CarbonTablePath): Thread = {
     |   val thread = new Thread() {
     |     override def run(): Unit = {
     |       var qry: StreamingQuery = null
     |       try {
     |         val readSocketDF = spark.readStream
     |           .format("socket")
     |           .option("host", "10.18.98.34")
     |           .option("port", 7071)
     |           .load()
     |
     |         // Write data from socket stream to carbondata file
     |         qry = readSocketDF.writeStream
     |           .format("carbondata")
     |           .trigger(ProcessingTime("5 seconds"))
     |           .option("checkpointLocation", tablePath.getStreamingCheckpointDir)
     |           .option("tablePath", tablePath.getPath)
     |           .start()
     |
     |         qry.awaitTermination()
     |       } catch {
     |         case _: InterruptedException =>
     |           println("Done reading and writing streaming data")
     |       } finally {
     |         qry.stop()
     |       }
     |     }
     |   }
     |   thread.start()
     |   thread
     | }
startStreaming: (spark: org.apache.spark.sql.SparkSession, tablePath: org.apache.carbondata.core.util.path.CarbonTablePath)Thread

scala> val streamTableName = s"stream_table"
streamTableName: String = stream_table

scala> val carbonTable = CarbonEnv.getInstance(carbonSession).carbonMetastore.
     |   lookupRelation(Some("default"), streamTableName)(carbonSession).asInstanceOf[CarbonRelation].
     |   tableMeta.carbonTable
carbonTable: org.apache.carbondata.core.metadata.schema.table.CarbonTable = org.apache.carbondata.core.metadata.schema.table.CarbonTable@62cf8fda

scala> val tablePath = CarbonStorePath.getCarbonTablePath(carbonTable.getAbsoluteTableIdentifier)
tablePath: org.apache.carbondata.core.util.path.CarbonTablePath = hdfs://hacluster/user/hive/warehouse/carbon.store/default/stream_table

scala> val serverSocket = new ServerSocket(7071)
serverSocket: java.net.ServerSocket = ServerSocket[addr=0.0.0.0/0.0.0.0,localport=7071]

scala> val socketThread = writeSocket(serverSocket)
socketThread: Thread = Thread[Thread-103,5,main]

scala> val streamingThread = startStreaming(carbonSession, tablePath)
streamingThread: Thread = Thread[Thread-104,5,main]

scala> Exception in thread "Thread-104" java.lang.NullPointerException
        at $line29.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$anon$1.run(<console>:59)

Expected: startStreaming should not throw an exception and streaming should start successfully.
was:

Steps:

// prepare csv file for batch loading
cd /srv/spark2.2Bigdata/install/hadoop/datanode/bin

// generate streamSample.csv
100000001,batch_1,city_1,0.1,school_1:school_11$20
100000002,batch_2,city_2,0.2,school_2:school_22$30
100000003,batch_3,city_3,0.3,school_3:school_33$40
100000004,batch_4,city_4,0.4,school_4:school_44$50
100000005,batch_5,city_5,0.5,school_5:school_55$60

// put to hdfs /tmp/streamSample.csv
./hadoop fs -put streamSample.csv /tmp

// spark-beeline
cd /srv/spark2.2Bigdata/install/spark/sparkJdbc
bin/spark-submit --master yarn-client --executor-memory 10G --executor-cores 5 --driver-memory 5G --num-executors 3 --class org.apache.carbondata.spark.thriftserver.CarbonThriftServer /srv/spark2.2Bigdata/install/spark/sparkJdbc/carbonlib/carbondata_2.11-1.3.0-SNAPSHOT-shade-hadoop2.7.2.jar "hdfs://hacluster/user/sparkhive/warehouse"
bin/beeline -u jdbc:hive2://10.18.98.34:23040

CREATE TABLE stream_table(
  id INT,
  name STRING,
  city STRING,
  salary FLOAT
)
STORED BY 'carbondata'
TBLPROPERTIES('streaming'='true', 'sort_columns'='name');

LOAD DATA LOCAL INPATH 'hdfs://hacluster/chetan/streamSample.csv' INTO TABLE stream_table OPTIONS('HEADER'='false');

// spark-shell
cd /srv/spark2.2Bigdata/install/spark/sparkJdbc
bin/spark-shell --master yarn-client

import java.io.{File, PrintWriter}
import java.net.ServerSocket

import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.hive.CarbonRelation
import org.apache.spark.sql.streaming.{ProcessingTime, StreamingQuery}

import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}

CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")

import org.apache.spark.sql.CarbonSession._

val carbonSession = SparkSession.
  builder().
  appName("StreamExample").
  config("spark.sql.warehouse.dir", "hdfs://hacluster/user/sparkhive/warehouse").
  config("javax.jdo.option.ConnectionURL", "jdbc:mysql://10.18.98.34:3306/sparksql?characterEncoding=UTF-8").
  config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver").
  config("javax.jdo.option.ConnectionPassword", "huawei").
  config("javax.jdo.option.ConnectionUserName", "sparksql").
  getOrCreateCarbonSession()

carbonSession.sparkContext.setLogLevel("ERROR")

carbonSession.sql("select * from stream_table").show

*Issue: Select query from spark-shell does not execute successfully for streaming table load.*

The issue still occurs when the executor and driver cores and memory are increased while launching the spark-shell:

bin/spark-shell --master yarn-client --executor-memory 10G --executor-cores 5 --driver-memory 5G --num-executors 3

scala> import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.constants.CarbonCommonConstants

scala> import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.CarbonProperties

scala> import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}
import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}

scala> CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
res29: org.apache.carbondata.core.util.CarbonProperties = org.apache.carbondata.core.util.CarbonProperties@67b056e7

scala> import org.apache.spark.sql.CarbonSession._
import org.apache.spark.sql.CarbonSession._

scala> val carbonSession = SparkSession.
     |   builder().
     |   appName("StreamExample").
     |   config("spark.sql.warehouse.dir", "hdfs://hacluster/user/sparkhive/warehouse").
     |   config("javax.jdo.option.ConnectionURL", "jdbc:mysql://10.18.98.34:3306/sparksql?characterEncoding=UTF-8").
     |   config("javax.jdo.option.ConnectionDriverName", "com.mysql.jdbc.Driver").
     |   config("javax.jdo.option.ConnectionPassword", "huawei").
     |   config("javax.jdo.option.ConnectionUserName", "sparksql").
     |   getOrCreateCarbonSession()
carbonSession: org.apache.spark.sql.SparkSession = org.apache.spark.sql.CarbonSession@1d0590bc

scala> carbonSession.sparkContext.setLogLevel("ERROR")

scala> carbonSession.sql("select * from stream_table").show
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 25.0 failed 4 times, most recent failure: Lost task 0.3 in stage 25.0 (TID 65, BLR1000014269, executor 8): java.lang.IllegalStateException: unread block data
        at java.io.ObjectInputStream$BlockDataInputStream.setBlockDataMode(ObjectInputStream.java:2424)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1383)
        at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:1993)
        at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1918)
        at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1801)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
        at java.io.ObjectInputStream.readObject(ObjectInputStream.java:371)
        at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:75)
        at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:114)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:258)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
        at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1435)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1423)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1422)
        at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1422)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:802)
        at scala.Option.foreach(Option.scala:257)
        at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:802)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1650)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1605)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1594)
        at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
        at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:628)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:1918)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:1931)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:1944)
        at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:333)
        at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
        at org.apache.spark.sql.Dataset$$anonfun$org$apache$spark$sql$Dataset$$execute$1$1.apply(Dataset.scala:2371)
        at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:57)
        at org.apache.spark.sql.Dataset.withNewExecutionId(Dataset.scala:2765)
        at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$execute$1(Dataset.scala:2370)
        at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collect(Dataset.scala:2377)
        at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2113)
        at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2112)
        at org.apache.spark.sql.Dataset.withTypedCallback(Dataset.scala:2795)
        at org.apache.spark.sql.Dataset.head(Dataset.scala:2112)
        at org.apache.spark.sql.Dataset.take(Dataset.scala:2327)
        at org.apache.spark.sql.Dataset.showString(Dataset.scala:248)
        at org.apache.spark.sql.Dataset.show(Dataset.scala:636)
        at org.apache.spark.sql.Dataset.show(Dataset.scala:595)
        at org.apache.spark.sql.Dataset.show(Dataset.scala:604)
        ... 50 elided
Caused by: java.lang.IllegalStateException: unread block data
        at java.io.ObjectInputStream$BlockDataInputStream.setBlockDataMode(ObjectInputStream.java:2424)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1383)
        at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:1993)
        at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:1918)
        at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1801)
        at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1351)
        at java.io.ObjectInputStream.readObject(ObjectInputStream.java:371)
        at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:75)
        at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:114)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:258)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:745)

Expected: Select query from spark-shell should execute successfully for streaming table load.
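A notable difference between the original steps above and the updated steps in the current description is that spark-shell is now launched with --jars pointing at the CarbonData shade jar. The IllegalStateException: unread block data is raised while the executor deserializes the task, which is consistent with (though not proof of) required classes being missing from the executor classpath. A purely illustrative check from the shell, offered as an assumption and not part of the original report, is to print which jars were registered with the SparkContext:

// Illustrative diagnostic only: list the jars Spark will ship to executors.
// If the carbondata shade jar does not appear here, the shell was started
// without --jars and executors may be unable to deserialize Carbon task state.
println(carbonSession.sparkContext.getConf.get("spark.jars", "<not set>"))
carbonSession.sparkContext.listJars().foreach(println)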
> Carbon1.3.0-Streaming - Select query from spark-shell does not execute successfully for streaming table load
> -------------------------------------------------------------------------------------------------------------
>
>                 Key: CARBONDATA-1726
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-1726
>             Project: CarbonData
>          Issue Type: Bug
>          Components: data-query
>    Affects Versions: 1.3.0
>        Environment: 3 node ant cluster SUSE 11 SP4
>           Reporter: Chetan Bhat
>           Priority: Blocker
>             Labels: Functional
>
--
This message was sent by Atlassian JIRA
(v6.4.14#64029)