[ https://issues.apache.org/jira/browse/CARBONDATA-3279?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

weifan updated CARBONDATA-3279:
-------------------------------
    Description: 

create datamap carbondata_mix6_test on table carbondata_mix6 using 'lucene' DMPROPERTIES('INDEX_COLUMNS'='rk','SPLIT_BLOCKLET'='true');

exception:

Error: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 23.0 failed 4 times, most recent failure: Lost task 0.3 in stage 23.0 (TID 394, node29, executor 1): java.nio.channels.ClosedChannelException
    at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:1622)
    at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:104)
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:58)
    at java.io.DataOutputStream.write(DataOutputStream.java:107)
    at java.util.zip.CheckedOutputStream.write(CheckedOutputStream.java:73)
    at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:82)
    at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:140)
    at org.apache.lucene.store.OutputStreamIndexOutput.close(OutputStreamIndexOutput.java:68)
    at org.apache.lucene.util.IOUtils.close(IOUtils.java:89)
    at org.apache.lucene.util.IOUtils.close(IOUtils.java:76)
    at org.apache.lucene.codecs.blocktree.BlockTreeTermsWriter.close(BlockTreeTermsWriter.java:1028)
    at org.apache.lucene.util.IOUtils.close(IOUtils.java:89)
    at org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsWriter.close(PerFieldPostingsFormat.java:241)
    at org.apache.lucene.util.IOUtils.close(IOUtils.java:89)
    at org.apache.lucene.util.IOUtils.close(IOUtils.java:76)
    at org.apache.lucene.index.FreqProxTermsWriter.flush(FreqProxTermsWriter.java:111)
    at org.apache.lucene.index.DefaultIndexingChain.flush(DefaultIndexingChain.java:134)
    at org.apache.lucene.index.DocumentsWriterPerThread.flush(DocumentsWriterPerThread.java:443)
    at org.apache.lucene.index.DocumentsWriter.doFlush(DocumentsWriter.java:539)
    at org.apache.lucene.index.DocumentsWriter.flushAllThreads(DocumentsWriter.java:653)
    at org.apache.lucene.index.IndexWriter.doFlush(IndexWriter.java:3358)
    at org.apache.lucene.index.IndexWriter.flush(IndexWriter.java:3333)
    at org.apache.lucene.index.IndexWriter.shutdown(IndexWriter.java:1117)
    at org.apache.lucene.index.IndexWriter.close(IndexWriter.java:1162)
    at org.apache.carbondata.datamap.lucene.LuceneDataMapBuilder.close(LuceneDataMapBuilder.java:171)
    at org.apache.carbondata.datamap.lucene.LuceneDataMapBuilder.addRow(LuceneDataMapBuilder.java:136)
    at org.apache.carbondata.datamap.IndexDataMapRebuildRDD.internalCompute(IndexDataMapRebuildRDD.scala:394)
    at org.apache.carbondata.spark.rdd.CarbonRDD.compute(CarbonRDD.scala:82)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:99)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:282)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
    Suppressed: java.nio.channels.ClosedChannelException
        at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:1622)
        at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:104)
        at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:58)
        at java.io.DataOutputStream.write(DataOutputStream.java:107)
        at java.util.zip.CheckedOutputStream.write(CheckedOutputStream.java:73)
        at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:82)
        at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:140)
        at java.io.FilterOutputStream.close(FilterOutputStream.java:158)
        at org.apache.lucene.store.OutputStreamIndexOutput.close(OutputStreamIndexOutput.java:70)
        ... 28 more
    Suppressed: java.nio.channels.ClosedChannelException
        at org.apache.hadoop.hdfs.DFSOutputStream.checkClosed(DFSOutputStream.java:1622)
        at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:104)
        at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:58)
        at java.io.DataOutputStream.write(DataOutputStream.java:107)
        at java.util.zip.CheckedOutputStream.write(CheckedOutputStream.java:73)
        at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:82)
        at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:140)
        at org.apache.lucene.store.OutputStreamIndexOutput.getChecksum(OutputStreamIndexOutput.java:80)
        at org.apache.lucene.codecs.CodecUtil.writeCRC(CodecUtil.java:542)
        at org.apache.lucene.codecs.CodecUtil.writeFooter(CodecUtil.java:390)
        at org.apache.lucene.codecs.lucene50.Lucene50PostingsWriter.close(Lucene50PostingsWriter.java:469)
        ... 28 more

Error: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 29.0 failed 4 times, most recent failure: Lost task 0.3 in stage 29.0 (TID 640, node29, executor 1): java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.util.concurrent.ExecutionException: java.io.IOException: Filesystem closed
    at org.apache.carbondata.core.scan.processor.DataBlockIterator.updateScanner(DataBlockIterator.java:153)
    at org.apache.carbondata.core.scan.processor.DataBlockIterator.next(DataBlockIterator.java:108)
    at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.getBatchResult(DetailQueryResultIterator.java:49)
    at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.next(DetailQueryResultIterator.java:41)
    at org.apache.carbondata.core.scan.result.iterator.DetailQueryResultIterator.next(DetailQueryResultIterator.java:31)
    at org.apache.carbondata.core.scan.result.iterator.ChunkRowIterator.hasNext(ChunkRowIterator.java:55)
    at org.apache.carbondata.hadoop.CarbonRecordReader.nextKeyValue(CarbonRecordReader.java:119)
    at org.apache.carbondata.datamap.IndexDataMapRebuildRDD.internalCompute(IndexDataMapRebuildRDD.scala:383)
    at org.apache.carbondata.spark.rdd.CarbonRDD.compute(CarbonRDD.scala:82)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:99)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:282)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
Caused by: java.util.concurrent.ExecutionException: java.util.concurrent.ExecutionException: java.io.IOException: Filesystem closed
    at java.util.concurrent.FutureTask.report(FutureTask.java:122)
    at java.util.concurrent.FutureTask.get(FutureTask.java:192)
    at org.apache.carbondata.core.scan.processor.DataBlockIterator.processNextBlocklet(DataBlockIterator.java:164)
    at org.apache.carbondata.core.scan.processor.DataBlockIterator.updateScanner(DataBlockIterator.java:141)
    ... 16 more
Caused by: java.util.concurrent.ExecutionException: java.io.IOException: Filesystem closed
    at java.util.concurrent.FutureTask.report(FutureTask.java:122)
    at java.util.concurrent.FutureTask.get(FutureTask.java:192)
    at org.apache.carbondata.core.scan.processor.DataBlockIterator$1.call(DataBlockIterator.java:210)
    at org.apache.carbondata.core.scan.processor.DataBlockIterator$1.call(DataBlockIterator.java:205)
    at java.util.concurrent.FutureTask.run(FutureTask.java:266)
    ... 3 more
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:798)
    at org.apache.hadoop.hdfs.DFSInputStream.readWithStrategy(DFSInputStream.java:827)
    at org.apache.hadoop.hdfs.DFSInputStream.read(DFSInputStream.java:889)
    at java.io.DataInputStream.readFully(DataInputStream.java:195)
    at java.io.DataInputStream.readFully(DataInputStream.java:169)
    at org.apache.carbondata.core.datastore.impl.DFSFileReaderImpl.read(DFSFileReaderImpl.java:85)
    at org.apache.carbondata.core.datastore.impl.DFSFileReaderImpl.readByteArray(DFSFileReaderImpl.java:52)
    at org.apache.carbondata.core.datastore.impl.DFSFileReaderImpl.readByteBuffer(DFSFileReaderImpl.java:141)
    at org.apache.carbondata.core.datastore.chunk.reader.dimension.v3.CompressedDimensionChunkFileBasedReaderV3.readRawDimensionChunksInGroup(CompressedDimensionChunkFileBasedReaderV3.java:183)
    at org.apache.carbondata.core.datastore.chunk.reader.dimension.AbstractChunkReaderV2V3Format.readRawDimensionChunks(AbstractChunkReaderV2V3Format.java:88)
    at org.apache.carbondata.core.indexstore.blockletindex.BlockletDataRefNode.readDimensionChunks(BlockletDataRefNode.java:151)
    at org.apache.carbondata.core.scan.scanner.impl.BlockletFullScanner.readBlocklet(BlockletFullScanner.java:145)
    at org.apache.carbondata.core.scan.processor.DataBlockIterator.readNextBlockletColumnChunks(DataBlockIterator.java:185)
    at org.apache.carbondata.core.scan.processor.DataBlockIterator.access$500(DataBlockIterator.java:46)
    at org.apache.carbondata.core.scan.processor.DataBlockIterator$2.call(DataBlockIterator.java:231)
    at org.apache.carbondata.core.scan.processor.DataBlockIterator$2.call(DataBlockIterator.java:226)
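A note on the second failure: "Filesystem closed" thrown from DFSClient.checkOpen is the classic symptom of one thread closing a cached, shared Hadoop FileSystem instance while another thread is still reading through it. The sketch below illustrates only that sharing mechanism; it is not CarbonData source code and not a confirmed root cause for this issue, and the class name FsCacheDemo is made up for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FsCacheDemo {
    public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at an HDFS cluster; the local
        // filesystem fails differently after close().
        Configuration conf = new Configuration();
        // FileSystem.get() returns one cached instance per
        // (scheme, authority, user), unless fs.hdfs.impl.disable.cache=true.
        FileSystem a = FileSystem.get(conf);
        FileSystem b = FileSystem.get(conf);  // same object as 'a'
        a.close();                            // shuts the shared DFSClient
        b.open(new Path("/tmp/anyfile"));     // java.io.IOException: Filesystem closed
    }
}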
> When I create a datamap using Lucene
> -------------------------------------
>
>                 Key: CARBONDATA-3279
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-3279
>             Project: CarbonData
>          Issue Type: Bug
>          Components: spark-integration
>    Affects Versions: 1.5.0
>            Reporter: weifan
>            Priority: Major
>

--
This message was sent by Atlassian JIRA
(v7.6.3#76005)