Github user xuchuanyin commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2624#discussion_r210213840

--- Diff: datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomIndexFileStore.java ---
@@ -0,0 +1,259 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.datamap.bloom;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
+import org.apache.carbondata.core.datastore.filesystem.CarbonFileFilter;
+import org.apache.carbondata.core.datastore.impl.FileFactory;
+import org.apache.carbondata.core.fileoperations.FileWriteOperation;
+import org.apache.carbondata.core.reader.ThriftReader;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.writer.ThriftWriter;
+import org.apache.carbondata.format.MergedBloomIndex;
+import org.apache.carbondata.format.MergedBloomIndexHeader;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.bloom.CarbonBloomFilter;
+import org.apache.thrift.TBase;
+
+public class BloomIndexFileStore {
+
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(BloomIndexFileStore.class.getName());
+
+  /* suffix of original generated file */
+  public static final String BLOOM_INDEX_SUFFIX = ".bloomindex";
+  /* suffix of merged bloom index file */
+  public static final String MERGE_BLOOM_INDEX_SUFFIX = ".bloomindexmerge";
+  /* directory to store merged bloom index files */
+  public static final String MERGE_BLOOM_INDEX_SHARD_NAME = "mergeShard";
+  /**
+   * flag file for merging:
+   * if the flag file exists, queries won't use mergeShard;
+   * if the flag file does not exist and mergeShard was generated, queries will use mergeShard
+   */
+  public static final String MERGE_INPROGRESS_FILE = "mergeShard.inprogress";
+
+
+  public static void mergeBloomIndexFile(String dmSegmentPathString, List<String> indexCols) {
+    // get all shard paths of old store
+    CarbonFile segmentPath = FileFactory.getCarbonFile(dmSegmentPathString,
+        FileFactory.getFileType(dmSegmentPathString));
+    CarbonFile[] shardPaths = segmentPath.listFiles(new CarbonFileFilter() {
+      @Override
+      public boolean accept(CarbonFile file) {
+        return file.isDirectory() && !file.getName().equals(MERGE_BLOOM_INDEX_SHARD_NAME);
+      }
+    });
+
+    String mergeShardPath = dmSegmentPathString + File.separator + MERGE_BLOOM_INDEX_SHARD_NAME;
+    String mergeInprogressFile = dmSegmentPathString + File.separator + MERGE_INPROGRESS_FILE;
+    try {
+      // delete mergeShard folder if it exists
+      if (FileFactory.isFileExist(mergeShardPath)) {
+        FileFactory.deleteFile(mergeShardPath, FileFactory.getFileType(mergeShardPath));
+      }
+      // create flag file before creating mergeShard folder
+      if (!FileFactory.isFileExist(mergeInprogressFile)) {
+        FileFactory.createNewFile(
+            mergeInprogressFile, FileFactory.getFileType(mergeInprogressFile));
+      }
+      // prepare mergeShard output folder
+      if (!FileFactory.mkdirs(mergeShardPath, FileFactory.getFileType(mergeShardPath))) {
+        throw new RuntimeException("Failed to create directory " + mergeShardPath);
+      }
+    } catch (IOException e) {
+      LOGGER.error(e, "Error occurs while create directory " + mergeShardPath);
+      throw new RuntimeException("Error occurs while create directory " + mergeShardPath);
+    }
+
+    // for each index column, merge the bloomindex files from all shards into one
+    for (String indexCol : indexCols) {
+      MergedBloomIndexHeader indexHeader = new MergedBloomIndexHeader();
+      MergedBloomIndex mergedBloomIndex = new MergedBloomIndex();
+      List<String> shardNames = new ArrayList<>();
+      List<ByteBuffer> data = new ArrayList<>();
+      try {
+        for (CarbonFile shardPath : shardPaths) {
+          String bloomIndexFile = getBloomIndexFile(shardPath.getCanonicalPath(), indexCol);
+          DataInputStream dataInputStream = FileFactory.getDataInputStream(
+              bloomIndexFile, FileFactory.getFileType(bloomIndexFile));
+          byte[] bytes = new byte[(int) FileFactory.getCarbonFile(bloomIndexFile).getSize()];
+          try {
+            dataInputStream.readFully(bytes);
+            shardNames.add(shardPath.getName());
+            data.add(ByteBuffer.wrap(bytes));
+          } finally {
+            dataInputStream.close();
+          }
+        }
+        indexHeader.setShard_names(shardNames);
+        mergedBloomIndex.setFileData(data);
+        // write segment level file
+        String mergeIndexFileName = getMergeBloomIndexFile(mergeShardPath, indexCol);
+        ThriftWriter thriftWriter = new ThriftWriter(mergeIndexFileName, false);
--- End diff --

You write the merged bloom index through thrift? I'd prefer to write it in raw bytes, so that this modification and the bloomfilter datamap module do not need to depend on carbon-format directly.

---
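For context, the flag-file protocol described in the header comment above implies a query-side check along the lines of the sketch below. This is illustrative only: MergeShardCheck and useMergeShard are hypothetical names, not part of the PR; the FileFactory.isFileExist calls are the same ones the diff already uses, and the assumption that the flag file is removed once merging finishes follows from the header comment.

    package org.apache.carbondata.datamap.bloom;

    import java.io.File;
    import java.io.IOException;

    import org.apache.carbondata.core.datastore.impl.FileFactory;

    public final class MergeShardCheck {
      // Hypothetical helper: decide whether a query may read the merged
      // bloom index instead of the per-shard bloomindex files.
      public static boolean useMergeShard(String dmSegmentPath) throws IOException {
        String mergeShardPath = dmSegmentPath + File.separator
            + BloomIndexFileStore.MERGE_BLOOM_INDEX_SHARD_NAME;
        String inProgressFlag = dmSegmentPath + File.separator
            + BloomIndexFileStore.MERGE_INPROGRESS_FILE;
        // The flag file is created before mergeShard is built, so its presence
        // means the merged files may be incomplete and must not be used.
        return FileFactory.isFileExist(mergeShardPath)
            && !FileFactory.isFileExist(inProgressFlag);
      }
    }

A query that still sees the flag file simply falls back to the per-shard files, so an interrupted merge never yields wrong results.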
Github user xuchuanyin commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/2624#discussion_r210212039

--- Diff: datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java ---
@@ -222,102 +220,95 @@ public DataMapBuilder createBuilder(Segment segment, String shardName,
   @Override
   public List<CoarseGrainDataMap> getDataMaps(Segment segment) throws IOException {
-    List<CoarseGrainDataMap> dataMaps = new ArrayList<CoarseGrainDataMap>(1);
+    List<CoarseGrainDataMap> dataMaps = new ArrayList<>();
     try {
       Set<String> shardPaths = segmentMap.get(segment.getSegmentNo());
       if (shardPaths == null) {
-        String dataMapStorePath = DataMapWriter.getDefaultDataMapPath(
-            getCarbonTable().getTablePath(), segment.getSegmentNo(), dataMapName);
-        CarbonFile[] carbonFiles = FileFactory.getCarbonFile(dataMapStorePath).listFiles();
-        shardPaths = new HashSet<>();
-        for (CarbonFile carbonFile : carbonFiles) {
-          shardPaths.add(carbonFile.getAbsolutePath());
-        }
+        shardPaths = getAllShardPaths(getCarbonTable().getTablePath(), segment.getSegmentNo());
         segmentMap.put(segment.getSegmentNo(), shardPaths);
       }
+      Set<String> filteredShards = segment.getFilteredIndexShardNames();
       for (String shard : shardPaths) {
-        BloomCoarseGrainDataMap bloomDM = new BloomCoarseGrainDataMap();
-        bloomDM.init(new BloomDataMapModel(shard, cache));
-        bloomDM.initIndexColumnConverters(getCarbonTable(), dataMapMeta.getIndexedColumns());
-        dataMaps.add(bloomDM);
+        if (shard.endsWith(BloomIndexFileStore.MERGE_BLOOM_INDEX_SHARD_NAME) ||
+            filteredShards.contains(new File(shard).getName())) {
+          // Filter out the tasks which are filtered through the Main datamap.
+          // For the merge shard, shard pruning is delayed until before blocklet pruning.
+          BloomCoarseGrainDataMap bloomDM = new BloomCoarseGrainDataMap();
+          bloomDM.init(new BloomDataMapModel(shard, cache));
+          bloomDM.initIndexColumnConverters(getCarbonTable(), dataMapMeta.getIndexedColumns());
+          bloomDM.setFilteredShard(filteredShards);
+          dataMaps.add(bloomDM);
+        }
       }
     } catch (Exception e) {
       throw new IOException("Error occurs while init Bloom DataMap", e);
     }
     return dataMaps;
   }

-  @Override
-  public List<CoarseGrainDataMap> getDataMaps(DataMapDistributable distributable)
-      throws IOException {
-    List<CoarseGrainDataMap> coarseGrainDataMaps = new ArrayList<>();
-    BloomCoarseGrainDataMap bloomCoarseGrainDataMap = new BloomCoarseGrainDataMap();
-    String indexPath = ((BloomDataMapDistributable) distributable).getIndexPath();
-    bloomCoarseGrainDataMap.init(new BloomDataMapModel(indexPath, cache));
-    bloomCoarseGrainDataMap.initIndexColumnConverters(getCarbonTable(),
-        dataMapMeta.getIndexedColumns());
-    coarseGrainDataMaps.add(bloomCoarseGrainDataMap);
-    return coarseGrainDataMaps;
-  }
-
   /**
-   * returns all the directories of lucene index files for query
-   * Note: copied from luceneDataMapFactory, will extract to a common interface
+   * returns all shard directories of bloom index files for query
+   * if bloom index files are merged we should get only one shard path
    */
-  private CarbonFile[] getAllIndexDirs(String tablePath, String segmentId) {
-    List<CarbonFile> indexDirs = new ArrayList<>();
-    List<TableDataMap> dataMaps;
-    try {
-      // there can be multiple bloom datamaps present on a table, so get all datamaps and form
-      // the path till the index file directories in all datamaps folders present in each segment
-      dataMaps = DataMapStoreManager.getInstance().getAllDataMap(getCarbonTable());
-    } catch (IOException ex) {
-      LOGGER.error(ex, String.format("failed to get datamaps for tablePath %s, segmentId %s",
-          tablePath, segmentId));
-      throw new RuntimeException(ex);
-    }
-    if (dataMaps.size() > 0) {
-      for (TableDataMap dataMap : dataMaps) {
-        if (dataMap.getDataMapSchema().getDataMapName().equals(this.dataMapName)) {
-          List<CarbonFile> indexFiles;
-          String dmPath = CarbonTablePath.getDataMapStorePath(tablePath, segmentId,
-              dataMap.getDataMapSchema().getDataMapName());
-          FileFactory.FileType fileType = FileFactory.getFileType(dmPath);
-          final CarbonFile dirPath = FileFactory.getCarbonFile(dmPath, fileType);
-          indexFiles = Arrays.asList(dirPath.listFiles(new CarbonFileFilter() {
-            @Override
-            public boolean accept(CarbonFile file) {
-              return file.isDirectory();
-            }
-          }));
-          indexDirs.addAll(indexFiles);
+  private Set<String> getAllShardPaths(String tablePath, String segmentId) {
--- End diff --

Can you provide a comment for this method?

---
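For reference, a commented version of the new method could look like the sketch below. The body is reconstructed from the constants and calls visible in the two diffs above (MERGE_BLOOM_INDEX_SHARD_NAME, MERGE_INPROGRESS_FILE, CarbonTablePath.getDataMapStorePath), so treat it as an illustration of the requested documentation rather than the PR's exact code; imports follow the surrounding file.

      /**
       * Returns all shard directories of bloom index files in this segment for query.
       * If the bloom index files have been merged (the mergeShard directory exists
       * and no merge is in progress), only the single mergeShard path is returned.
       */
      private Set<String> getAllShardPaths(String tablePath, String segmentId) {
        String dataMapStorePath = CarbonTablePath.getDataMapStorePath(
            tablePath, segmentId, dataMapName);
        CarbonFile[] carbonFiles = FileFactory.getCarbonFile(dataMapStorePath).listFiles();
        Set<String> shardPaths = new HashSet<>();
        boolean mergeShardInprogress = false;
        CarbonFile mergeShardFile = null;
        for (CarbonFile carbonFile : carbonFiles) {
          if (carbonFile.getName().equals(BloomIndexFileStore.MERGE_BLOOM_INDEX_SHARD_NAME)) {
            mergeShardFile = carbonFile;
          } else if (carbonFile.getName().equals(BloomIndexFileStore.MERGE_INPROGRESS_FILE)) {
            mergeShardInprogress = true;
          } else if (carbonFile.isDirectory()) {
            shardPaths.add(carbonFile.getAbsolutePath());
          }
        }
        if (mergeShardFile != null && !mergeShardInprogress) {
          // the merge completed successfully, so query only the merged shard
          shardPaths.clear();
          shardPaths.add(mergeShardFile.getAbsolutePath());
        }
        return shardPaths;
      }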
Github user xuchuanyin commented on the issue:
https://github.com/apache/carbondata/pull/2624 Please provide a brief description of the modifications you have made in this PR, such as adding a Listener, adding an RDD, using thrift, etc. ---
Github user xuchuanyin commented on the issue:
https://github.com/apache/carbondata/pull/2624 My main concern is that you use thrift to write the merged bloom index file. I am not sure whether we should add a new thrift file to Carbon-Format when that file only works for Carbon-Bloom. @ravipesala @jackylk What do you think? ---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2624 Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/7917/ ---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2624 Build Success with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/6641/ ---
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/2624 SDV Build Fail, Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/6272/ ---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2624 Build Failed with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/7919/ ---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2624 Build Success with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/6643/ ---
Github user kevinjmh commented on the issue:
https://github.com/apache/carbondata/pull/2624 retest this please ---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2624 Build Failed with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/7924/ ---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2624 Build Failed with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/6647/ ---
Github user kevinjmh commented on the issue:
https://github.com/apache/carbondata/pull/2624 retest this please ---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2624 Build Failed with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/7927/ ---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2624 Build Success with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/6650/ ---
Github user ravipesala commented on the issue:
https://github.com/apache/carbondata/pull/2624 SDV Build Fail, Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/6289/ ---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2624 Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/7937/ ---
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/2624 Build Success with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/6660/ ---
Github user jackylk commented on the issue:
https://github.com/apache/carbondata/pull/2624 I prefer not to add a new thrift file definition; can we write the binary file ourselves? ---
Github user kevinjmh commented on the issue:
https://github.com/apache/carbondata/pull/2624 We can change that to a Length-Value layout, like: [len][shardName][len][bloomIndexData] [len][shardName][len][bloomIndexData] ... ---
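As a sketch of that layout (illustrative only: the class and method names are assumptions, not the PR's code), writing and reading the merged file with plain Java streams could look like this:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public final class LengthValueBloomIndexFormat {
      // Write each shard as [len][shardName][len][bloomIndexData].
      public static void write(DataOutputStream out, List<String> shardNames,
          List<byte[]> bloomIndexData) throws IOException {
        for (int i = 0; i < shardNames.size(); i++) {
          byte[] name = shardNames.get(i).getBytes(StandardCharsets.UTF_8);
          out.writeInt(name.length);                   // [len]
          out.write(name);                             // [shardName]
          out.writeInt(bloomIndexData.get(i).length);  // [len]
          out.write(bloomIndexData.get(i));            // [bloomIndexData]
        }
      }

      // Read records until the known file length is consumed; totalLength should
      // come from the file system, e.g. CarbonFile.getSize() as in the diff above.
      public static Map<String, byte[]> read(DataInputStream in, long totalLength)
          throws IOException {
        Map<String, byte[]> result = new LinkedHashMap<>();
        long consumed = 0;
        while (consumed < totalLength) {
          byte[] name = new byte[in.readInt()];
          in.readFully(name);
          byte[] data = new byte[in.readInt()];
          in.readFully(data);
          consumed += 8 + name.length + data.length;  // two 4-byte lengths plus payloads
          result.put(new String(name, StandardCharsets.UTF_8), data);
        }
        return result;
      }
    }

Such a format would keep the bloom datamap module free of any carbon-format (thrift) dependency, which is what the earlier review comments ask for.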