Github user xuchuanyin commented on a diff in the pull request:

https://github.com/apache/carbondata/pull/2324#discussion_r189613145

--- Diff: datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java ---

```diff
@@ -86,12 +86,31 @@ public void onBlockletStart(int blockletId) {
   protected void resetBloomFilters() {
     indexBloomFilters.clear();
     List<CarbonColumn> indexColumns = getIndexColumns();
+    int[] stats = calculateBloomStats();
     for (int i = 0; i < indexColumns.size(); i++) {
-      indexBloomFilters.add(BloomFilter.create(Funnels.byteArrayFunnel(),
-          bloomFilterSize, bloomFilterFpp));
+      indexBloomFilters
+          .add(new CarbonBloomFilter(stats[0], stats[1], Hash.MURMUR_HASH, compressBloom));
     }
   }

+  /**
+   * It calculates the bits size and number of hash functions to calculate bloom.
+   */
+  private int[] calculateBloomStats() {
+    /*
+     * n: how many items you expect to have in your filter
+     * p: your acceptable false positive rate
+     * Number of bits (m) = -n*ln(p) / (ln(2)^2)
+     * Number of hashes(k) = m/n * ln(2)
```

--- End diff --

Why not calculate `k` before `m`? It could save some evaluation.

---
Github user xuchuanyin commented on a diff in the pull request:

https://github.com/apache/carbondata/pull/2324#discussion_r189605492

--- Diff: datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java ---

```diff
@@ -163,21 +176,37 @@ private double validateAndGetBloomFilterFpp(DataMapSchema dmSchema)
     return bloomFilterFpp;
   }

+  /**
+   * validate bloom DataMap BLOOM_FPP
```

--- End diff --

Fix the description.

---
Github user xuchuanyin commented on the issue:

https://github.com/apache/carbondata/pull/2324

@ravipesala @jackylk I'm not OK with the default size and fpp for the bloom filter. In general use cases, it will result in many false positives. In my test, a 96GB customer table with size=320K & fpp=0.00001 still produced 18 false samples.

---
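For context on the numbers above: a bloom filter sized for `n` expected items degrades sharply once more than `n` distinct values are inserted, because the effective false-positive rate is roughly `(1 - e^(-k*n'/m))^k` for `n'` actual insertions. A minimal sketch of that arithmetic (the cardinalities are illustrative; the thread does not state the real per-blocklet item count):

```java
// Effective false-positive rate of an overloaded bloom filter.
// Sized for n = 320K items at p = 1e-5, then loaded with n' items.
public class EffectiveFpp {
  public static void main(String[] args) {
    double n = 320_000, p = 1e-5;
    double m = -n * Math.log(p) / (Math.log(2) * Math.log(2)); // bits
    double k = Math.round(m / n * Math.log(2));                // hash functions
    for (double nPrime : new double[]{320_000, 1_000_000, 10_000_000}) {
      double fpp = Math.pow(1 - Math.exp(-k * nPrime / m), k);
      System.out.printf("n' = %,.0f -> effective fpp ~ %.2e%n", nPrime, fpp);
    }
    // ~1e-5 at 320K insertions, ~0.14 at 1M, ~1.0 at 10M: false hits on a
    // 96GB table are consistent with an undersized default filter.
  }
}
```

---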
Github user ravipesala commented on the issue:

https://github.com/apache/carbondata/pull/2324

@xuchuanyin it all comes down to the cardinality of the column the user is creating a bloom filter on; I don't think we should size the bloom filter for very high cardinality by default. If the user wants, he can still pass the values through the properties. We cannot make the defaults very high to suit specific scenarios.

---
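The memory side of this trade-off follows directly from the sizing formula quoted in the diff: the bit array grows linearly with the expected item count `n`, at about `-ln(p)/(ln 2)^2` bits per item. A short sketch (the class name is made up for illustration):

```java
// Bits-per-element cost of a bloom filter at a given false-positive rate.
public class BloomCost {
  public static void main(String[] args) {
    for (double p : new double[]{0.01, 0.001, 0.00001}) {
      double bitsPerItem = -Math.log(p) / (Math.log(2) * Math.log(2));
      System.out.printf("p = %.5f -> %.1f bits per item%n", p, bitsPerItem);
    }
    // ~9.6 bits/item at p = 0.01, ~24 bits/item at p = 1e-5, so a default
    // sized for high-cardinality columns would cost megabytes per index.
  }
}
```

---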
Github user ravipesala commented on a diff in the pull request:

https://github.com/apache/carbondata/pull/2324#discussion_r189647130

--- Diff: datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapCache.java ---

```diff
@@ -133,15 +132,14 @@ private int validateAndGetCacheSize() {
    */
   private List<BloomDMModel> loadBloomDataMapModel(CacheKey cacheKey) {
     DataInputStream dataInStream = null;
-    ObjectInputStream objectInStream = null;
     List<BloomDMModel> bloomDMModels = new ArrayList<BloomDMModel>();
     try {
       String indexFile = getIndexFileFromCacheKey(cacheKey);
       dataInStream = FileFactory.getDataInputStream(indexFile, FileFactory.getFileType(indexFile));
-      objectInStream = new ObjectInputStream(dataInStream);
       try {
-        BloomDMModel model = null;
-        while ((model = (BloomDMModel) objectInStream.readObject()) != null) {
+        while (dataInStream.available() > 0) {
```

--- End diff --

I don't know which problem you are facing, but the old code cannot work, as we are no longer doing object serialization here.

---
Github user ravipesala commented on a diff in the pull request:

https://github.com/apache/carbondata/pull/2324#discussion_r189647160

--- Diff: datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java ---

```diff
@@ -163,21 +176,37 @@ private double validateAndGetBloomFilterFpp(DataMapSchema dmSchema)
     return bloomFilterFpp;
  }

+  /**
+   * validate bloom DataMap BLOOM_FPP
```

--- End diff --

ok

---
Github user ravipesala commented on a diff in the pull request:

https://github.com/apache/carbondata/pull/2324#discussion_r189647321

--- Diff: datamap/bloom/src/main/java/org/apache/hadoop/util/bloom/CarbonBloomFilter.java ---

```diff
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util.bloom;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.BitSet;
+
+import org.roaringbitmap.RoaringBitmap;
+
+/**
+ * It is the extendable class to hadoop bloomfilter, it is extendable to implement compressed bloom
+ * and fast serialize and deserialize of bloom.
+ */
+public class CarbonBloomFilter extends BloomFilter {
+
+  private RoaringBitmap bitmap;
+
+  private boolean compress;
+
+  public CarbonBloomFilter() {
+  }
+
+  public CarbonBloomFilter(int vectorSize, int nbHash, int hashType, boolean compress) {
+    super(vectorSize, nbHash, hashType);
+    this.compress = compress;
+  }
+
+  @Override
+  public boolean membershipTest(Key key) {
+    if (key == null) {
+      throw new NullPointerException("key cannot be null");
+    }
+
+    int[] h = hash.hash(key);
+    hash.clear();
+    if (compress) {
+      // If it is compressed chek in roaring bitmap
```

--- End diff --

ok

---
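The quoted diff cuts off inside the compressed branch of `membershipTest`. For illustration only, a plausible continuation under the assumption that the set bit positions live in the `RoaringBitmap` when `compress` is true (a sketch, not the PR's committed code):

```java
    if (compress) {
      // Compressed case (assumed): set bit positions were moved into the
      // RoaringBitmap, so test every hash position against it.
      for (int i = 0; i < h.length; i++) {
        if (!bitmap.contains(h[i])) {
          return false;
        }
      }
    } else {
      // Uncompressed case: fall back to the BitSet inherited from the
      // Hadoop BloomFilter (accessible here since the class sits in the
      // same org.apache.hadoop.util.bloom package).
      for (int i = 0; i < h.length; i++) {
        if (!bits.get(h[i])) {
          return false;
        }
      }
    }
    return true;
```

---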
Github user ravipesala commented on a diff in the pull request:

https://github.com/apache/carbondata/pull/2324#discussion_r189647499

--- Diff: datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDMModel.java ---

```diff
@@ -40,15 +46,29 @@ public int getBlockletNo() {
     return blockletNo;
   }

-  public BloomFilter<byte[]> getBloomFilter() {
+  public BloomFilter getBloomFilter() {
```

--- End diff --

ok

---
Github user ravipesala commented on a diff in the pull request:

https://github.com/apache/carbondata/pull/2324#discussion_r189647876

--- Diff: datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapWriter.java ---

```diff
@@ -86,12 +86,31 @@ public void onBlockletStart(int blockletId) {
   protected void resetBloomFilters() {
     indexBloomFilters.clear();
     List<CarbonColumn> indexColumns = getIndexColumns();
+    int[] stats = calculateBloomStats();
     for (int i = 0; i < indexColumns.size(); i++) {
-      indexBloomFilters.add(BloomFilter.create(Funnels.byteArrayFunnel(),
-          bloomFilterSize, bloomFilterFpp));
+      indexBloomFilters
+          .add(new CarbonBloomFilter(stats[0], stats[1], Hash.MURMUR_HASH, compressBloom));
     }
   }

+  /**
+   * It calculates the bits size and number of hash functions to calculate bloom.
+   */
+  private int[] calculateBloomStats() {
+    /*
+     * n: how many items you expect to have in your filter
+     * p: your acceptable false positive rate
+     * Number of bits (m) = -n*ln(p) / (ln(2)^2)
+     * Number of hashes(k) = m/n * ln(2)
```

--- End diff --

Can't, as `k` depends on `m`.

---
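A side note on this exchange: in the code as written `k` is indeed computed from `m`, but algebraically the dependency cancels out, since substituting `m = -n*ln(p)/(ln 2)^2` into `k = m/n * ln 2` gives `k = -ln(p)/ln 2`, which depends only on `p`. A quick check (illustrative values, hypothetical class name):

```java
// Both routes to k agree: via m, or directly from p alone.
public class KDerivation {
  public static void main(String[] args) {
    double n = 320_000, p = 1e-5;
    double m = -n * Math.log(p) / (Math.log(2) * Math.log(2));
    double kViaM = m / n * Math.log(2);          // route taken in the PR
    double kDirect = -Math.log(p) / Math.log(2); // computed from p alone
    System.out.printf("%.3f == %.3f%n", kViaM, kDirect); // both ~16.610
  }
}
```

---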
Github user CarbonDataQA commented on the issue:

https://github.com/apache/carbondata/pull/2324

Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/6027/

---
Github user CarbonDataQA commented on the issue:

https://github.com/apache/carbondata/pull/2324

Build Failed with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/4868/

---
Github user ravipesala commented on the issue:

https://github.com/apache/carbondata/pull/2324

SDV Build Success, Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/5042/

---
Github user xuchuanyin commented on a diff in the pull request:

https://github.com/apache/carbondata/pull/2324#discussion_r189797259

--- Diff: datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapCache.java ---

```diff
@@ -133,15 +132,14 @@ private int validateAndGetCacheSize() {
    */
   private List<BloomDMModel> loadBloomDataMapModel(CacheKey cacheKey) {
     DataInputStream dataInStream = null;
-    ObjectInputStream objectInStream = null;
     List<BloomDMModel> bloomDMModels = new ArrayList<BloomDMModel>();
     try {
       String indexFile = getIndexFileFromCacheKey(cacheKey);
       dataInStream = FileFactory.getDataInputStream(indexFile, FileFactory.getFileType(indexFile));
-      objectInStream = new ObjectInputStream(dataInStream);
       try {
-        BloomDMModel model = null;
-        while ((model = (BloomDMModel) objectInStream.readObject()) != null) {
+        while (dataInStream.available() > 0) {
```

--- End diff --

The javadoc says:

```
Returns an estimate of the number of bytes that can be read (or skipped over)
from this input stream without blocking by the next invocation of a method for
this input stream.
```

So `dataInStream.available() == 0` does not mean that we have already read all the content from the stream.

---
Github user xuchuanyin commented on a diff in the pull request:

https://github.com/apache/carbondata/pull/2324#discussion_r189797915

--- Diff: datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapCache.java ---

```diff
@@ -133,15 +132,14 @@ private int validateAndGetCacheSize() {
    */
   private List<BloomDMModel> loadBloomDataMapModel(CacheKey cacheKey) {
     DataInputStream dataInStream = null;
-    ObjectInputStream objectInStream = null;
     List<BloomDMModel> bloomDMModels = new ArrayList<BloomDMModel>();
     try {
       String indexFile = getIndexFileFromCacheKey(cacheKey);
       dataInStream = FileFactory.getDataInputStream(indexFile, FileFactory.getFileType(indexFile));
-      objectInStream = new ObjectInputStream(dataInStream);
       try {
-        BloomDMModel model = null;
-        while ((model = (BloomDMModel) objectInStream.readObject()) != null) {
+        while (dataInStream.available() > 0) {
```

--- End diff --

It seems that you use `dataInStream.available() == 0` to indicate that all the content of the stream has been consumed, but it only means you have consumed everything that can be read without blocking; there may be leftover content in the stream that the next blocking call would still return.

---
Github user xuchuanyin commented on a diff in the pull request:

https://github.com/apache/carbondata/pull/2324#discussion_r189798225

--- Diff: datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomDataMapCache.java ---

```diff
@@ -133,15 +132,14 @@ private int validateAndGetCacheSize() {
    */
   private List<BloomDMModel> loadBloomDataMapModel(CacheKey cacheKey) {
     DataInputStream dataInStream = null;
-    ObjectInputStream objectInStream = null;
     List<BloomDMModel> bloomDMModels = new ArrayList<BloomDMModel>();
     try {
       String indexFile = getIndexFileFromCacheKey(cacheKey);
       dataInStream = FileFactory.getDataInputStream(indexFile, FileFactory.getFileType(indexFile));
-      objectInStream = new ObjectInputStream(dataInStream);
       try {
-        BloomDMModel model = null;
-        while ((model = (BloomDMModel) objectInStream.readObject()) != null) {
+        while (dataInStream.available() > 0) {
```

--- End diff --

That's my reading of the javadoc. If the datamap file is small it may not cause a problem, but I maintain that it will cause problems in some scenarios.

---
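For illustration, a common way to terminate this kind of loop without relying on `available()` is to read until `EOFException`, which `DataInput` methods throw at end of stream. A minimal sketch, assuming `BloomDMModel` exposes a Hadoop-Writable-style `readFields(DataInput)` (an assumption for this example, not necessarily the PR's API):

```java
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class BloomModelReader {
  // Read fixed-format records until the stream is exhausted.
  static List<BloomDMModel> readAll(DataInputStream in) throws IOException {
    List<BloomDMModel> models = new ArrayList<>();
    try {
      while (true) {
        BloomDMModel model = new BloomDMModel();
        model.readFields(in); // DataInput reads throw EOFException at EOF
        models.add(model);
      }
    } catch (EOFException e) {
      // Expected: the whole file has been consumed, even if an earlier
      // available() call could have returned 0 mid-file.
    }
    return models;
  }
}
```

---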
Github user CarbonDataQA commented on the issue:

https://github.com/apache/carbondata/pull/2324

Build Success with Spark 2.2.1, Please check CI http://88.99.58.216:8080/job/ApacheCarbonPRBuilder/4877/

---
Github user ravipesala commented on the issue:

https://github.com/apache/carbondata/pull/2324

SDV Build Success, Please check CI http://144.76.159.231:8080/job/ApacheSDVTests/5046/

---
Github user CarbonDataQA commented on the issue:

https://github.com/apache/carbondata/pull/2324

Build Success with Spark 2.1.0, Please check CI http://136.243.101.176:8080/job/ApacheCarbonPRBuilder1/6036/

---