Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121608358

--- Diff: core/src/main/java/org/apache/carbondata/core/cache/dictionary/DoubleArrayTrieDictionary.java ---
@@ -43,7 +43,7 @@
   private static final int UUSD_ROOM_VALUE = -2;   // unused position, only for zero
   private static final int EPTY_BACK_VALUE = 0;    // value of empty position
-  private static final int ENCODE_BASE_VALUE = 10; // encode start number
+  private static final int ENCODE_BASE_VALUE = 10; // apply start number
--- End diff --

ok, it was modified by mistake
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121608594

--- Diff: core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java ---
@@ -22,22 +22,38 @@
 import java.util.BitSet;

 import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.datastore.page.statistics.ColumnPageStatistics;
--- End diff --

Yes, you are right. To make this PR smaller, I suggest doing it in another PR (EncodingStrategy for dimension).
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121608648

--- Diff: core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/AdaptiveIntegerCodec.java ---
@@ -0,0 +1,213 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.datastore.page.encoding;
+
+import org.apache.carbondata.core.datastore.compression.Compressor;
+import org.apache.carbondata.core.datastore.page.ColumnPage;
+import org.apache.carbondata.core.datastore.page.LazyColumnPage;
+import org.apache.carbondata.core.datastore.page.PrimitiveCodec;
+import org.apache.carbondata.core.datastore.page.statistics.ColumnPageStatsVO;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+
+/**
+ * Codec for integer (byte, short, int, long) data type page.
+ * This codec will do type casting on page data to make storage minimum.
+ */
+class AdaptiveIntegerCodec extends AdaptiveCompressionCodec {
+
+  private ColumnPage encodedPage;
+
+  public static ColumnPageCodec newInstance(DataType srcDataType, DataType targetDataType,
+      ColumnPageStatsVO stats, Compressor compressor) {
+    return new AdaptiveIntegerCodec(srcDataType, targetDataType, stats, compressor);
+  }
+
+  private AdaptiveIntegerCodec(DataType srcDataType, DataType targetDataType,
+      ColumnPageStatsVO stats, Compressor compressor) {
+    super(srcDataType, targetDataType, stats, compressor);
+  }
+
+  @Override
+  public String getName() {
+    return "AdaptiveIntegerCodec";
+  }
+
+  @Override
+  public byte[] encode(ColumnPage input) {
+    if (srcDataType.equals(targetDataType)) {
+      return input.compress(compressor);
+    } else {
+      encodedPage = ColumnPage.newPage(targetDataType, input.getPageSize());
+      input.encode(codec);
+      return encodedPage.compress(compressor);
+    }
+  }
+
+  @Override
+  public ColumnPage decode(byte[] input, int offset, int length) {
+    if (srcDataType.equals(targetDataType)) {
+      return ColumnPage.decompress(compressor, targetDataType, input, offset, length);
+    } else {
+      ColumnPage page = ColumnPage.decompress(compressor, targetDataType, input, offset, length);
+      return LazyColumnPage.newPage(page, codec);
+    }
+  }
+
+  private PrimitiveCodec codec = new PrimitiveCodec() {
+    @Override
+    public void encode(int rowId, byte value) {
+      switch (targetDataType) {
+        default:
+          assert (false);
+          throw new RuntimeException("internal error: " + debugInfo());
+      }
+    }
+
+    @Override
+    public void encode(int rowId, short value) {
+      switch (targetDataType) {
+        case BYTE:
+          encodedPage.putByte(rowId, (byte) value);
+          break;
+        default:
+          assert (false);
--- End diff --

ok
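For readers following the thread: the adaptive idea in this codec is that page statistics decide the narrowest integer type that can hold every value, and the page is cast down to that type before compression. A minimal, self-contained sketch of that decision, with illustrative names only (this is not the CarbonData API):

    public class NarrowingSketch {
      // Pick the smallest integer width (in bytes) that can hold all of [min, max].
      static int minBytesNeeded(long min, long max) {
        if (min >= Byte.MIN_VALUE && max <= Byte.MAX_VALUE) return 1;
        if (min >= Short.MIN_VALUE && max <= Short.MAX_VALUE) return 2;
        if (min >= Integer.MIN_VALUE && max <= Integer.MAX_VALUE) return 4;
        return 8;
      }

      public static void main(String[] args) {
        // A LONG page whose stats say every value fits in one byte,
        // so each 8-byte value can be stored as a single byte instead.
        System.out.println(minBytesNeeded(-7, 100)); // prints 1
      }
    }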
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121608684

--- Diff: core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageCodec.java ---
@@ -21,15 +21,28 @@
 /**
  * Codec for a column page data, implementation should not keep state across pages,
- * caller will use the same object to encode multiple pages.
+ * caller may use the same object to apply multiple pages.
  */
-public interface ColumnCodec {
+public interface ColumnPageCodec {
--- End diff --

Yes, you are right. To make this PR smaller, I suggest doing it in another PR (EncodingStrategy for dimension).
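Regarding the "no state across pages" contract in that Javadoc: a small hedged sketch of what reuse by the caller looks like, using a hypothetical single-method interface rather than the one in this PR:

    public class StatelessCodecSketch {
      // Hypothetical codec; one encode() call is self-contained per page.
      interface PageEncoder {
        byte[] encode(double[] page);
      }

      public static void main(String[] args) {
        PageEncoder encoder = page -> new byte[page.length]; // stub encoder
        double[][] pages = { {1.0, 2.0}, {3.0} };
        for (double[] p : pages) {
          // The same instance is reused for every page, which is safe
          // only because encode() keeps no state between calls.
          System.out.println(encoder.encode(p).length);
        }
      }
    }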
Github user ravipesala commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121612421

--- Diff: core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/UpscaleDeltaFloatingCodec.java ---
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.datastore.page.encoding;
+
+import java.math.BigDecimal;
+
+import org.apache.carbondata.core.datastore.compression.Compressor;
+import org.apache.carbondata.core.datastore.page.ColumnPage;
+import org.apache.carbondata.core.datastore.page.LazyColumnPage;
+import org.apache.carbondata.core.datastore.page.PrimitiveCodec;
+import org.apache.carbondata.core.datastore.page.statistics.ColumnPageStatsVO;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+
+/**
+ * Codec for floating point (float, double) data type page.
+ * This codec will upscale (multiple page value by decimal) to integer value,
+ * and do type casting to make storage minimum.
+ */
+public class UpscaleDeltaFloatingCodec extends AdaptiveCompressionCodec {
+
+  private ColumnPage encodedPage;
+
+  private BigDecimal max;
+  private double factor;
+
+  public static ColumnPageCodec newInstance(DataType srcDataType, DataType targetDataType,
+      ColumnPageStatsVO stats, Compressor compressor) {
+    return new UpscaleDeltaFloatingCodec(srcDataType, targetDataType, stats, compressor);
+  }
+
+  private UpscaleDeltaFloatingCodec(DataType srcDataType, DataType targetDataType,
+      ColumnPageStatsVO stats, Compressor compressor) {
+    super(srcDataType, targetDataType, stats, compressor);
+    this.max = BigDecimal.valueOf((double) stats.getMax());
+    this.factor = Math.pow(10, stats.getDecimal());
+  }
+
+  @Override
+  public String getName() {
+    return "UpscaleDeltaFloatingCodec";
+  }
+
+  @Override
+  public byte[] encode(ColumnPage input) {
+    encodedPage = ColumnPage.newPage(targetDataType, input.getPageSize());
+    input.encode(codec);
+    return encodedPage.compress(compressor);
+  }
+
+  @Override
+  public ColumnPage decode(byte[] input, int offset, int length) {
+    ColumnPage page = ColumnPage.decompress(compressor, targetDataType, input, offset, length);
+    return LazyColumnPage.newPage(page, codec);
+  }
+
+  private PrimitiveCodec codec = new PrimitiveCodec() {
+    @Override
+    public void encode(int rowId, byte value) {
+      // this codec is for floating point type only
+      assert (false);
+      throw new RuntimeException("internal error: " + debugInfo());
+    }
+
+    @Override
+    public void encode(int rowId, short value) {
+      // this codec is for floating point type only
+      assert (false);
+      throw new RuntimeException("internal error: " + debugInfo());
+    }
+
+    @Override
+    public void encode(int rowId, int value) {
+      // this codec is for floating point type only
+      assert (false);
+      throw new RuntimeException("internal error: " + debugInfo());
+    }
+
+    @Override
+    public void encode(int rowId, long value) {
+      // this codec is for floating point type only
+      assert (false);
+      throw new RuntimeException("internal error: " + debugInfo());
+    }
+
+    @Override
+    public void encode(int rowId, float value) {
+      double diff = max.subtract(BigDecimal.valueOf(value)).doubleValue();
+      switch (targetDataType) {
+        case BYTE:
+          encodedPage.putByte(rowId, (byte) (Math.round(factor * diff)));
+          break;
+        case SHORT:
+          encodedPage.putShort(rowId, (short) (Math.round(factor * diff)));
+          break;
+        case INT:
+          encodedPage.putInt(rowId, (int) (Math.round(factor * diff)));
+          break;
+        case LONG:
+          encodedPage.putLong(rowId, (long) (Math.round(factor * diff)));
+          break;
+        default:
+          assert (false);
+          throw new RuntimeException("internal error: " + debugInfo());
+      }
+    }
+
+    @Override
+    public void encode(int rowId, double value) {
+      double diff = max.subtract(BigDecimal.valueOf(value)).doubleValue();
+      switch (targetDataType) {
+        case BYTE:
+          encodedPage.putByte(rowId, (byte) (Math.round(factor * diff)));
+          break;
+        case SHORT:
+          encodedPage.putShort(rowId, (short) (Math.round(factor * diff)));
+          break;
+        case INT:
+          encodedPage.putInt(rowId, (int) (Math.round(factor * diff)));
+          break;
+        case LONG:
+          encodedPage.putLong(rowId, (long) (Math.round(factor * diff)));
+          break;
+        default:
+          assert (false);
+          throw new RuntimeException("internal error: " + debugInfo());
+      }
+    }
+
+    @Override
+    public long decodeLong(byte value) {
+      // this codec is for floating point type only
+      assert (false);
+      throw new RuntimeException("internal error: " + debugInfo());
+    }
+
+    @Override
+    public long decodeLong(short value) {
+      // this codec is for floating point type only
+      assert (false);
+      throw new RuntimeException("internal error: " + debugInfo());
+    }
+
+    @Override
+    public long decodeLong(int value) {
+      // this codec is for floating point type only
+      assert (false);
+      throw new RuntimeException("internal error: " + debugInfo());
+    }
+
+    @Override
+    public double decodeDouble(byte value) {
+      return max.subtract(BigDecimal.valueOf(value / factor)).doubleValue();
+    }
+
+    @Override
+    public double decodeDouble(short value) {
+      return max.subtract(BigDecimal.valueOf(value / factor)).doubleValue();
+    }
+
+    @Override
+    public double decodeDouble(int value) {
+      return max.subtract(BigDecimal.valueOf(value / factor)).doubleValue();
+    }
+
+    @Override
+    public double decodeDouble(long value) {
+      return max.subtract(BigDecimal.valueOf(value / factor)).doubleValue();
+    }
+
+    @Override
+    public double decodeDouble(float value) {
+      assert (false);
--- End diff --

I think the assert is not required, as an exception is thrown anyway.
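To make the scheme in this diff concrete: the codec stores round(factor * (max - value)) as a narrow integer and decodes it as max - stored / factor. A self-contained numeric sketch, with illustrative values (it assumes stats.getDecimal() == 2 for this page):

    public class UpscaleDeltaExample {
      public static void main(String[] args) {
        double max = 3.14;               // page max from statistics (illustrative)
        double factor = Math.pow(10, 2); // assumes two decimal digits of precision
        double value = 2.5;
        byte stored = (byte) Math.round(factor * (max - value)); // 64, fits in one byte
        double decoded = max - stored / factor;                  // back to ~2.5
        System.out.println(stored + " -> " + decoded);
      }
    }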
Github user ravipesala commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121613071

--- Diff: core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/UpscaleFloatingCodec.java ---
@@ -0,0 +1,190 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.datastore.page.encoding;
+
+import org.apache.carbondata.core.datastore.compression.Compressor;
+import org.apache.carbondata.core.datastore.page.ColumnPage;
+import org.apache.carbondata.core.datastore.page.LazyColumnPage;
+import org.apache.carbondata.core.datastore.page.PrimitiveCodec;
+import org.apache.carbondata.core.datastore.page.statistics.ColumnPageStatsVO;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+
+/**
+ * Codec for floating point (float, double) data type page.
+ * This codec will upscale the diff from page max value to integer value,
+ * and do type casting to make storage minimum.
+ */
+public class UpscaleFloatingCodec extends AdaptiveCompressionCodec {
+
+  private ColumnPage encodedPage;
+  private double factor;
+
+  public static ColumnPageCodec newInstance(DataType srcDataType, DataType targetDataType,
+      ColumnPageStatsVO stats, Compressor compressor) {
+    return new UpscaleFloatingCodec(srcDataType, targetDataType, stats, compressor);
+  }
+
+  private UpscaleFloatingCodec(DataType srcDataType, DataType targetDataType,
+      ColumnPageStatsVO stats, Compressor compressor) {
+    super(srcDataType, targetDataType, stats, compressor);
+    this.factor = Math.pow(10, stats.getDecimal());
+  }
+
+  @Override public String getName() {
+    return "UpscaleFloatingCodec";
+  }
+
+  @Override
+  public byte[] encode(ColumnPage input) {
+    encodedPage = ColumnPage.newPage(targetDataType, input.getPageSize());
+    input.encode(codec);
+    return encodedPage.compress(compressor);
+  }
+
+  @Override
+  public ColumnPage decode(byte[] input, int offset, int length) {
+    ColumnPage page = ColumnPage.decompress(compressor, targetDataType, input, offset, length);
+    return LazyColumnPage.newPage(page, codec);
+  }
+
+  private PrimitiveCodec codec = new PrimitiveCodec() {
--- End diff --

Better to use an abstract class that implements all methods with a default implementation; extending classes can then override only the methods they need.
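A hedged sketch of that suggestion (hypothetical names; the real PrimitiveCodec has more overloads than shown here):

    // Adapter base class: every method fails by default, so a concrete codec
    // overrides only the overloads its source data type actually uses.
    abstract class PrimitiveCodecAdapterSketch {
      public void encode(int rowId, byte value) { throw unsupported(); }
      public void encode(int rowId, short value) { throw unsupported(); }
      public void encode(int rowId, double value) { throw unsupported(); }

      private RuntimeException unsupported() {
        return new UnsupportedOperationException("not supported by " + getClass().getName());
      }
    }

    // A floating-point codec then implements only what it needs.
    class FloatOnlyCodecSketch extends PrimitiveCodecAdapterSketch {
      @Override
      public void encode(int rowId, double value) {
        // upscale and store the value here
      }
    }

As jackylk's reply further down notes, the trade-off is that shared default implementations cannot easily include the per-codec debugInfo() detail in their error messages.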
Github user ravipesala commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121613640

--- Diff: core/src/main/java/org/apache/carbondata/core/memory/UnsafeMemoryManager.java ---
@@ -123,4 +121,29 @@ public boolean isMemoryAvailable() {
   public long getUsableMemory() {
     return totalMemory - minimumMemory;
   }
+
+  /**
+   * It tries to allocate memory of `size` bytes, keep retry until it allocates successfully.
+   */
+  public static MemoryBlock allocateMemoryBlocking(long size) throws MemoryException {
--- End diff --

typo: `allocateMemoryBlock`
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121615305

--- Diff: core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/UpscaleDeltaFloatingCodec.java ---
@@ -0,0 +1,197 @@
[... same UpscaleDeltaFloatingCodec diff as quoted above, down to ...]
+    @Override
+    public double decodeDouble(float value) {
+      assert (false);
--- End diff --

ok
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121616959

--- Diff: core/src/main/java/org/apache/carbondata/core/memory/UnsafeMemoryManager.java ---
@@ -123,4 +121,29 @@ public boolean isMemoryAvailable() {
   public long getUsableMemory() {
     return totalMemory - minimumMemory;
  }
+
+  /**
+   * It tries to allocate memory of `size` bytes, keep retry until it allocates successfully.
+   */
+  public static MemoryBlock allocateMemoryBlocking(long size) throws MemoryException {
--- End diff --

Actually it is not a typo: I wanted to express that this function blocks (retries) until it allocates memory successfully. Since the name is not good, I will change it to `allocateMemoryWithRetry`.
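The behavior being described, sketched minimally (a hypothetical helper; the real method allocates unsafe memory blocks and throws MemoryException rather than allocating a Java array):

    public class AllocateWithRetrySketch {
      // Keep retrying until the allocation succeeds, backing off briefly
      // so that other tasks get a chance to free memory.
      static long[] allocateWithRetry(int size) throws InterruptedException {
        while (true) {
          try {
            return new long[size]; // stand-in for the real unsafe allocation
          } catch (OutOfMemoryError e) {
            Thread.sleep(50);
          }
        }
      }

      public static void main(String[] args) throws InterruptedException {
        System.out.println(allocateWithRetry(1024).length); // prints 1024
      }
    }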
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121618486

--- Diff: core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/UpscaleFloatingCodec.java ---
@@ -0,0 +1,190 @@
[... same UpscaleFloatingCodec diff as quoted above, down to ...]
+  private PrimitiveCodec codec = new PrimitiveCodec() {
--- End diff --

ok
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121619385

--- Diff: core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/UpscaleFloatingCodec.java ---
@@ -0,0 +1,190 @@
[... same UpscaleFloatingCodec diff as quoted above, down to ...]
+  private PrimitiveCodec codec = new PrimitiveCodec() {
--- End diff --

I thought about it earlier, but in that case the log message would not be as informative as it is now, e.g. "internal error: " + debugInfo().
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121625468

--- Diff: core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java ---
@@ -22,22 +22,38 @@
 import java.util.BitSet;

 import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.datastore.page.statistics.ColumnPageStatistics;
+import org.apache.carbondata.core.constants.CarbonV3DataFormatConstants;
+import org.apache.carbondata.core.datastore.compression.Compressor;
+import org.apache.carbondata.core.datastore.page.statistics.ColumnPageStatsVO;
 import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.util.ByteUtil;
+import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.DataTypeUtil;

-// Represent a columnar data in one page for one column.
+import static org.apache.carbondata.core.metadata.datatype.DataType.BYTE;
+import static org.apache.carbondata.core.metadata.datatype.DataType.DECIMAL;
+import static org.apache.carbondata.core.metadata.datatype.DataType.DOUBLE;
+import static org.apache.carbondata.core.metadata.datatype.DataType.FLOAT;
+import static org.apache.carbondata.core.metadata.datatype.DataType.INT;
+import static org.apache.carbondata.core.metadata.datatype.DataType.LONG;
+import static org.apache.carbondata.core.metadata.datatype.DataType.SHORT;
+import static org.apache.carbondata.core.metadata.datatype.DataType.STRING;
+
+/**
+ * Represent a columnar data in one page for one column.
+ */
 public class ColumnPage {

-  protected final DataType dataType;
-  protected final int pageSize;
-  protected ColumnPageStatistics stats;
+  private final int pageSize;
+  private DataType dataType;
+  private ColumnPageStatsVO stats;
--- End diff --

We had better design this feature before changing it to an interface; I think both the write and read paths need to change.
Github user jackylk commented on a diff in the pull request:
https://github.com/apache/carbondata/pull/987#discussion_r121628329

--- Diff: core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java ---
@@ -46,51 +62,157 @@
   // The index of the rowId whose value is null, will be set to 1
   private BitSet nullBitSet;

-  public ColumnPage(DataType dataType, int pageSize) {
-    this.dataType = dataType;
+  protected ColumnPage(DataType dataType, int pageSize) {
     this.pageSize = pageSize;
-    this.stats = new ColumnPageStatistics(dataType);
-    this.nullBitSet = new BitSet(pageSize);
+    this.dataType = dataType;
+  }
+
+  // create a new page
+  public static ColumnPage newPage(DataType dataType, int pageSize) {
+    ColumnPage instance;
     switch (dataType) {
+      case BYTE:
+        instance = newBytePage(new byte[pageSize]);
+        break;
       case SHORT:
+        instance = newShortPage(new short[pageSize]);
+        break;
       case INT:
+        instance = newIntPage(new int[pageSize]);
+        break;
       case LONG:
-        longData = new long[pageSize];
+        instance = newLongPage(new long[pageSize]);
+        break;
+      case FLOAT:
+        instance = newFloatPage(new float[pageSize]);
         break;
       case DOUBLE:
-        doubleData = new double[pageSize];
+        instance = newDoublePage(new double[pageSize]);
         break;
       case DECIMAL:
-        byteArrayData = new byte[pageSize][];
+        instance = newDecimalPage(new byte[pageSize][]);
         break;
       case STRING:
-        byteArrayData = new byte[pageSize][];
+        instance = newStringPage(new byte[pageSize][]);
         break;
       default:
        throw new RuntimeException("Unsupported data dataType: " + dataType);
     }
+    instance.stats = new ColumnPageStatsVO(dataType);
+    instance.nullBitSet = new BitSet(pageSize);
+    return instance;
+  }
+
+  // create a new page and set data with input `pageData`
+  public static ColumnPage newPage(DataType dataType, Object pageData) {
+    // This is used in read path, since statistics and nullBitSet is not required, not creating
+    // them to make object minimum
+    switch (dataType) {
+      case BYTE:
+        byte[] byteData = (byte[]) pageData;
+        return newBytePage(byteData);
+      case SHORT:
+        short[] shortData = (short[]) pageData;
+        return newShortPage(shortData);
+      case INT:
+        int[] intData = (int[]) pageData;
+        return newIntPage(intData);
+      case LONG:
+        long[] longData = (long[]) pageData;
+        return newLongPage(longData);
+      case FLOAT:
+        float[] floatData = (float[]) pageData;
+        return newFloatPage(floatData);
+      case DOUBLE:
+        double[] doubleData = (double[]) pageData;
+        return newDoublePage(doubleData);
+      case DECIMAL:
+        byte[][] decimalData = (byte[][]) pageData;
+        return newDecimalPage(decimalData);
+      case STRING:
--- End diff --

ok
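The two factories in this diff serve the write path and the read path respectively; a short usage sketch (this assumes carbondata-core at this PR's revision on the classpath, and page sizes chosen only for illustration):

    import org.apache.carbondata.core.datastore.page.ColumnPage;
    import org.apache.carbondata.core.metadata.datatype.DataType;

    public class ColumnPageFactorySketch {
      public static void main(String[] args) {
        // Write path: allocates backing storage plus stats and the null bitset.
        ColumnPage writePage = ColumnPage.newPage(DataType.INT, 1000);
        // Read path: wraps already-decoded data; stats and null bitset are
        // skipped to keep the object minimal, as the comment in the diff says.
        ColumnPage readPage = ColumnPage.newPage(DataType.INT, new int[] {1, 2, 3});
        System.out.println(writePage.getPageSize() + ", " + readPage.getPageSize());
      }
    }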
Github user asfgit commented on the issue:
https://github.com/apache/carbondata/pull/987

Refer to this link for build results (access rights to CI server needed):
https://builds.apache.org/job/carbondata-pr-spark-1.6/315/

Build result: FAILURE
[...truncated 51.50 KB...]
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoExecutionException
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :carbondata-processing
[JENKINS] Archiving module POMs (carbondata-processing, carbondata-spark-common, carbondata-examples-spark, carbondata-hive, carbondata-common, carbondata-assembly, carbondata-parent, carbondata-examples-flink, carbondata-core, carbondata-hadoop, carbondata-presto, carbondata-spark-common-test, carbondata-spark) to the local repository
Setting status of 56d21b1db66e70758b44e386659e3c670531c57e to FAILURE with URL https://builds.apache.org/job/carbondata-pr-spark-1.6/315/ and message: 'Tests Failed for Spark1.6'
Using context: Jenkins(Spark1.6): mvn clean test -Pspark-1.6
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/987

Build Failed with Spark 2.1.0, please check CI:
http://136.243.101.176:8080/job/ApacheCarbonPRBuilder/2434/
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/987

Build Failed with Spark 2.1.0, please check CI:
http://136.243.101.176:8080/job/ApacheCarbonPRBuilder/2435/
Github user asfgit commented on the issue:
https://github.com/apache/carbondata/pull/987

Refer to this link for build results (access rights to CI server needed):
https://builds.apache.org/job/carbondata-pr-spark-1.6/316/

Failed Tests: 9, all in org.apache.carbondata:carbondata-spark-common-test:
- TestLoadDataWithHiveSyntaxV1Format: test data loading and validate query output
- TestLoadDataWithHiveSyntaxV1Format: test data loading with different case file header and validate query output
- TestLoadDataWithHiveSyntaxV1Format: test data loading when delimiter is '|' and data with header
- TestLoadDataWithHiveSyntaxV2Format: test data loading and validate query output
- TestLoadDataWithHiveSyntaxV2Format: test data loading with different case file header and validate query output
- TestLoadDataWithHiveSyntaxV2Format: test data loading when delimiter is '|' and data with header
- TestLoadDataWithNoMeasure: test data loading with single no dictionary column
- TestLoadDataWithNoMeasure: test data loading with single complex struct type column
- TestLoadDataWithNoMeasure: test data loading with single complex array type column
(all under org.apache.carbondata.spark.testsuite.dataload)
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/987

Build Failed with Spark 2.1.0, please check CI:
http://136.243.101.176:8080/job/ApacheCarbonPRBuilder/2436/
Github user asfgit commented on the issue:
https://github.com/apache/carbondata/pull/987

Refer to this link for build results (access rights to CI server needed):
https://builds.apache.org/job/carbondata-pr-spark-1.6/317/

Failed Tests: 9, all in org.apache.carbondata:carbondata-spark-common-test:
- TestLoadDataWithHiveSyntaxV1Format: test data loading and validate query output
- TestLoadDataWithHiveSyntaxV1Format: test data loading with different case file header and validate query output
- TestLoadDataWithHiveSyntaxV1Format: test data loading when delimiter is '|' and data with header
- TestLoadDataWithHiveSyntaxV2Format: test data loading and validate query output
- TestLoadDataWithHiveSyntaxV2Format: test data loading with different case file header and validate query output
- TestLoadDataWithHiveSyntaxV2Format: test data loading when delimiter is '|' and data with header
- TestLoadDataWithNoMeasure: test data loading with single no dictionary column
- TestLoadDataWithNoMeasure: test data loading with single complex struct type column
- TestLoadDataWithNoMeasure: test data loading with single complex array type column
(all under org.apache.carbondata.spark.testsuite.dataload)
Github user CarbonDataQA commented on the issue:
https://github.com/apache/carbondata/pull/987

Build Failed with Spark 2.1.0, please check CI:
http://136.243.101.176:8080/job/ApacheCarbonPRBuilder/2437/