kunal642 commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388142709

File path: integration/hive/pom.xml

@@ -177,7 +188,7 @@
           <!-- Note config is repeated in scalatest config -->
           <configuration>
             <reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
-            <argLine>-Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m</argLine>
+            <argLine>-Xmx3g -XX:MaxPermSize=512m -XX:ReservedCodeCacheSize=512m -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/tmp/kunal</argLine>

Review comment: done
kunal642 commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388142737

File path: integration/hive/src/main/java/org/apache/carbondata/hive/CarbonHiveRecordReader.java

@@ -106,6 +86,7 @@ private void initialize(InputSplit inputSplit, Configuration conf) throws IOExce
     int columnId = 0;
     for (int j = 0; j < arraySelectedColId.length; j++) {
       columnId = Integer.parseInt(arraySelectedColId[j]);
+

Review comment: done
kunal642 commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388142780

File path: integration/hive/src/main/java/org/apache/carbondata/hive/util/HiveCarbonUtil.java

@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.hive.util;
+
+import java.io.File;
+import java.io.IOException;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.carbondata.common.Strings;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastore.compression.CompressorFactory;
+import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
+import org.apache.carbondata.core.datastore.impl.FileFactory;
+import org.apache.carbondata.core.fileoperations.FileWriteOperation;
+import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.metadata.converter.ThriftWrapperSchemaConverterImpl;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.metadata.datatype.Field;
+import org.apache.carbondata.core.metadata.datatype.StructField;
+import org.apache.carbondata.core.metadata.schema.PartitionInfo;
+import org.apache.carbondata.core.metadata.schema.SchemaEvolution;
+import org.apache.carbondata.core.metadata.schema.SchemaEvolutionEntry;
+import org.apache.carbondata.core.metadata.schema.SchemaReader;
+import org.apache.carbondata.core.metadata.schema.partition.PartitionType;
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.metadata.schema.table.TableInfo;
+import org.apache.carbondata.core.metadata.schema.table.TableSchema;
+import org.apache.carbondata.core.metadata.schema.table.TableSchemaBuilder;
+import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.OutputFilesInfoHolder;
+import org.apache.carbondata.core.util.path.CarbonTablePath;
+import org.apache.carbondata.core.writer.ThriftWriter;
+import org.apache.carbondata.processing.loading.constants.DataLoadProcessorConstants;
+import org.apache.carbondata.processing.loading.model.CarbonDataLoadSchema;
+import org.apache.carbondata.processing.loading.model.CarbonLoadModel;
+import org.apache.carbondata.processing.util.TableOptionConstant;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.HiveMetaHook;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.log4j.Logger;
+
+public class HiveCarbonUtil {
+
+  private static final Logger LOGGER =
+      LogServiceFactory.getLogService(HiveCarbonUtil.class.getName());
+
+  public static CarbonLoadModel getCarbonLoadModel(Configuration tableProperties) {
+    String[] tableUniqueName = tableProperties.get("name").split("\\.");
+    String databaseName = tableUniqueName[0];
+    String tableName = tableUniqueName[1];
+    String tablePath = tableProperties.get("location");
+    String columns = tableProperties.get("columns");
+    String sortColumns = tableProperties.get("sort_columns");
+    String columnTypes = tableProperties.get("columns.types");
+    String partitionColumns = tableProperties.get("partition_columns");
+    String partitionColumnTypes = tableProperties.get("partition_columns.types");
+    if (partitionColumns != null) {
+      columns = columns + "," + partitionColumns;
+      columnTypes = columnTypes + ":" + partitionColumnTypes;
+    }
+    String[] columnTypeArray = splitSchemaStringToArray(columnTypes);
+    String complexDelim = tableProperties.get("complex_delimiter", "");
+    CarbonLoadModel carbonLoadModel =
+        getCarbonLoadModel(tableName, databaseName, tablePath, sortColumns, columns.split(","),
+            columnTypeArray, tableProperties);
+    carbonLoadModel.setCarbonTransactionalTable(true);
+    carbonLoadModel.getCarbonDataLoadSchema().getCarbonTable().setTransactionalTable(true);
+    for (String delim : complexDelim.split(",")) {
+      carbonLoadModel.setComplexDelimiter(delim);
+    }
+    return carbonLoadModel;
+  }
+
+  public static CarbonLoadModel getCarbonLoadModel(Properties tableProperties,
+      Configuration configuration) {
+    String[] tableUniqueName = tableProperties.getProperty("name").split("\\.");
+    String databaseName = tableUniqueName[0];
+    String tableName = tableUniqueName[1];
+    String tablePath = tableProperties.getProperty("location");
+    String columns = tableProperties.getProperty("columns");
+    String sortColumns = tableProperties.getProperty("sort_columns");
+    String[] columnTypes = splitSchemaStringToArray(tableProperties.getProperty("columns.types"));
+    String complexDelim = tableProperties.getProperty("complex_delimiter", "");
+    CarbonLoadModel carbonLoadModel =
+        getCarbonLoadModel(tableName, databaseName, tablePath, sortColumns, columns.split(","),
+            columnTypes, configuration);
+    for (String delim : complexDelim.split(",")) {
+      carbonLoadModel.setComplexDelimiter(delim);
+    }
+    return carbonLoadModel;
+  }
+
+  public static CarbonLoadModel getCarbonLoadModel(String tableName, String databaseName,

Review comment: done
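For readers skimming this thread: the utility quoted above builds a CarbonLoadModel from the standard Hive SerDe table properties. A usage sketch, strictly illustrative — the table name, location, and column values are made up, and whether the three-argument overload it delegates to needs further configuration is not visible in this excerpt:

import java.util.Properties;

import org.apache.carbondata.hive.util.HiveCarbonUtil;
import org.apache.carbondata.processing.loading.model.CarbonLoadModel;

import org.apache.hadoop.conf.Configuration;

public class LoadModelExample {
  public static void main(String[] args) {
    // Property keys mirror the ones getCarbonLoadModel(Properties, Configuration) reads.
    Properties tableProps = new Properties();
    tableProps.setProperty("name", "default.sales");            // "<database>.<table>"
    tableProps.setProperty("location", "/tmp/warehouse/sales"); // hypothetical table path
    tableProps.setProperty("columns", "id,amount");
    tableProps.setProperty("columns.types", "int:double");      // ':'-separated Hive types
    tableProps.setProperty("sort_columns", "id");
    CarbonLoadModel model = HiveCarbonUtil.getCarbonLoadModel(tableProps, new Configuration());
    System.out.println(model.getDatabaseName() + "." + model.getTableName());
  }
}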
kunal642 commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388142806

File path: integration/hive/src/test/java/org/apache/carbondata/hive/HiveTestUtils.java

@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.hive;
+
+import java.io.File;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+import org.apache.carbondata.hive.test.server.HiveEmbeddedServer2;
+
+import org.junit.Assert;
+
+/**
+ * A utility class to start and stop the Hive Embedded Server.
+ */
+public abstract class HiveTestUtils {
+
+  private static Connection connection;
+
+  private static HiveEmbeddedServer2 hiveEmbeddedServer2;
+
+  public HiveTestUtils() {
+  }
+
+  private static void setup() {
+    try {
+      File rootPath = new File(HiveTestUtils.class.getResource("/").getPath() + "../../../..");
+      String targetLoc = rootPath.getAbsolutePath() + "/integration/hive/target/warehouse";
+      hiveEmbeddedServer2 = new HiveEmbeddedServer2();
+      hiveEmbeddedServer2.start(targetLoc);
+      int port = hiveEmbeddedServer2.getFreePort();
+      connection = DriverManager.getConnection("jdbc:hive2://localhost:" + port + "/default", "", "");
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  static Connection getConnection() {
+    if (connection == null) {
+      setup();
+      tearDown();
+    }
+    return connection;
+  }
+
+  public static void tearDown() {
+    Runtime.getRuntime().addShutdownHook(new Thread(() -> {
+      try {
+        connection.close();
+        hiveEmbeddedServer2.stop();
+      } catch (SQLException e) {
+        throw new RuntimeException("Unable to close Hive Embedded Server", e);
+      }
+    }));
+  }
+
+  public String getFieldValue(ResultSet rs, String field) throws Exception {
+    while (rs.next()) {
+      System.out.println(rs.getString(1));
+      System.out.println("-- " + rs.getString(2));
+      if (rs.getString(1).toLowerCase().contains(field.toLowerCase())) {
+        return rs.getString(2);
+      }
+    }
+    return "";
+  }
+
+  public boolean checkAnswer(ResultSet actual, ResultSet expected) throws SQLException {
+    Assert.assertEquals("Row Count Mismatch: ", expected.getFetchSize(), actual.getFetchSize());
+    while(expected.next()) {

Review comment: done
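One note on the checkAnswer method quoted above: JDBC's ResultSet.getFetchSize() returns the driver's fetch-size hint, not the number of rows in the result, so asserting equality on it does not actually compare row counts. A minimal row-by-row comparison sketch — an illustration of the idea, not the code this PR merged:

import java.sql.ResultSet;
import java.sql.SQLException;

public final class ResultSetCompare {
  // Compares two result sets row by row; assumes matching column count and order.
  public static boolean sameRows(ResultSet actual, ResultSet expected) throws SQLException {
    int columns = expected.getMetaData().getColumnCount();
    while (expected.next()) {
      if (!actual.next()) {
        return false;                              // actual ran out of rows first
      }
      for (int i = 1; i <= columns; i++) {
        Object e = expected.getObject(i);
        Object a = actual.getObject(i);
        if (e == null ? a != null : !e.equals(a)) {
          return false;                            // cell mismatch
        }
      }
    }
    return !actual.next();                         // actual must not have extra rows
  }
}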
kunal642 commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388142834

File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/addsegment/AddSegmentTestCase.scala

@@ -758,7 +758,9 @@ class AddSegmentTestCase extends QueryTest with BeforeAndAfterAll {
     val writer = CarbonWriter.builder
       .outputPath(externalSegmentPath)
       .writtenBy("AddSegmentTestCase")
-      .withCsvInput(new Schema(fields))
+      .withSchemaFile(CarbonTablePath.getSchemaFilePath(CarbonEnv

Review comment: done
kunal642 commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388142864

File path: processing/src/main/java/org/apache/carbondata/processing/loading/parser/GenericParser.java

@@ -32,4 +32,6 @@
    */
   E parse(Object data);

+  E parseRaw(Object data);

Review comment: done
ajantha-bhat commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388148584

File path: integration/hive/src/main/java/org/apache/carbondata/hive/MapredCarbonInputFormat.java

@@ -88,12 +89,23 @@ private static void populateCarbonTable(Configuration configuration, String path
         }
       }
     }
-    if (null != validInputPath) {
-      AbsoluteTableIdentifier absoluteTableIdentifier = AbsoluteTableIdentifier
-          .from(validInputPath, getDatabaseName(configuration), getTableName(configuration));
+    if (null != paths) {
       // read the schema file to get the absoluteTableIdentifier having the correct table id
       // persisted in the schema
-      CarbonTable carbonTable = SchemaReader.readCarbonTableFromStore(absoluteTableIdentifier);
+      CarbonTable carbonTable;
+      AbsoluteTableIdentifier absoluteTableIdentifier = AbsoluteTableIdentifier
+          .from(validInputPath, getDatabaseName(configuration), getTableName(configuration));
+      String schemaPath = CarbonTablePath.getSchemaFilePath(validInputPath);
+      if (FileFactory.getCarbonFile(schemaPath).exists()) {
+        // read the schema file to get the absoluteTableIdentifier having the correct table id
+        // persisted in the schema
+        carbonTable = SchemaReader.readCarbonTableFromStore(absoluteTableIdentifier);
+      } else {
+        // InferSchema from data file
+        carbonTable = CarbonTable.buildFromTableInfo(SchemaReader
+            .inferSchema(absoluteTableIdentifier, false));
+        carbonTable.setTransactionalTable(true);

Review comment: If the schema doesn't exist, will it be a non-transactional table? Can you add a comment explaining why it is set as transactional?
ajantha-bhat commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388152290

File path: integration/hive/src/main/java/org/apache/carbondata/hive/MapredCarbonOutputFormat.java

@@ -18,43 +18,115 @@
 package org.apache.carbondata.hive;

 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
 import java.util.Properties;

+import org.apache.carbondata.core.datastore.impl.FileFactory;
+import org.apache.carbondata.core.metadata.schema.PartitionInfo;
+import org.apache.carbondata.core.util.ObjectSerializationUtil;
+import org.apache.carbondata.core.util.ThreadLocalSessionInfo;
 import org.apache.carbondata.hadoop.api.CarbonTableOutputFormat;
+import org.apache.carbondata.hadoop.internal.ObjectArrayWritable;
+import org.apache.carbondata.hive.util.HiveCarbonUtil;
+import org.apache.carbondata.processing.loading.model.CarbonLoadModel;

 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
+import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.OutputFormat;
 import org.apache.hadoop.mapred.RecordWriter;
-import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
 import org.apache.hadoop.util.Progressable;

-/**
- * TODO : To extend CarbonOutputFormat
- */
 public class MapredCarbonOutputFormat<T> extends CarbonTableOutputFormat
-    implements HiveOutputFormat<Void, T> {
+    implements HiveOutputFormat<Void, T>, OutputFormat<Void, T> {

   @Override
   public RecordWriter<Void, T> getRecordWriter(FileSystem fileSystem, JobConf jobConf, String s,
-      Progressable progressable) {
-    return null;
+      Progressable progressable) throws IOException {
+    throw new RuntimeException("Should never be used");

Review comment: Better to throw an UnsupportedOperationException or a more meaningful exception.
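For illustration, the change the reviewer suggests could look like the sketch below; the author notes later in this thread that this mapred entry point is never invoked on the Hive write path, which is why the exception type was ultimately left as-is:

@Override
public RecordWriter<Void, T> getRecordWriter(FileSystem fileSystem, JobConf jobConf, String s,
    Progressable progressable) throws IOException {
  // Hive writes go through getHiveRecordWriter(); this mapred entry point is
  // intentionally unsupported, so fail fast with an explicit exception.
  throw new UnsupportedOperationException(
      "getRecordWriter is not supported; use getHiveRecordWriter instead");
}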
ajantha-bhat commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388152804

File path: integration/hive/src/main/java/org/apache/carbondata/hive/MapredCarbonOutputFormat.java

@@ -18,43 +18,115 @@ (same hunk as in the previous comment; the marked lines are:)

   @Override
-  public void checkOutputSpecs(FileSystem fileSystem, JobConf jobConf)
-      throws IOException {
-    org.apache.hadoop.mapreduce.JobContext jobContext = Job.getInstance(jobConf);
-    super.checkOutputSpecs(jobContext);
+  public void checkOutputSpecs(FileSystem fileSystem, JobConf jobConf) throws IOException {
   }

Review comment: Can you add a comment on why no functionality is required here?
ajantha-bhat commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388154014

File path: integration/hive/src/main/java/org/apache/carbondata/hive/MapredCarbonOutputFormat.java

@@ -18,43 +18,115 @@ (same hunk as in the previous comments; the marked lines are:)

   @Override
   public FileSinkOperator.RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
       Class<? extends Writable> valueClass, boolean isCompressed, Properties tableProperties,
-      Progressable progress) {
-    return null;
+      Progressable progress) throws IOException {
+    CarbonLoadModel carbonLoadModel = null;
+    String encodedString = jc.get(LOAD_MODEL);
+    if (encodedString != null) {
+      carbonLoadModel =
+          (CarbonLoadModel) ObjectSerializationUtil.convertStringToObject(encodedString);
+    }
+    if (carbonLoadModel == null) {
+      carbonLoadModel = HiveCarbonUtil.getCarbonLoadModel(tableProperties, jc);

Review comment: Better to keep this in the `else` case of `if (encodedString != null)`.
ajantha-bhat commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388156847

File path: integration/hive/src/main/java/org/apache/carbondata/hive/MapredCarbonOutputFormat.java

@@ -18,43 +18,115 @@ (same hunk as in the previous comments; the marked lines are:)

   @Override
   public FileSinkOperator.RecordWriter getHiveRecordWriter(JobConf jc, Path finalOutPath,
       Class<? extends Writable> valueClass, boolean isCompressed, Properties tableProperties,
-      Progressable progress) {
-    return null;
+      Progressable progress) throws IOException {
+    CarbonLoadModel carbonLoadModel = null;
+    String encodedString = jc.get(LOAD_MODEL);
+    if (encodedString != null) {
+      carbonLoadModel =
+          (CarbonLoadModel) ObjectSerializationUtil.convertStringToObject(encodedString);
+    }
+    if (carbonLoadModel == null) {
+      carbonLoadModel = HiveCarbonUtil.getCarbonLoadModel(tableProperties, jc);
+    } else {
+      for (Map.Entry<Object, Object> entry : tableProperties.entrySet()) {
+        carbonLoadModel.getCarbonDataLoadSchema().getCarbonTable().getTableInfo().getFactTable()
+            .getTableProperties().put(entry.getKey().toString().toLowerCase(),
+                entry.getValue().toString().toLowerCase());
+      }
+    }
+    String tablePath = FileFactory.getCarbonFile(carbonLoadModel.getTablePath()).getAbsolutePath();
+    TaskAttemptID taskAttemptID = TaskAttemptID.forName(jc.get("mapred.task.id"));
+    TaskAttemptContextImpl context = new TaskAttemptContextImpl(jc, taskAttemptID);
+    final boolean isHivePartitionedTable =
+        carbonLoadModel.getCarbonDataLoadSchema().getCarbonTable().isHivePartitionTable();
+    PartitionInfo partitionInfo =
+        carbonLoadModel.getCarbonDataLoadSchema().getCarbonTable().getPartitionInfo();
+    final int partitionColumn =
+        partitionInfo != null ? partitionInfo.getColumnSchemaList().size() : 0;
+    String finalOutputPath = FileFactory.getCarbonFile(finalOutPath.toString()).getAbsolutePath();
+    if (carbonLoadModel.getCarbonDataLoadSchema().getCarbonTable().isHivePartitionTable()) {
+      carbonLoadModel.getOutputFilesInfoHolder().addToPartitionPath(finalOutputPath);
+      context.getConfiguration().set("carbon.outputformat.writepath", finalOutputPath);
+    }
+    CarbonTableOutputFormat.setLoadModel(context.getConfiguration(), carbonLoadModel);
+    org.apache.hadoop.mapreduce.RecordWriter<NullWritable, ObjectArrayWritable> re =
+        super.getRecordWriter(context);
+    return new FileSinkOperator.RecordWriter() {
+      @Override
+      public void write(Writable writable) throws IOException {
+        try {
+          ObjectArrayWritable objectArrayWritable = new ObjectArrayWritable();
+          if (isHivePartitionedTable) {
+            Object[] actualRow = ((CarbonHiveRow) writable).getData();
+            Object[] newData = Arrays.copyOf(actualRow, actualRow.length + partitionColumn);
+            String[] partitionValues = finalOutputPath.substring(tablePath.length()).split("/");

Review comment: What if the partition value itself contains "/" or "="? This can happen if the partition column is a string column.
kunal642 commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388158515

File path: integration/hive/src/main/java/org/apache/carbondata/hive/MapredCarbonInputFormat.java

@@ -88,12 +89,23 @@ (same hunk as in the earlier comment on this file; the marked lines are:)

+      } else {
+        // InferSchema from data file
+        carbonTable = CarbonTable.buildFromTableInfo(SchemaReader
+            .inferSchema(absoluteTableIdentifier, false));
+        carbonTable.setTransactionalTable(true);

Review comment: It should be false. Changed it.
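Combining the question above with this reply, the corrected branch presumably reads like the sketch below — a reconstruction from the thread, not a quote of the merged code:

} else {
  // No schema file on disk, so infer the schema from the data files instead.
  carbonTable = CarbonTable.buildFromTableInfo(
      SchemaReader.inferSchema(absoluteTableIdentifier, false));
  // A table without a persisted schema file is treated as non-transactional.
  carbonTable.setTransactionalTable(false);
}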
kunal642 commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388159518

File path: integration/hive/src/main/java/org/apache/carbondata/hive/MapredCarbonOutputFormat.java

@@ -18,43 +18,115 @@ (same hunk as in the earlier comments; the marked lines are:)

+      Progressable progressable) throws IOException {
+    throw new RuntimeException("Should never be used");

Review comment: It does not matter, because this API is never called.
kunal642 commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388161829

File path: integration/hive/src/main/java/org/apache/carbondata/hive/MapredCarbonOutputFormat.java

@@ -18,43 +18,115 @@ (same hunk as in the earlier comments; the marked lines are:)

   @Override
-  public void checkOutputSpecs(FileSystem fileSystem, JobConf jobConf)
-      throws IOException {
-    org.apache.hadoop.mapreduce.JobContext jobContext = Job.getInstance(jobConf);
-    super.checkOutputSpecs(jobContext);
+  public void checkOutputSpecs(FileSystem fileSystem, JobConf jobConf) throws IOException {
   }

Review comment: Not needed for us.
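If the comment the reviewer asked for were added, it might read like the sketch below; the stated rationale is inferred from the author's reply here, not quoted from the merged code:

@Override
public void checkOutputSpecs(FileSystem fileSystem, JobConf jobConf) throws IOException {
  // Intentionally a no-op: there is nothing to validate on the Hive write path,
  // since output checks happen inside the Carbon load itself.
}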
kunal642 commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388167171

File path: integration/hive/src/main/java/org/apache/carbondata/hive/MapredCarbonOutputFormat.java

@@ -18,43 +18,115 @@ (same hunk as in the earlier comments; the marked line is:)

+            String[] partitionValues = finalOutputPath.substring(tablePath.length()).split("/");

Review comment: A "/" in a partition value would be converted to "%2F" when the partition folder is created, so it won't fail.
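To illustrate the point: because Hive percent-encodes path-special characters in partition directory names, splitting the partition sub-path on "/" cannot break on a value that originally contained a slash. A standalone sketch with hypothetical values (not from the PR); the exact escaping rules are Hive's, and this just demonstrates the split behavior:

import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;

public class PartitionPathDemo {
  public static void main(String[] args) throws UnsupportedEncodingException {
    // Hive writes a partition value like "2020/03" as "dt=2020%2F03" on disk,
    // so the directory-separator split below still yields one token per partition.
    String partitionSubPath = "/dt=2020%2F03/region=apac";
    for (String token : partitionSubPath.split("/")) {
      if (token.isEmpty()) {
        continue;                          // the leading "/" produces an empty token
      }
      String[] kv = token.split("=", 2);   // limit 2: the value itself may contain "="
      System.out.println(kv[0] + " -> " + URLDecoder.decode(kv[1], "UTF-8"));
    }
  }
}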
kunal642 commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388167385

File path: integration/hive/src/main/java/org/apache/carbondata/hive/MapredCarbonOutputFormat.java

@@ -18,43 +18,115 @@ (same hunk as in the earlier comments; the marked line is:)

+      carbonLoadModel = HiveCarbonUtil.getCarbonLoadModel(tableProperties, jc);

Review comment: done
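A sketch of the restructuring the reviewer asked for and the author marked done — a reconstruction from the quoted code, not necessarily the merged version:

// Prefer the load model serialized into the job configuration; only build one
// from the table properties when no serialized model was provided.
CarbonLoadModel carbonLoadModel;
String encodedString = jc.get(LOAD_MODEL);
if (encodedString != null) {
  carbonLoadModel =
      (CarbonLoadModel) ObjectSerializationUtil.convertStringToObject(encodedString);
  // Apply the Hive table properties on top of the deserialized model.
  for (Map.Entry<Object, Object> entry : tableProperties.entrySet()) {
    carbonLoadModel.getCarbonDataLoadSchema().getCarbonTable().getTableInfo().getFactTable()
        .getTableProperties().put(entry.getKey().toString().toLowerCase(),
            entry.getValue().toString().toLowerCase());
  }
} else {
  carbonLoadModel = HiveCarbonUtil.getCarbonLoadModel(tableProperties, jc);
}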
ajantha-bhat commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388164466

File path: integration/hive/src/test/java/org/apache/carbondata/hive/HiveCarbonTest.java

@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.hive;
+
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
+import org.apache.carbondata.core.datastore.impl.FileFactory;
+import org.apache.carbondata.core.metadata.schema.SchemaReader;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.core.util.path.CarbonTablePath;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+public class HiveCarbonTest extends HiveTestUtils {
+
+  private static Statement statement;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_OFFHEAP_SORT_DEFAULT, "false");
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_UNSAFE_SORT, "false");
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_WRITTEN_BY_APPNAME, "hive");

Review comment: Please set these values back to their defaults at the end of the test, to avoid impacting existing test cases.
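A minimal sketch of the cleanup the reviewer is asking for, in the JUnit 4 style this class already uses. The default values written back here are assumptions for illustration, not taken from the CarbonData source:

// Hypothetical @AfterClass cleanup restoring the properties touched in setup().
@AfterClass
public static void resetCarbonProperties() {
  CarbonProperties instance = CarbonProperties.getInstance();
  instance.addProperty(CarbonCommonConstants.ENABLE_UNSAFE_SORT, "true");   // assumed default
  instance.addProperty(CarbonCommonConstants.ENABLE_OFFHEAP_SORT, "true");  // assumed default
}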
ajantha-bhat commented on a change in pull request #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#discussion_r388167177

File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/addsegment/AddSegmentTestCase.scala

@@ -758,7 +758,9 @@ class AddSegmentTestCase extends QueryTest with BeforeAndAfterAll {
     val writer = CarbonWriter.builder
       .outputPath(externalSegmentPath)
       .writtenBy("AddSegmentTestCase")
-      .withCsvInput(new Schema(fields))
+      .withSchemaFile(CarbonTablePath.getSchemaFilePath(CarbonEnv.getCarbonTable(None,

Review comment: Why this change? The `fields` defined above now become unused. Revert it.
CarbonDataQA1 commented on issue #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#issuecomment-595142247

Build Success with Spark 2.4.4. Please check CI: http://121.244.95.60:12545/job/ApacheCarbon_PR_Builder_2.4.4/631/
CarbonDataQA1 commented on issue #3583: [CARBONDATA-3687] Support writing non-transactional carbondata files through hive
URL: https://github.com/apache/carbondata/pull/3583#issuecomment-595143308

Build Success with Spark 2.3.4. Please check CI: http://121.244.95.60:12545/job/ApacheCarbonPRBuilder2.3/2338/