[GitHub] incubator-carbondata pull request #729: [CARBONDATA-850] Fix the comment def...


qiuchenjian-2
Github user chenliang613 commented on a diff in the pull request:

    https://github.com/apache/incubator-carbondata/pull/729#discussion_r109628602
 
    --- Diff: format/src/main/thrift/carbondata.thrift ---
    @@ -1,223 +1,226 @@
    -/**
    - * Licensed to the Apache Software Foundation (ASF) under one
    - * or more contributor license agreements.  See the NOTICE file
    - * distributed with this work for additional information
    - * regarding copyright ownership.  The ASF licenses this file
    - * to you under the Apache License, Version 2.0 (the
    - * "License"); you may not use this file except in compliance
    - * with the License.  You may obtain a copy of the License at
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
      *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    + *    http://www.apache.org/licenses/LICENSE-2.0
      *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
      */
     
     /**
    - * File format description for the carbon file format
    + * File format description for the CarbonData file format
      */
     namespace java org.apache.carbondata.format
     
     include "schema.thrift"
     include "dictionary.thrift"
     
     /**
    -* Information about a segment, that represents one data load
    -*/
    + * Information about a segment, that represents one data load
    + */
     struct SegmentInfo{
    -    1: required i32 num_cols; // Number of columns in this load, because schema can evolve . TODO: Check whether this is really required
    +    1: required i32 num_cols; // Number of columns in this load; because the schema can evolve, different segments may have different columns
         2: required list<i32> column_cardinalities; // Cardinality of columns
     }
     
     /**
    -* Btree index of one blocklet
    -*/
    + * Btree index of one blocklet
    + */
     struct BlockletBTreeIndex{
         1: required binary start_key; // Bit-packed start key of one blocklet
    -    2: required binary end_key; // Bit-packed start key of one blocklet
    +    2: required binary end_key; // Bit-packed end key of one blocklet
     }
     
     /**
    -* Min-max index of one blocklet
    -*/
    + * Min-max index of one blocklet
    + */
     struct BlockletMinMaxIndex{
         1: required list<binary> min_values; //Min value of all columns of one blocklet Bit-Packed
         2: required list<binary> max_values; //Max value of all columns of one blocklet Bit-Packed
     }
     
     /**
    -* Index of one blocklet
    -**/
    + * Index of one blocklet
    + */
     struct BlockletIndex{
         1: optional BlockletMinMaxIndex min_max_index;
         2: optional BlockletBTreeIndex b_tree_index;
     }
     
     /**
    -* Sort state of one column
    -*/
    + * Sort state of one column
    + */
     enum SortState{
         SORT_NONE = 0; // Data is not sorted
         SORT_NATIVE = 1; //Source data was sorted
         SORT_EXPLICIT = 2; // Sorted (ascending) when loading
     }
     
     /**
    -* Compressions supported by Carbon Data.
    -*/
    + * Compressions supported by CarbonData.
    + */
     enum CompressionCodec{
         SNAPPY = 0;
     }
     
     /**
    -* Represents the data of one dimension one dimension group in one blocklet
    -*/
    -// add a innger level placeholder for further I/O granulatity
    + * Represents the data of one column page or one column page group in one blocklet.
    + * Currently CarbonData uses Snappy for compression directly; users can specify a different compression codec via ChunkCompressionMeta.
    + */
     struct ChunkCompressionMeta{
    -    1: required CompressionCodec compression_codec; // the compressor used
    -    /** total byte size of all uncompressed pages in this column chunk (including the headers) **/
    +    1: required CompressionCodec compression_codec; // The compressor used
    +    /** Total byte size of all uncompressed pages in this column chunk (including the headers) **/
         2: required i64 total_uncompressed_size;
    -    /** total byte size of all compressed pages in this column chunk (including the headers) **/
    +    /** Total byte size of all compressed pages in this column chunk (including the headers) **/
         3: required i64 total_compressed_size;
     }
     
     /**
    -* To handle space data with nulls
    -*/
    + * To handle space data with nulls
    + */
     struct PresenceMeta{
    -    1: required bool represents_presence; // if true, ones in the bit stream reprents presence. otherwise represents absence
    +    1: required bool represents_presence; // If true, ones in the bit stream represent presence; otherwise they represent absence
         2: required binary present_bit_stream; // Compressed bit stream representing the presence of null values
     }
     
     /**
    -* Represents a chunk of data. The chunk can be a single column stored in Column Major format or a group of columns stored in Row Major Format.
    -**/
    + * Represents a chunk of data. The chunk can be a single column stored in Column Major format or a group of columns stored in Row Major Format.
    + * For V1 format.
    + */
     struct DataChunk{
    -    1: required ChunkCompressionMeta chunk_meta; // the metadata of a chunk
    -    2: required bool rowMajor; // whether this chunk is a row chunk or column chunk ? Decide whether this can be replace with counting od columnIDs
    +    1: required ChunkCompressionMeta chunk_meta; // The metadata of a chunk
    +    2: required bool rowMajor; // Whether this chunk is a row chunk or a column chunk; decide whether this can be replaced by counting columnIDs.
      /** The column IDs in this chunk, in the order in which the data is physically stored, will have atleast one column ID for columnar format, many column ID for row major format**/
         3: required list<i32> column_ids;
         4: required i64 data_page_offset; // Offset of data page
    -    5: required i32 data_page_length; // length of data page
    -    6: optional i64 rowid_page_offset; //offset of row id page, only if encoded using inverted index
    -    7: optional i32 rowid_page_length; //length of row id page, only if encoded using inverted index
    -    8: optional i64 rle_page_offset; // offset of rle page, only if RLE coded.
    -    9: optional i32 rle_page_length; // length of rle page, only if RLE coded.
    -    10: optional PresenceMeta presence; // information about presence of values in each row of this column chunk
    +    5: required i32 data_page_length; // Length of data page
    +    6: optional i64 rowid_page_offset; // Offset of row id page, only if encoded using inverted index
    +    7: optional i32 rowid_page_length; // Length of row id page, only if encoded using inverted index
    +    8: optional i64 rle_page_offset; // Offset of rle page, only if RLE coded.
    +    9: optional i32 rle_page_length; // Length of rle page, only if RLE coded.
    +    10: optional PresenceMeta presence; // Information about presence of values in each row of this column chunk
         11: optional SortState sort_state;
         12: optional list<schema.Encoding> encoders; // The List of encoders overriden at node level
    -    13: optional list<binary> encoder_meta; // extra information required by encoders
    +    13: optional list<binary> encoder_meta; // Extra information required by encoders
     }
     
     /**
    -* Represents a chunk of data. The chunk can be a single column stored in Column Major format or a group of columns stored in Row Major Format.
    -**/
    + * Represents a chunk of data. The chunk can be a single column stored in Column Major format or a group of columns stored in Row Major Format.
    + * For V2 format.
    + */
     struct DataChunk2{
    -    1: required ChunkCompressionMeta chunk_meta; // the metadata of a chunk
    -    2: required bool rowMajor; // whether this chunk is a row chunk or column chunk ? Decide whether this can be replace with counting od columnIDs
    +    1: required ChunkCompressionMeta chunk_meta; // The metadata of a chunk
    +    2: required bool rowMajor; // Whether this chunk is a row chunk or a column chunk; decide whether this can be replaced by counting columnIDs.
    --- End diff --
   
    ok, agree.
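
For context on the structs discussed in the diff above, the following is a minimal, hypothetical sketch of how the index structures might be populated from Java once carbondata.thrift is compiled (the file declares namespace java org.apache.carbondata.format). It assumes the classes are produced by the standard Apache Thrift Java generator (e.g. thrift --gen java carbondata.thrift), so the constructors and setter names below follow Thrift's usual code-generation conventions and are not taken from this thread.

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    // Hypothetical usage of the Thrift-generated classes from carbondata.thrift
    // (namespace java org.apache.carbondata.format), assuming standard Thrift
    // Java code generation.
    import org.apache.carbondata.format.BlockletBTreeIndex;
    import org.apache.carbondata.format.BlockletIndex;
    import org.apache.carbondata.format.BlockletMinMaxIndex;
    import org.apache.carbondata.format.SegmentInfo;

    public class BlockletIndexExample {
      public static void main(String[] args) {
        // SegmentInfo: number of columns in this load plus per-column cardinalities.
        SegmentInfo segment = new SegmentInfo(3, Arrays.asList(100, 50, 10));

        // Min-max index for one blocklet: one min and one max value per column,
        // stored as bit-packed binary (ByteBuffer in the generated Java API).
        BlockletMinMaxIndex minMax = new BlockletMinMaxIndex(
            Arrays.asList(ByteBuffer.wrap(new byte[]{0x01}),
                          ByteBuffer.wrap(new byte[]{0x02}),
                          ByteBuffer.wrap(new byte[]{0x03})),
            Arrays.asList(ByteBuffer.wrap(new byte[]{0x0A}),
                          ByteBuffer.wrap(new byte[]{0x0B}),
                          ByteBuffer.wrap(new byte[]{0x0C})));

        // B-tree index for the same blocklet: bit-packed start and end keys.
        BlockletBTreeIndex bTree = new BlockletBTreeIndex(
            ByteBuffer.wrap(new byte[]{0x00}),
            ByteBuffer.wrap(new byte[]{(byte) 0xFF}));

        // BlockletIndex declares only optional fields, so the generated class
        // is populated through setters rather than a constructor.
        BlockletIndex index = new BlockletIndex();
        index.setMin_max_index(minMax);
        index.setB_tree_index(bTree);

        System.out.println(index);
      }
    }

Because BlockletIndex marks both of its fields optional, the sketch uses setters there, while the required fields of SegmentInfo, BlockletMinMaxIndex and BlockletBTreeIndex map to the generated all-field constructors.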

