jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402778148

File path: integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala

@@ -38,126 +35,141 @@
 import org.apache.carbondata.core.locks.{CarbonLockUtil, ICarbonLock, LockUsage}
 import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, SegmentFileStore}
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.mutate.CarbonUpdateUtil
-import org.apache.carbondata.core.statusmanager.{FileFormat, SegmentStatus, SegmentStatusManager}
+import org.apache.carbondata.core.statusmanager.{FileFormat, LoadMetadataDetails, SegmentStatus, SegmentStatusManager}
 import org.apache.carbondata.core.util.path.CarbonTablePath
 import org.apache.carbondata.streaming.segment.StreamSegment

 object CarbonStore {
   private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

-  def showSegments(
-      limit: Option[String],
-      tablePath: String,
-      showHistory: Boolean): Seq[Row] = {
+  def readSegments(tablePath: String, showHistory: Boolean): Array[LoadMetadataDetails] = {
     val metaFolder = CarbonTablePath.getMetadataPath(tablePath)
-    val loadMetadataDetailsArray = if (showHistory) {
+    val allSegments = if (showHistory) {
       SegmentStatusManager.readLoadMetadata(metaFolder) ++
         SegmentStatusManager.readLoadHistoryMetadata(metaFolder)
     } else {
       SegmentStatusManager.readLoadMetadata(metaFolder)
     }
+    val segments = if (!showHistory) {
+      allSegments.filter(_.getVisibility.equalsIgnoreCase("true"))
+    } else {
+      allSegments
+    }
+    segments

Review comment: ok, renamed
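Since the visibility filter only applies when showHistory is false, the two branches in the quoted readSegments can also be folded together. A minimal sketch, assuming the same SegmentStatusManager and CarbonTablePath API quoted above, not necessarily the committed code:

    // Sketch only: the visibility filter lives inside the non-history branch directly.
    def readSegments(tablePath: String, showHistory: Boolean): Array[LoadMetadataDetails] = {
      val metaFolder = CarbonTablePath.getMetadataPath(tablePath)
      if (showHistory) {
        // history view keeps every segment, visible or not
        SegmentStatusManager.readLoadMetadata(metaFolder) ++
          SegmentStatusManager.readLoadHistoryMetadata(metaFolder)
      } else {
        // default view only lists visible segments
        SegmentStatusManager.readLoadMetadata(metaFolder)
          .filter(_.getVisibility.equalsIgnoreCase("true"))
      }
    }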
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402778356

File path: integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
(same hunk as above, continuing with the newly added getPartitions helper)

+  def getPartitions(tablePath: String, load: LoadMetadataDetails): Seq[String] = {
+    val segmentFile = SegmentFileStore.readSegmentFile(
+      CarbonTablePath.getSegmentFilePath(tablePath, load.getSegmentFile))
+    if (segmentFile == null) {
+      return Seq.empty
+    }
+    val locationMap = segmentFile.getLocationMap
+    if (locationMap != null) {
+      locationMap.asScala.map {
+        case (path, detail) =>

Review comment: fixed
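The path binding in the quoted pattern match is never used. A sketch of an equivalent form that iterates the map values directly, assuming the same SegmentFileStore API and that scala.collection.JavaConverters._ is in scope as in the quoted file:

    // Sketch only: iterating over the values avoids the unused `path` binding.
    def getPartitions(tablePath: String, load: LoadMetadataDetails): Seq[String] = {
      val segmentFile = SegmentFileStore.readSegmentFile(
        CarbonTablePath.getSegmentFilePath(tablePath, load.getSegmentFile))
      if (segmentFile == null || segmentFile.getLocationMap == null) {
        Seq.empty
      } else {
        segmentFile.getLocationMap.asScala.values.map { detail =>
          s"{${detail.getPartitions.asScala.mkString(",")}}"
        }.toSeq
      }
    }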
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402778705

File path: integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
(same hunk, continuing with the added getMergeTo and getSegmentPath helpers)

+        case (path, detail) =>
+          s"{${ detail.getPartitions.asScala.mkString(",") }}"
+      }.toSeq
+    } else {
+      Seq.empty
+    }
+  }
+
+  def getMergeTo(load: LoadMetadataDetails): String = {
+    if (load.getMergedLoadName != null) {
+      load.getMergedLoadName
+    } else {
+      "NA"
+    }
+  }
+
+  def getSegmentPath(load: LoadMetadataDetails): String = {
+    if (StringUtils.isNotEmpty(load.getPath)) {
+      load.getPath

Review comment: fixed
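As a side note, the two "NA" fallbacks quoted above could also be written with Option while keeping the behaviour identical. A sketch, not necessarily the committed form, assuming the same StringUtils import as the quoted file:

    // Sketch only: Option-based "NA" fallbacks.
    def getMergeTo(load: LoadMetadataDetails): String =
      Option(load.getMergedLoadName).getOrElse("NA")

    def getSegmentPath(load: LoadMetadataDetails): String =
      Option(load.getPath).filter(p => StringUtils.isNotEmpty(p)).getOrElse("NA")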
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402779432

File path: integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
(same hunk; the old limit-parsing and Row-building body of showSegments is removed and time-related helpers are added)

+  def getLoadStartTime(load: LoadMetadataDetails): String = {
+    val startTime =
+      if (load.getLoadStartTime == CarbonCommonConstants.SEGMENT_LOAD_TIME_DEFAULT) {
+        "NA"
+      } else {
+        new java.sql.Timestamp(load.getLoadStartTime).toString
+      }
+    startTime
+  }
+
+  def getLoadEndTime(load: LoadMetadataDetails): String = {
+    val endTime =
+      if (load.getLoadStartTime == CarbonCommonConstants.SEGMENT_LOAD_TIME_DEFAULT) {
+        "NA"
+      } else {
+        new java.sql.Timestamp(load.getLoadEndTime).toString
+      }
+    endTime
+  }
+
+  def getSpentTime(load: LoadMetadataDetails): String = {

Review comment: fixed
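One thing worth noting in the quoted code: getLoadEndTime tests getLoadStartTime against SEGMENT_LOAD_TIME_DEFAULT, which looks like a copy-paste slip. A corrected sketch, assuming that is what the fix addresses:

    // Sketch: check the end timestamp (not the start timestamp) against the
    // "not finished" default before formatting it.
    def getLoadEndTime(load: LoadMetadataDetails): String = {
      if (load.getLoadEndTime == CarbonCommonConstants.SEGMENT_LOAD_TIME_DEFAULT) {
        "NA"
      } else {
        new java.sql.Timestamp(load.getLoadEndTime).toString
      }
    }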
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402779878

File path: integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
(same hunk, continuing with the spent-time helpers)

+  def getSpentTime(load: LoadMetadataDetails): String = {
+    if (load.getLoadEndTime == CarbonCommonConstants.SEGMENT_LOAD_TIME_DEFAULT) {
+      "NA"
+    } else {
+      Duration.between(
+        Instant.ofEpochMilli(load.getLoadEndTime),
+        Instant.ofEpochMilli(load.getLoadStartTime)
+      ).toString
+    }
+  }
+
+  def getSpentTimeAsMillis(load: LoadMetadataDetails): Long = {

Review comment: I think it is simpler to make two functions for the reader
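A sketch of what those two small helpers could look like, assuming the java.time imports already used above. Note that Duration.between(start, end) expects the earlier instant first, so start and end are passed in that order here; the -1L sentinel for unfinished loads is an assumption, not necessarily the committed code:

    // Sketch of the two spent-time helpers the comment refers to.
    def getSpentTime(load: LoadMetadataDetails): String = {
      if (load.getLoadEndTime == CarbonCommonConstants.SEGMENT_LOAD_TIME_DEFAULT) {
        "NA"
      } else {
        Duration.between(
          Instant.ofEpochMilli(load.getLoadStartTime),
          Instant.ofEpochMilli(load.getLoadEndTime)
        ).toString
      }
    }

    def getSpentTimeAsMillis(load: LoadMetadataDetails): Long = {
      if (load.getLoadEndTime == CarbonCommonConstants.SEGMENT_LOAD_TIME_DEFAULT) {
        -1L  // sentinel for loads that have not finished yet (assumption)
      } else {
        load.getLoadEndTime - load.getLoadStartTime
      }
    }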
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402779878

File path: integration/spark/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
(same hunk and same commented line as the previous message)

+  def getSpentTimeAsMillis(load: LoadMetadataDetails): Long = {

Review comment: I think it is simpler to make two functions for the reader. These two functions are small anyway.
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402788850

File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSegmentsByQueryCommand.scala
(new file; excerpt of the command around the commented existence check)

+case class SegmentRow(
+    id: String, status: String, loadStartTime: String, spentTimeMs: Long, partitions: Seq[String],
+    dataSize: Long, indexSize: Long, mergedToId: String, format: String, path: String,
+    loadEndTime: String, segmentFileName: String)
+
+case class CarbonShowSegmentsByQueryCommand(
+    databaseNameOp: Option[String],
+    tableName: String,
+    query: String,
+    showHistory: Boolean = false)
+  extends DataCommand {
+
+  private def createDataFrame: DataFrame = {
+    val sparkSession = SparkSession.getActiveSession.get
+    val carbonTable = CarbonEnv.getCarbonTable(databaseNameOp, tableName)(sparkSession)
+    val tablePath = carbonTable.getTablePath
+    val segments = CarbonStore.readSegments(tablePath, showHistory)
+    val tempViewName = makeTempViewName(carbonTable)
+    registerSegmentRowView(sparkSession, tempViewName, carbonTable, segments)
+    try {
+      sparkSession.sql(query)
+    } catch {
+      case t: Throwable =>
+        sparkSession.catalog.dropTempView(tempViewName)
+        throw t
+    }
+  }
+
+  /**
+   * Generate temp view name for the query to execute
+   */
+  private def makeTempViewName(carbonTable: CarbonTable): String = {
+    s"${carbonTable.getTableName}_segments"
+  }
+
+  private def registerSegmentRowView(
+      sparkSession: SparkSession,
+      tempViewName: String,
+      carbonTable: CarbonTable,
+      segments: Array[LoadMetadataDetails]): Unit = {
+
+    // populate a dataframe containing all segment information
+    val tablePath = carbonTable.getTablePath
+    val segmentRows = segments.toSeq.map { segment =>
+      val mergedToId = CarbonStore.getMergeTo(segment)
+      val path = CarbonStore.getSegmentPath(segment)
+      val startTime = CarbonStore.getLoadStartTime(segment)
+      val endTime = CarbonStore.getLoadEndTime(segment)
+      val spentTime = CarbonStore.getSpentTimeAsMillis(segment)
+      val (dataSize, indexSize) = CarbonStore.getDataAndIndexSize(tablePath, segment)
+      val partitions = CarbonStore.getPartitions(tablePath, segment)
+      SegmentRow(
+        segment.getLoadName,
+        segment.getSegmentStatus.toString,
+        startTime,
+        spentTime,
+        partitions,
+        dataSize,
+        indexSize,
+        mergedToId,
+        segment.getFileFormat.toString,
+        path,
+        endTime,
+        if (segment.getSegmentFile == null) "NA" else segment.getSegmentFile)
+    }
+
+    // create a temp view using the populated dataframe and execute the query on it
+    val df = sparkSession.createDataFrame(segmentRows)
+    checkIfTableExist(sparkSession, tempViewName)
+    df.createOrReplaceTempView(tempViewName)
+  }
+
+  private def checkIfTableExist(sparkSession: SparkSession, tempViewName: String): Unit = {
+    if (sparkSession.catalog.tableExists(tempViewName)) {
+      throw new MalformedCarbonCommandException(s"$tempViewName already exists, " +

Review comment: I think it is better to fail, since the user may depend on the result to do something programmatically. If we return something unexpected, the behavior is hard to understand.
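For illustration, the behaviour the comment argues for, exercised through the new syntax. The query runs against a generated "<table>_segments" temp view, so a real table with that name makes the command fail; the expected message matches the test case later in this thread, and the spark value is just a placeholder SparkSession:

    // Creating a table whose name clashes with the generated view name ...
    spark.sql("CREATE TABLE source_segments (age INT)")
    // ... makes the SHOW SEGMENTS ... AS SELECT form fail instead of silently
    // querying something unexpected.
    spark.sql(
      """
        | SHOW SEGMENTS ON source AS
        | SELECT id, status, dataSize FROM source_segments WHERE status = 'Success'
      """.stripMargin)
    // throws MalformedCarbonCommandException: "source_segments already exists ..."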
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402791833

File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSegmentsByQueryCommand.scala
(same new file as above; the commented line is in processData)

+  override def processData(sparkSession: SparkSession): Seq[Row] = {
+    Checker.validateTableExists(databaseNameOp, tableName, sparkSession)
+    val carbonTable = CarbonEnv.getCarbonTable(databaseNameOp, tableName)(sparkSession)
+    setAuditTable(carbonTable)
+    if (!carbonTable.getTableInfo.isTransactionalTable) {
+      throw new MalformedCarbonCommandException("Unsupported operation on non transactional table")
+    }
+    try {
+      df.collect()

Review comment: fixed
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402793977

File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSegmentsCommand.scala
(new file; excerpt up to the commented attribute)

+case class CarbonShowSegmentsCommand(
+    databaseNameOp: Option[String],
+    tableName: String,
+    showHistory: Boolean = false)
+  extends DataCommand {
+
+  // add new columns of show segments at last
+  override def output: Seq[Attribute] = {
+    Seq(
+      AttributeReference("ID", StringType, nullable = false)(),
+      AttributeReference("Status", StringType, nullable = false)(),
+      AttributeReference("Load Start Time", StringType, nullable = false)(),
+      AttributeReference("Spent", StringType, nullable = true)(),

Review comment: changed to "Load Time Taken"
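Applying that rename to the attribute list quoted above would presumably give the following header for plain SHOW SEGMENTS; a sketch only, assuming the remaining attributes stay unchanged:

    // Sketch: "Spent" renamed to "Load Time Taken", other columns as quoted above.
    override def output: Seq[Attribute] = {
      Seq(
        AttributeReference("ID", StringType, nullable = false)(),
        AttributeReference("Status", StringType, nullable = false)(),
        AttributeReference("Load Start Time", StringType, nullable = false)(),
        AttributeReference("Load Time Taken", StringType, nullable = true)(),
        AttributeReference("Partition", StringType, nullable = true)(),
        AttributeReference("Data Size", StringType, nullable = false)(),
        AttributeReference("Index Size", StringType, nullable = false)())
    }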
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402794462

File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSegmentsCommand.scala
(same new file; the commented line is in showBasic)

+  override def processData(sparkSession: SparkSession): Seq[Row] = {
+    Checker.validateTableExists(databaseNameOp, tableName, sparkSession)
+    val carbonTable = CarbonEnv.getCarbonTable(databaseNameOp, tableName)(sparkSession)
+    setAuditTable(carbonTable)
+    if (!carbonTable.getTableInfo.isTransactionalTable) {
+      throw new MalformedCarbonCommandException("Unsupported operation on non transactional table")
+    }
+    val tablePath = carbonTable.getTablePath
+    val segments = readSegments(tablePath, showHistory)
+    if (segments.nonEmpty) {
+      showBasic(segments, tablePath)
+    } else {
+      Seq.empty
+    }
+  }
+
+  override protected def opName: String = "SHOW SEGMENTS"
+
+  private def showBasic(
+      allSegments: Array[LoadMetadataDetails],
+      tablePath: String): Seq[Row] = {
+    val segments = allSegments.sortWith { (l1, l2) =>
+      java.lang.Double.parseDouble(l1.getLoadName) >
+        java.lang.Double.parseDouble(l2.getLoadName)
+    }
+
+    segments
+      .map { segment =>
+        val startTime = getLoadStartTime(segment)
+        val spentTime = getSpentTime(segment)

Review comment: fixed
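For context on the sortWith in showBasic above: segment ids can be compaction ids such as "0.2" or "4.1", which is why they are compared as doubles rather than lexicographically. A small self-contained illustration:

    // "10" must come before "4.1" in the descending numeric order used above,
    // which a plain string sort would get wrong.
    val names = Seq("1", "10", "4.1", "0.2")
    val sorted = names.sortWith(_.toDouble > _.toDouble)
    // sorted == Seq("10", "4.1", "1", "0.2")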
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402795062

File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/segment/ShowSegmentTestCase.scala
(new test file; excerpt of the "verify success case" test up to the commented statement)

+  test("verify success case") {
+    sql("drop table if exists source")
+    sql(
+      """
+        |create table source (age int)
+        |STORED AS carbondata
+        |partitioned by (name string, class string)
+        |TBLPROPERTIES('AUTO_LOAD_MERGE'='true','COMPACTION_LEVEL_THRESHOLD'='2,2')
+        |""".stripMargin)
+    sql("insert into source select 1, 'abc1', 'classA'")
+    sql("insert into source select 2, 'abc2', 'classB'")
+    sql("insert into source select 3, 'abc3', 'classA'")
+    sql("insert into source select 4, 'abc4', 'classB'")
+    sql("insert into source select 5, 'abc5', 'classA'")
+    sql("insert into source select 6, 'abc6', 'classC'")
+    sql("show segments on source").show(false)
+
+    val df = sql(s"""show segments on source""").collect()
+    // validating headers
+    val header = df(0).schema
+    assert(header(0).name.equalsIgnoreCase("ID"))
+    assert(header(1).name.equalsIgnoreCase("Status"))
+    assert(header(2).name.equalsIgnoreCase("Load Start Time"))
+    assert(header(3).name.equalsIgnoreCase("Spent"))
+    assert(header(4).name.equalsIgnoreCase("Partition"))
+    assert(header(5).name.equalsIgnoreCase("Data Size"))
+    assert(header(6).name.equalsIgnoreCase("Index Size"))
+    val col = df
+      .map(row => Row(row.getString(0), row.getString(1)))
+      .filter(_.getString(1).equals("Success"))
+      .toSeq
+    assert(col.equals(Seq(Row("4.1", "Success"), Row("0.2", "Success"))))
+
+    sql(

Review comment: removed
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402795271

File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/segment/ShowSegmentTestCase.scala
(same test, continuing with the SHOW SEGMENTS ... AS SELECT query and its assertions)

+    sql(
+      """
+        | show segments on source as
+        | select id, status, datasize from source_segments where status = 'Success' order by dataSize
+        |""".stripMargin).show(false)
+
+    val rows = sql(
+      """
+        | show segments on source as
+        | select id, status, datasize from source_segments where status = 'Success' order by dataSize
+        |""".stripMargin).collect()
+
+    assertResult("4.1")(rows(0).get(0))
+    assertResult("Success")(rows(0).get(1))
+    assertResult(1762)(rows(0).get(2))

Review comment: removed
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402795492

File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/segment/ShowSegmentTestCase.scala
(same test file, continuing with the remaining assertions and the next two tests)

+    assertResult("0.2")(rows(1).get(0))
+    assertResult("Success")(rows(1).get(1))
+    assertResult(3524)(rows(1).get(2))
+
+    val tables = sql("show tables").collect()
+    assert(!tables.toSeq.exists(_.get(1).equals("source_segments")))
+
+    sql(s"""drop table source""").collect
+  }
+
+  test("Show Segments on empty table") {
+    sql(s"""drop TABLE if exists source""").collect
+    sql(s"""CREATE TABLE source (CUST_ID int,CUST_NAME String,ACTIVE_EMUI_VERSION string,DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),Double_COLUMN1 double,DECIMAL_COLUMN2 decimal(36,10), Double_COLUMN2 double,INTEGER_COLUMN1 int) STORED AS carbondata TBLPROPERTIES('table_blocksize'='1')""").collect
+    checkAnswer(sql("show segments on source"), Seq.empty)
+    val result = sql("show segments on source as select * from source_segments").collect()
+    assertResult(0)(result.length)
+  }
+
+  test("can not show segments on exist table") {

Review comment: fixed
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402795773 ########## File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/segment/ShowSegmentTestCase.scala ########## @@ -0,0 +1,207 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.carbondata.spark.testsuite.segment + +import org.apache.spark.sql.{AnalysisException, CarbonEnv, Row} +import org.apache.spark.sql.test.util.QueryTest +import org.scalatest.BeforeAndAfterAll + +import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException +import org.apache.carbondata.core.metadata.CarbonMetadata +import org.apache.carbondata.core.statusmanager.SegmentStatusManager + +/** + * Test Class for SHOW SEGMENTS command + */ +class ShowSegmentTestCase extends QueryTest with BeforeAndAfterAll { + + test("verify success case") { + sql("drop table if exists source") + sql( + """ + |create table source (age int) + |STORED AS carbondata + |partitioned by (name string, class string) + |TBLPROPERTIES('AUTO_LOAD_MERGE'='true','COMPACTION_LEVEL_THRESHOLD'='2,2') + |""".stripMargin) + sql("insert into source select 1, 'abc1', 'classA'") + sql("insert into source select 2, 'abc2', 'classB'") + sql("insert into source select 3, 'abc3', 'classA'") + sql("insert into source select 4, 'abc4', 'classB'") + sql("insert into source select 5, 'abc5', 'classA'") + sql("insert into source select 6, 'abc6', 'classC'") + sql("show segments on source").show(false) + + val df = sql(s"""show segments on source""").collect() + // validating headers + val header = df(0).schema + assert(header(0).name.equalsIgnoreCase("ID")) + assert(header(1).name.equalsIgnoreCase("Status")) + assert(header(2).name.equalsIgnoreCase("Load Start Time")) + assert(header(3).name.equalsIgnoreCase("Spent")) + assert(header(4).name.equalsIgnoreCase("Partition")) + assert(header(5).name.equalsIgnoreCase("Data Size")) + assert(header(6).name.equalsIgnoreCase("Index Size")) + val col = df + .map(row => Row(row.getString(0), row.getString(1))) + .filter(_.getString(1).equals("Success")) + .toSeq + assert(col.equals(Seq(Row("4.1", "Success"), Row("0.2", "Success")))) + + sql( + """ + | show segments on source as + | select id, status, datasize from source_segments where status = 'Success' order by dataSize + |""".stripMargin).show(false) + + val rows = sql( + """ + | show segments on source as + | select id, status, datasize from source_segments where status = 'Success' order by dataSize + |""".stripMargin).collect() + + assertResult("4.1")(rows(0).get(0)) + assertResult("Success")(rows(0).get(1)) + assertResult(1762)(rows(0).get(2)) + assertResult("0.2")(rows(1).get(0)) + assertResult("Success")(rows(1).get(1)) + assertResult(3524)(rows(1).get(2)) + + val tables = 
sql("show tables").collect() + assert(!tables.toSeq.exists(_.get(1).equals("source_segments"))) + + sql(s"""drop table source""").collect + } + + test("Show Segments on empty table") { + sql(s"""drop TABLE if exists source""").collect + sql(s"""CREATE TABLE source (CUST_ID int,CUST_NAME String,ACTIVE_EMUI_VERSION string,DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),Double_COLUMN1 double,DECIMAL_COLUMN2 decimal(36,10), Double_COLUMN2 double,INTEGER_COLUMN1 int) STORED AS carbondata TBLPROPERTIES('table_blocksize'='1')""").collect + checkAnswer(sql("show segments on source"), Seq.empty) + val result = sql("show segments on source as select * from source_segments").collect() + assertResult(0)(result.length) + } + + test("can not show segments on exist table") { + sql("drop TABLE if exists source").collect + sql( + """ + |create table source (age int, name string, class string) + |STORED AS carbondata + |""".stripMargin) + sql("insert into source select 1, 'abc1', 'classA'") + sql("drop table if exists source_segments") + sql("create table source_segments (age int)") + val ex = intercept[MalformedCarbonCommandException](sql("show segments on source as select * from source_segments")) + assert(ex.getMessage.contains("source_segments already exists")) + sql("drop TABLE if exists source") + sql("drop table if exists source_segments") + } + + test("can not show segments by wrong query") { Review comment: fixed ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [hidden email] With regards, Apache Git Services |
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402796411 ########## File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/segment/ShowSegmentTestCase.scala ########## @@ -0,0 +1,207 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.carbondata.spark.testsuite.segment + +import org.apache.spark.sql.{AnalysisException, CarbonEnv, Row} +import org.apache.spark.sql.test.util.QueryTest +import org.scalatest.BeforeAndAfterAll + +import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException +import org.apache.carbondata.core.metadata.CarbonMetadata +import org.apache.carbondata.core.statusmanager.SegmentStatusManager + +/** + * Test Class for SHOW SEGMENTS command + */ +class ShowSegmentTestCase extends QueryTest with BeforeAndAfterAll { + + test("verify success case") { + sql("drop table if exists source") + sql( + """ + |create table source (age int) + |STORED AS carbondata + |partitioned by (name string, class string) + |TBLPROPERTIES('AUTO_LOAD_MERGE'='true','COMPACTION_LEVEL_THRESHOLD'='2,2') + |""".stripMargin) + sql("insert into source select 1, 'abc1', 'classA'") + sql("insert into source select 2, 'abc2', 'classB'") + sql("insert into source select 3, 'abc3', 'classA'") + sql("insert into source select 4, 'abc4', 'classB'") + sql("insert into source select 5, 'abc5', 'classA'") + sql("insert into source select 6, 'abc6', 'classC'") + sql("show segments on source").show(false) + + val df = sql(s"""show segments on source""").collect() + // validating headers + val header = df(0).schema + assert(header(0).name.equalsIgnoreCase("ID")) + assert(header(1).name.equalsIgnoreCase("Status")) + assert(header(2).name.equalsIgnoreCase("Load Start Time")) + assert(header(3).name.equalsIgnoreCase("Spent")) + assert(header(4).name.equalsIgnoreCase("Partition")) + assert(header(5).name.equalsIgnoreCase("Data Size")) + assert(header(6).name.equalsIgnoreCase("Index Size")) + val col = df + .map(row => Row(row.getString(0), row.getString(1))) + .filter(_.getString(1).equals("Success")) + .toSeq + assert(col.equals(Seq(Row("4.1", "Success"), Row("0.2", "Success")))) + + sql( + """ + | show segments on source as + | select id, status, datasize from source_segments where status = 'Success' order by dataSize + |""".stripMargin).show(false) + + val rows = sql( + """ + | show segments on source as + | select id, status, datasize from source_segments where status = 'Success' order by dataSize + |""".stripMargin).collect() + + assertResult("4.1")(rows(0).get(0)) + assertResult("Success")(rows(0).get(1)) + assertResult(1762)(rows(0).get(2)) + assertResult("0.2")(rows(1).get(0)) + assertResult("Success")(rows(1).get(1)) + assertResult(3524)(rows(1).get(2)) + + val tables = 
sql("show tables").collect() + assert(!tables.toSeq.exists(_.get(1).equals("source_segments"))) + + sql(s"""drop table source""").collect + } + + test("Show Segments on empty table") { + sql(s"""drop TABLE if exists source""").collect + sql(s"""CREATE TABLE source (CUST_ID int,CUST_NAME String,ACTIVE_EMUI_VERSION string,DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),Double_COLUMN1 double,DECIMAL_COLUMN2 decimal(36,10), Double_COLUMN2 double,INTEGER_COLUMN1 int) STORED AS carbondata TBLPROPERTIES('table_blocksize'='1')""").collect + checkAnswer(sql("show segments on source"), Seq.empty) + val result = sql("show segments on source as select * from source_segments").collect() + assertResult(0)(result.length) + } + + test("can not show segments on exist table") { + sql("drop TABLE if exists source").collect + sql( + """ + |create table source (age int, name string, class string) + |STORED AS carbondata + |""".stripMargin) + sql("insert into source select 1, 'abc1', 'classA'") + sql("drop table if exists source_segments") + sql("create table source_segments (age int)") + val ex = intercept[MalformedCarbonCommandException](sql("show segments on source as select * from source_segments")) + assert(ex.getMessage.contains("source_segments already exists")) + sql("drop TABLE if exists source") + sql("drop table if exists source_segments") + } + + test("can not show segments by wrong query") { + sql("drop TABLE if exists source").collect + sql( + """ + |create table source (age int, name string, class string) + |STORED AS carbondata + |""".stripMargin) + sql("insert into source select 1, 'abc1', 'classA'") + val ex = intercept[AnalysisException](sql("show segments on source as select dsjk from source_segments")) + val tables = sql("show tables").collect() + assert(!tables.toSeq.exists(_.get(1).equals("source_segments"))) + sql("drop TABLE if exists source") + } + + //Show Segments failing if table name not in same case + test("DataLoadManagement001_830") { + sql(s"""drop TABLE if exists Case_ShowSegment_196""").collect + sql(s"""CREATE TABLE Case_ShowSegment_196 (CUST_ID int,CUST_NAME String,ACTIVE_EMUI_VERSION string,DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10),Double_COLUMN1 double,DECIMAL_COLUMN2 decimal(36,10), Double_COLUMN2 double,INTEGER_COLUMN1 int) STORED AS carbondata TBLPROPERTIES('table_blocksize'='1')""").collect + val df = sql(s"""show segments on default.CASE_ShowSegment_196""").collect() + val col = df.map { + row => Row(row.getString(0), row.getString(1), row.getString(4)) + }.toSeq + assert(col.equals(Seq())) + sql(s"""drop table Case_ShowSegment_196""").collect + } + + test("separate visible and invisible segments info into two files") { + val tableName = "test_tablestatus_history" + sql(s"drop table if exists ${tableName}") + sql(s"create table ${tableName} (name String, age int) STORED AS carbondata " + + "TBLPROPERTIES('AUTO_LOAD_MERGE'='true','COMPACTION_LEVEL_THRESHOLD'='2,2')") + val carbonTable = CarbonEnv.getCarbonTable(Some("default"), tableName)(sqlContext.sparkSession) + sql(s"insert into ${tableName} select 'abc1',1") + sql(s"insert into ${tableName} select 'abc2',2") + sql(s"insert into ${tableName} select 'abc3',3") + sql(s"insert into ${tableName} select 'abc4',4") + sql(s"insert into ${tableName} select 'abc5',5") + sql(s"insert into ${tableName} select 'abc6',6") + assert(sql(s"show segments on ${tableName} as select * from 
${tableName}_segments").collect().length == 10) + var detail = SegmentStatusManager.readLoadMetadata(carbonTable.getMetadataPath) + var historyDetail = SegmentStatusManager.readLoadHistoryMetadata(carbonTable.getMetadataPath) + assert(detail.length == 10) + assert(historyDetail.length == 0) + sql(s"clean files for table ${tableName}") + assert(sql(s"show segments on ${tableName}").collect().length == 2) + detail = SegmentStatusManager.readLoadMetadata(carbonTable.getMetadataPath) + historyDetail = SegmentStatusManager.readLoadHistoryMetadata(carbonTable.getMetadataPath) + assert(detail.length == 4) + assert(historyDetail.length == 6) + dropTable(tableName) + } + + test("show history segments") { + val tableName = "test_tablestatus_history" + sql(s"drop table if exists ${tableName}") + sql(s"create table ${tableName} (name String, age int) STORED AS carbondata " + + "TBLPROPERTIES('AUTO_LOAD_MERGE'='true','COMPACTION_LEVEL_THRESHOLD'='2,2')") + val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default", tableName) + sql(s"insert into ${tableName} select 'abc1',1") Review comment: fixed ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [hidden email] With regards, Apache Git Services |
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402796976 ########## File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSegmentsByQueryCommand.scala ########## @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.management + +import org.apache.spark.sql.{CarbonEnv, DataFrame, Row, SparkSession} +import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference} +import org.apache.spark.sql.execution.command.{Checker, DataCommand} + +import org.apache.carbondata.api.CarbonStore +import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException +import org.apache.carbondata.core.metadata.schema.table.CarbonTable +import org.apache.carbondata.core.statusmanager.LoadMetadataDetails + +case class SegmentRow( + id: String, status: String, loadStartTime: String, spentTimeMs: Long, partitions: Seq[String], + dataSize: Long, indexSize: Long, mergedToId: String, format: String, path: String, + loadEndTime: String, segmentFileName: String) + +case class CarbonShowSegmentsByQueryCommand( + databaseNameOp: Option[String], + tableName: String, + query: String, + showHistory: Boolean = false) + extends DataCommand { + + private lazy val df = createDataFrame + + override def output: Seq[Attribute] = { + df.queryExecution.analyzed.output.map { attr => + AttributeReference(attr.name, attr.dataType, nullable = false)() + } + } + + override def processData(sparkSession: SparkSession): Seq[Row] = { + Checker.validateTableExists(databaseNameOp, tableName, sparkSession) + val carbonTable = CarbonEnv.getCarbonTable(databaseNameOp, tableName)(sparkSession) + setAuditTable(carbonTable) + if (!carbonTable.getTableInfo.isTransactionalTable) { + throw new MalformedCarbonCommandException("Unsupported operation on non transactional table") + } + try { + df.collect() + } catch { + case ex: Throwable => + throw new MalformedCarbonCommandException("failed to run query: " + ex.getMessage) + } finally { + sparkSession.catalog.dropTempView(makeTempViewName(carbonTable)) + } + } + + override protected def opName: String = "SHOW SEGMENTS" + + private def createDataFrame: DataFrame = { + val sparkSession = SparkSession.getActiveSession.get + val carbonTable = CarbonEnv.getCarbonTable(databaseNameOp, tableName)(sparkSession) + val tablePath = carbonTable.getTablePath + val segments = CarbonStore.readSegments(tablePath, showHistory) + val tempViewName = makeTempViewName(carbonTable) + registerSegmentRowView(sparkSession, tempViewName, carbonTable, segments) + try { + sparkSession.sql(query) + } catch { + case t: Throwable => + sparkSession.catalog.dropTempView(tempViewName) + throw t + } + } + + /** + * Generate temp 
view name for the query to execute + */ + private def makeTempViewName(carbonTable: CarbonTable): String = { + s"${carbonTable.getTableName}_segments" + } + + private def registerSegmentRowView( + sparkSession: SparkSession, + tempViewName: String, + carbonTable: CarbonTable, + segments: Array[LoadMetadataDetails]): Unit = { + + // populate a dataframe containing all segment information + val tablePath = carbonTable.getTablePath + val segmentRows = segments.toSeq.map { segment => + val mergedToId = CarbonStore.getMergeTo(segment) + val path = CarbonStore.getSegmentPath(segment) + val startTime = CarbonStore.getLoadStartTime(segment) + val endTime = CarbonStore.getLoadEndTime(segment) + val spentTime = CarbonStore.getSpentTimeAsMillis(segment) + val (dataSize, indexSize) = CarbonStore.getDataAndIndexSize(tablePath, segment) + val partitions = CarbonStore.getPartitions(tablePath, segment) + SegmentRow( + segment.getLoadName, + segment.getSegmentStatus.toString, + startTime, + spentTime, + partitions, + dataSize, + indexSize, + mergedToId, + segment.getFileFormat.toString, + path, + endTime, + if (segment.getSegmentFile == null) "NA" else segment.getSegmentFile) + } + + // create a temp view using the populated dataframe and execute the query on it + val df = sparkSession.createDataFrame(segmentRows) + checkIfTableExist(sparkSession, tempViewName) Review comment: fixed ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [hidden email] With regards, Apache Git Services |
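The guard this review comment refers to is `checkIfTableExist`, which has to stop the command from shadowing a real table named `<table>_segments` (the "already exists" error asserted in the test case earlier in this thread). The body below is only a sketch of what such a guard could look like, assuming the imports already present in the quoted file; it is not the code that was merged:

```
// Sketch of the guard (not the merged implementation); it assumes the rule is simply
// "refuse to register the temp view if an existing table already owns the name".
private def checkIfTableExist(sparkSession: SparkSession, tempViewName: String): Unit = {
  if (sparkSession.catalog.tableExists(tempViewName)) {
    throw new MalformedCarbonCommandException(
      s"$tempViewName already exists, SHOW SEGMENTS cannot create its temp view")
  }
}
```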
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402797613 ########## File path: integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonShowSegmentsByQueryCommand.scala ########## @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.execution.command.management + +import org.apache.spark.sql.{CarbonEnv, DataFrame, Row, SparkSession} +import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference} +import org.apache.spark.sql.execution.command.{Checker, DataCommand} + +import org.apache.carbondata.api.CarbonStore +import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException +import org.apache.carbondata.core.metadata.schema.table.CarbonTable +import org.apache.carbondata.core.statusmanager.LoadMetadataDetails + +case class SegmentRow( + id: String, status: String, loadStartTime: String, spentTimeMs: Long, partitions: Seq[String], + dataSize: Long, indexSize: Long, mergedToId: String, format: String, path: String, + loadEndTime: String, segmentFileName: String) + +case class CarbonShowSegmentsByQueryCommand( Review comment: Since their output schema is different, it is better to keep as two command ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [hidden email] With regards, Apache Git Services |
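The argument for keeping two commands is that plain SHOW SEGMENTS has a schema known at planning time, while the AS SELECT variant can only take its schema from the analyzed user query (the `output` override quoted above). A rough illustration of the fixed-schema side; the column names are taken from the header assertions in ShowSegmentTestCase, not from the actual CarbonShowSegmentsCommand source, which may declare them differently:

```
// Illustration only: a fixed output schema as the plain command could declare it.
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.types.StringType

def plainShowSegmentsOutput: Seq[Attribute] =
  Seq("ID", "Status", "Load Start Time", "Spent", "Partition", "Data Size", "Index Size")
    .map(name => AttributeReference(name, StringType, nullable = false)())

// The by-query command cannot do this: its columns are whatever the user's SELECT
// projects, hence the separate command class.
```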
jackylk commented on a change in pull request #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#discussion_r402805135 ########## File path: integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/segment/ShowSegmentTestCase.scala ########## @@ -0,0 +1,207 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.carbondata.spark.testsuite.segment + +import org.apache.spark.sql.{AnalysisException, CarbonEnv, Row} +import org.apache.spark.sql.test.util.QueryTest +import org.scalatest.BeforeAndAfterAll + +import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException +import org.apache.carbondata.core.metadata.CarbonMetadata +import org.apache.carbondata.core.statusmanager.SegmentStatusManager + +/** + * Test Class for SHOW SEGMENTS command + */ +class ShowSegmentTestCase extends QueryTest with BeforeAndAfterAll { Review comment: I verified user can do ``` show segments on source as select id, second(loadStartTime), second(loadEndTime) from source_segments ``` But I can not put in testcase since load time is different when CI runs ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [hidden email] With regards, Apache Git Services |
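Since the load timestamps differ from run to run, one way a CI-safe assertion could still cover the time-function query is to check only the shape of the result rather than wall-clock values. A sketch, not part of the PR, assuming the same fixtures as the success-case test above:

```
// Sketch of a CI-safe check (not in the PR): run the time-function query and only
// assert on row count, since the actual seconds depend on when the load happened.
val withTimes = sql(
  """show segments on source as
    |select id, second(loadStartTime) as startSec, second(loadEndTime) as endSec
    |from source_segments""".stripMargin).collect()
assert(withTimes.length == sql("show segments on source").collect().length)
```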
CarbonDataQA1 commented on issue #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#issuecomment-608361345 Build Success with Spark 2.4.5, Please check CI http://121.244.95.60:12545/job/ApacheCarbon_PR_Builder_2.4.5/918/ ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [hidden email] With regards, Apache Git Services |
CarbonDataQA1 commented on issue #3657: [CARBONDATA-3736] Support show segment by query
URL: https://github.com/apache/carbondata/pull/3657#issuecomment-608364240 Build Success with Spark 2.3.4, Please check CI http://121.244.95.60:12545/job/ApacheCarbonPRBuilder2.3/2627/ ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: [hidden email] With regards, Apache Git Services |