From d7e96ec5cae0681c63cbf1b5e89d34fe1b7bbe76 Mon Sep 17 00:00:00 2001
From: Anukalp
Date: Wed, 6 May 2026 21:48:27 +0530
Subject: [PATCH 1/2] Fix switch case warning

---
 .../aggregation/AccumulatorFactory.java       |  12 +-
 .../GroupedMaxMinByBaseAccumulator.java       |   1 +
 .../unary/scalar/util/SpookyHashV2Utils.java  | 157 +++++----
 .../executor/ConfigPlanExecutor.java          |  10 +-
 .../impl/node/RemoveDataNodesProcedure.java   |   1 +
 ...sertionEventTableParserTabletIterator.java | 207 +++++------
 .../IoTConsensusV2Receiver.java               |   1 +
 .../thrift/IoTDBDataNodeReceiver.java         | 320 ++++++++----------
 .../operator/source/SeriesScanUtil.java       |   1 +
 9 files changed, 358 insertions(+), 352 deletions(-)

diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/source/relational/aggregation/AccumulatorFactory.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/source/relational/aggregation/AccumulatorFactory.java
index 48a1abc83f38e..0ff49d5ecf274 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/source/relational/aggregation/AccumulatorFactory.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/source/relational/aggregation/AccumulatorFactory.java
@@ -424,11 +424,15 @@ public static TableAccumulator createBuiltinMultiInputAccumulator(
       TAggregationType aggregationType, List<TSDataType> inputDataTypes) {
     switch (aggregationType) {
       case MAX_BY:
-        checkState(inputDataTypes.size() == 2, "Wrong inputDataTypes size.");
-        // return new MaxByAccumulator(inputDataTypes.get(0), inputDataTypes.get(1));
+        {
+          checkState(inputDataTypes.size() == 2, "Wrong inputDataTypes size.");
+          // return new MaxByAccumulator(inputDataTypes.get(0), inputDataTypes.get(1));
+        }
       case MIN_BY:
-        checkState(inputDataTypes.size() == 2, "Wrong inputDataTypes size.");
-        // return new MinByAccumulator(inputDataTypes.get(0), inputDataTypes.get(1));
+        {
+          checkState(inputDataTypes.size() == 2, "Wrong inputDataTypes size.");
+          // return new MinByAccumulator(inputDataTypes.get(0), inputDataTypes.get(1));
+        }
       default:
         throw new IllegalArgumentException("Invalid Aggregation function: " + aggregationType);
     }
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/source/relational/aggregation/grouped/GroupedMaxMinByBaseAccumulator.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/source/relational/aggregation/grouped/GroupedMaxMinByBaseAccumulator.java
index 0f59a575f45c6..7d1220ae6ddca 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/source/relational/aggregation/grouped/GroupedMaxMinByBaseAccumulator.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/execution/operator/source/relational/aggregation/grouped/GroupedMaxMinByBaseAccumulator.java
@@ -645,6 +645,7 @@ private void updateX(int groupId, Column xColumn, int xIndex) {
         break;
       case BOOLEAN:
         xBooleanValues.set(groupId, xColumn.getBoolean(xIndex));
+        break;
       default:
         throw new UnSupportedDataTypeException(
             String.format("Unsupported data type in MAX_BY/MIN_BY Aggregation: %s", xDataType));
diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/transformation/dag/column/unary/scalar/util/SpookyHashV2Utils.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/transformation/dag/column/unary/scalar/util/SpookyHashV2Utils.java
index d726f4deff6a7..cf5c549cfc085 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/transformation/dag/column/unary/scalar/util/SpookyHashV2Utils.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/transformation/dag/column/unary/scalar/util/SpookyHashV2Utils.java
@@ -198,44 +198,55 @@ private static long shortHash64(byte[] data, int offset, int length, long seed)
     // last 15 bytes
     h3 += ((long) length) << 56;
     switch (remainder) {
-      case 15:
-        h3 += (data[current + 14] & 0xFFL) << 48;
-      case 14:
-        h3 += (data[current + 13] & 0xFFL) << 40;
-      case 13:
-        h3 += (data[current + 12] & 0xFFL) << 32;
-      case 12:
-        h3 += getUnsignedIntFromBytesWithLittleEndian(data, current + 8);
+      case 0:
+        h2 += MAGIC_CONSTANT;
+        h3 += MAGIC_CONSTANT;
+        break;
+      case 1:
+        h2 += (data[current] & 0xFFL);
+        break;
+      case 2:
+        h2 += (data[current + 1] & 0xFFL) << 8;
+        break;
+      case 3:
+        h2 += (data[current + 2] & 0xFFL) << 16;
+        break;
+      case 4:
+        h2 += getUnsignedIntFromBytesWithLittleEndian(data, current);
+        break;
+      case 5:
+        h2 += (data[current + 4] & 0xFFL) << 32;
+        break;
+      case 6:
+        h2 += (data[current + 5] & 0xFFL) << 40;
+        break;
+      case 7:
+        h2 += (data[current + 6] & 0xFFL) << 48;
+        break;
+      case 8:
         h2 += getLongFromBytesWithLittleEndian(data, current);
         break;
-      case 11:
-        h3 += (data[current + 10] & 0xFFL) << 16;
-      case 10:
-        h3 += (data[current + 9] & 0xFFL) << 8;
       case 9:
         h3 += (data[current + 8] & 0xFFL);
-      case 8:
+        break;
+      case 10:
+        h3 += (data[current + 9] & 0xFFL) << 8;
+        break;
+      case 11:
+        h3 += (data[current + 10] & 0xFFL) << 16;
+        break;
+      case 12:
+        h3 += getUnsignedIntFromBytesWithLittleEndian(data, current + 8);
         h2 += getLongFromBytesWithLittleEndian(data, current);
         break;
-      case 7:
-        h2 += (data[current + 6] & 0xFFL) << 48;
-      case 6:
-        h2 += (data[current + 5] & 0xFFL) << 40;
-      case 5:
-        h2 += (data[current + 4] & 0xFFL) << 32;
-      case 4:
-        h2 += getUnsignedIntFromBytesWithLittleEndian(data, current);
+      case 13:
+        h3 += (data[current + 12] & 0xFFL) << 32;
         break;
-      case 3:
-        h2 += (data[current + 2] & 0xFFL) << 16;
-      case 2:
-        h2 += (data[current + 1] & 0xFFL) << 8;
-      case 1:
-        h2 += (data[current] & 0xFFL);
+      case 14:
+        h3 += (data[current + 13] & 0xFFL) << 40;
         break;
-      case 0:
-        h2 += MAGIC_CONSTANT;
-        h3 += MAGIC_CONSTANT;
+      case 15:
+        h3 += (data[current + 14] & 0xFFL) << 48;
         break;
       default:
         throw new AssertionError("Unexpected value for remainder: " + remainder);
@@ -387,29 +398,40 @@ private static long longHash64(byte[] data, int offset, int length, long seed) {
 
     // handle remaining whole 8-byte sequences
     switch (sequences) {
-      case 11:
-        h10 += getLongFromBytesWithLittleEndian(data, current + 10 * SIZE_OF_LONG);
-      case 10:
-        h9 += getLongFromBytesWithLittleEndian(data, current + 9 * SIZE_OF_LONG);
-      case 9:
-        h8 += getLongFromBytesWithLittleEndian(data, current + 8 * SIZE_OF_LONG);
-      case 8:
-        h7 += getLongFromBytesWithLittleEndian(data, current + 7 * SIZE_OF_LONG);
-      case 7:
-        h6 += getLongFromBytesWithLittleEndian(data, current + 6 * SIZE_OF_LONG);
-      case 6:
-        h5 += getLongFromBytesWithLittleEndian(data, current + 5 * SIZE_OF_LONG);
-      case 5:
-        h4 += getLongFromBytesWithLittleEndian(data, current + 4 * SIZE_OF_LONG);
-      case 4:
-        h3 += getLongFromBytesWithLittleEndian(data, current + 3 * SIZE_OF_LONG);
-      case 3:
-        h2 += getLongFromBytesWithLittleEndian(data, current + 2 * SIZE_OF_LONG);
-      case 2:
-        h1 += getLongFromBytesWithLittleEndian(data, current + SIZE_OF_LONG);
+      case 0:
+        break;
       case 1:
         h0 += getLongFromBytesWithLittleEndian(data, current);
-      case 0:
+        break;
+      case 2:
+        h1 += getLongFromBytesWithLittleEndian(data, current + SIZE_OF_LONG);
+        break;
+      case 3:
+        h2 += getLongFromBytesWithLittleEndian(data, current + 2 * SIZE_OF_LONG);
+        break;
+      case 4:
+        h3 += getLongFromBytesWithLittleEndian(data, current + 3 * SIZE_OF_LONG);
+        break;
+      case 5:
+        h4 += getLongFromBytesWithLittleEndian(data, current + 4 * SIZE_OF_LONG);
+        break;
+      case 6:
+        h5 += getLongFromBytesWithLittleEndian(data, current + 5 * SIZE_OF_LONG);
+        break;
+      case 7:
+        h6 += getLongFromBytesWithLittleEndian(data, current + 6 * SIZE_OF_LONG);
+        break;
+      case 8:
+        h7 += getLongFromBytesWithLittleEndian(data, current + 7 * SIZE_OF_LONG);
+        break;
+      case 9:
+        h8 += getLongFromBytesWithLittleEndian(data, current + 8 * SIZE_OF_LONG);
+        break;
+      case 10:
+        h9 += getLongFromBytesWithLittleEndian(data, current + 9 * SIZE_OF_LONG);
+        break;
+      case 11:
+        h10 += getLongFromBytesWithLittleEndian(data, current + 10 * SIZE_OF_LONG);
         break;
       default:
         throw new AssertionError("Unexpected value for sequences: " + sequences);
@@ -420,21 +442,28 @@ private static long longHash64(byte[] data, int offset, int length, long seed) {
     // read the last sequence of 0-7 bytes
     long last = 0;
     switch (limit - current) {
-      case 7:
-        last |= (data[current + 6] & 0xFFL) << 48;
-      case 6:
-        last |= (data[current + 5] & 0xFFL) << 40;
-      case 5:
-        last |= (data[current + 4] & 0xFFL) << 32;
-      case 4:
-        last |= (data[current + 3] & 0xFFL) << 24;
-      case 3:
-        last |= (data[current + 2] & 0xFFL) << 16;
-      case 2:
-        last |= (data[current + 1] & 0xFFL) << 8;
+      case 0:
+        break;
       case 1:
         last |= (data[current] & 0xFFL);
-      case 0:
+        break;
+      case 2:
+        last |= (data[current + 1] & 0xFFL) << 8;
+        break;
+      case 3:
+        last |= (data[current + 2] & 0xFFL) << 16;
+        break;
+      case 4:
+        last |= (data[current + 3] & 0xFFL) << 24;
+        break;
+      case 5:
+        last |= (data[current + 4] & 0xFFL) << 32;
+        break;
+      case 6:
+        last |= (data[current + 5] & 0xFFL) << 40;
+        break;
+      case 7:
+        last |= (data[current + 6] & 0xFFL) << 48;
        break;
       default:
         throw new AssertionError("Unexpected size for last sequence: " + (limit - current));
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java
index b2bae24de38b7..93c27fde51095 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/executor/ConfigPlanExecutor.java
@@ -418,10 +418,12 @@ public TSStatus executeNonQueryPlan(ConfigPhysicalPlan physicalPlan)
         return clusterSchemaInfo.adjustMaxRegionGroupCount(
             (AdjustMaxRegionGroupNumPlan) physicalPlan);
       case DeleteDatabase:
-        try {
-          return clusterSchemaInfo.deleteDatabase((DeleteDatabasePlan) physicalPlan);
-        } finally {
-          partitionInfo.deleteDatabase((DeleteDatabasePlan) physicalPlan);
+        {
+          try {
+            return clusterSchemaInfo.deleteDatabase((DeleteDatabasePlan) physicalPlan);
+          } finally {
+            partitionInfo.deleteDatabase((DeleteDatabasePlan) physicalPlan);
+          }
         }
       case PreDeleteDatabase:
         return partitionInfo.preDeleteDatabase((PreDeleteDatabasePlan) physicalPlan);
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedure.java
index a531d67955cec..9190fa60e6ccb 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedure.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/node/RemoveDataNodesProcedure.java
@@ -116,6 +116,7 @@ protected Flow executeFromState(ConfigNodeProcedureEnv env, RemoveDataNodeState
               removedDataNodes);
           return Flow.NO_MORE_STATE;
         }
+        break;
       case REMOVE_DATA_NODE_PREPARE:
         Map<Integer, NodeStatus> removedNodeStatusMap = new HashMap<>();
         removedDataNodes.forEach(
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParserTabletIterator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParserTabletIterator.java
index f05cf872c798b..285c8825cf1f0 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParserTabletIterator.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/tsfile/parser/table/TsFileInsertionEventTableParserTabletIterator.java
@@ -166,127 +166,136 @@ public boolean hasNext() {
             return true;
           }
         case INIT_DATA:
-          if (chunkReader != null && chunkReader.hasNextSatisfiedPage()) {
-            batchData = chunkReader.nextPageData();
-            final long size = PipeMemoryWeightUtil.calculateBatchDataRamBytesUsed(batchData);
-            if (allocatedMemoryBlockForBatchData.getMemoryUsageInBytes() < size) {
-              PipeDataNodeResourceManager.memory()
-                  .forceResize(allocatedMemoryBlockForBatchData, size);
+          {
+            if (chunkReader != null && chunkReader.hasNextSatisfiedPage()) {
+              batchData = chunkReader.nextPageData();
+              final long size = PipeMemoryWeightUtil.calculateBatchDataRamBytesUsed(batchData);
+              if (allocatedMemoryBlockForBatchData.getMemoryUsageInBytes() < size) {
+                PipeDataNodeResourceManager.memory()
+                    .forceResize(allocatedMemoryBlockForBatchData, size);
+              }
+              state = State.CHECK_DATA;
+              break;
            }
-            state = State.CHECK_DATA;
-            break;
          }
        case INIT_CHUNK_READER:
-          if (currentChunkMetadata != null
-              || (chunkMetadataList != null && chunkMetadataList.hasNext())) {
-            if (currentChunkMetadata == null) {
-              currentChunkMetadata = chunkMetadataList.next();
-              timeChunk = null;
-              offset = 0;
+          {
+            if (currentChunkMetadata != null
+                || (chunkMetadataList != null && chunkMetadataList.hasNext())) {
+              if (currentChunkMetadata == null) {
+                currentChunkMetadata = chunkMetadataList.next();
+                timeChunk = null;
+                offset = 0;
+              }
+              initChunkReader(currentChunkMetadata);
+              state = State.INIT_DATA;
+              break;
            }
-            initChunkReader(currentChunkMetadata);
-            state = State.INIT_DATA;
-            break;
          }
        case INIT_CHUNK_METADATA:
-          if (deviceMetaIterator != null && deviceMetaIterator.hasNext()) {
-            final Pair<IDeviceID, MetadataIndexNode> pair = deviceMetaIterator.next();
-
-            long size = 0;
-            List<AbstractAlignedChunkMetadata> iChunkMetadataList =
-                reader.getAlignedChunkMetadata(pair.left, true);
-
-            Iterator<AbstractAlignedChunkMetadata> chunkMetadataIterator =
-                iChunkMetadataList.iterator();
-            while (chunkMetadataIterator.hasNext()) {
-              final AbstractAlignedChunkMetadata alignedChunkMetadata =
-                  chunkMetadataIterator.next();
-              if (alignedChunkMetadata == null) {
-                throw new PipeException(
-                    "Table model tsfile parsing does not support this type of ChunkMeta");
-              }
-
-              // Reduce the number of times Chunks are read
-              if (alignedChunkMetadata.getEndTime() < startTime
-                  || alignedChunkMetadata.getStartTime() > endTime) {
-                chunkMetadataIterator.remove();
-                continue;
-              }
+          {
+            if (deviceMetaIterator != null && deviceMetaIterator.hasNext()) {
+              final Pair<IDeviceID, MetadataIndexNode> pair = deviceMetaIterator.next();
+
+              long size = 0;
+              List<AbstractAlignedChunkMetadata> iChunkMetadataList =
+                  reader.getAlignedChunkMetadata(pair.left, true);
+
+              Iterator<AbstractAlignedChunkMetadata> chunkMetadataIterator =
+                  iChunkMetadataList.iterator();
+              while (chunkMetadataIterator.hasNext()) {
+                final AbstractAlignedChunkMetadata alignedChunkMetadata =
+                    chunkMetadataIterator.next();
+                if (alignedChunkMetadata == null) {
+                  throw new PipeException(
+                      "Table model tsfile parsing does not support this type of ChunkMeta");
+                }
-              Iterator<IChunkMetadata> iChunkMetadataIterator =
-                  alignedChunkMetadata.getValueChunkMetadataList().iterator();
-              while (iChunkMetadataIterator.hasNext()) {
-                IChunkMetadata iChunkMetadata = iChunkMetadataIterator.next();
-                if (iChunkMetadata == null) {
-                  iChunkMetadataIterator.remove();
+                // Reduce the number of times Chunks are read
+                if (alignedChunkMetadata.getEndTime() < startTime
+                    || alignedChunkMetadata.getStartTime() > endTime) {
+                  chunkMetadataIterator.remove();
                  continue;
                }
-                if (!modifications.isEmpty()
-                    && ModsOperationUtil.isAllDeletedByMods(
-                        pair.getLeft(),
-                        iChunkMetadata.getMeasurementUid(),
-                        alignedChunkMetadata.getStartTime(),
-                        alignedChunkMetadata.getEndTime(),
-                        modifications)) {
-                  iChunkMetadataIterator.remove();
+                Iterator<IChunkMetadata> iChunkMetadataIterator =
+                    alignedChunkMetadata.getValueChunkMetadataList().iterator();
+                while (iChunkMetadataIterator.hasNext()) {
+                  IChunkMetadata iChunkMetadata = iChunkMetadataIterator.next();
+                  if (iChunkMetadata == null) {
+                    iChunkMetadataIterator.remove();
+                    continue;
+                  }
+
+                  if (!modifications.isEmpty()
+                      && ModsOperationUtil.isAllDeletedByMods(
+                          pair.getLeft(),
+                          iChunkMetadata.getMeasurementUid(),
+                          alignedChunkMetadata.getStartTime(),
+                          alignedChunkMetadata.getEndTime(),
+                          modifications)) {
+                    iChunkMetadataIterator.remove();
+                  }
                }
-              if (alignedChunkMetadata.getValueChunkMetadataList().isEmpty()) {
-                chunkMetadataIterator.remove();
-                continue;
-              }
+                if (alignedChunkMetadata.getValueChunkMetadataList().isEmpty()) {
+                  chunkMetadataIterator.remove();
+                  continue;
+                }
-              size +=
-                  PipeMemoryWeightUtil.calculateAlignedChunkMetaBytesUsed(alignedChunkMetadata);
-              if (allocatedMemoryBlockForChunkMeta.getMemoryUsageInBytes() < size) {
-                PipeDataNodeResourceManager.memory()
-                    .forceResize(allocatedMemoryBlockForChunkMeta, size);
+                size +=
+                    PipeMemoryWeightUtil.calculateAlignedChunkMetaBytesUsed(alignedChunkMetadata);
+                if (allocatedMemoryBlockForChunkMeta.getMemoryUsageInBytes() < size) {
+                  PipeDataNodeResourceManager.memory()
+                      .forceResize(allocatedMemoryBlockForChunkMeta, size);
+                }
              }
-            }
-            deviceID = pair.getLeft();
-            chunkMetadataList = iChunkMetadataList.iterator();
+              deviceID = pair.getLeft();
+              chunkMetadataList = iChunkMetadataList.iterator();
-            state = State.INIT_CHUNK_READER;
-            break;
+              state = State.INIT_CHUNK_READER;
+              break;
+            }
          }
        case INIT_DEVICE_META:
-          if (filteredTableSchemaIterator != null && filteredTableSchemaIterator.hasNext()) {
-            final Map.Entry<String, TableSchema> entry = filteredTableSchemaIterator.next();
-            tableName = entry.getKey();
-            final TableSchema tableSchema = entry.getValue();
-            // The table name has changed, set to false
-            isSameTableName = false;
-
-            final MetadataIndexNode tableRoot = fileMetadata.getTableMetadataIndexNode(tableName);
-            deviceMetaIterator = metadataQuerier.deviceIterator(tableRoot, null);
-
-            final int columnSchemaSize = tableSchema.getColumnSchemas().size();
-            dataTypeList = new ArrayList<>();
-            columnTypes = new ArrayList<>();
-            measurementList = new ArrayList<>();
-
-            for (int i = 0; i < columnSchemaSize; i++) {
-              final IMeasurementSchema schema = tableSchema.getColumnSchemas().get(i);
-              final ColumnCategory columnCategory = tableSchema.getColumnTypes().get(i);
-              if (schema != null
-                  && schema.getMeasurementName() != null
-                  && !schema.getMeasurementName().isEmpty()) {
-                final String measurementName = schema.getMeasurementName();
-                if (ColumnCategory.TAG.equals(columnCategory)) {
-                  columnTypes.add(ColumnCategory.TAG);
-                  measurementList.add(measurementName);
-                  dataTypeList.add(schema.getType());
+          {
+            if (filteredTableSchemaIterator != null && filteredTableSchemaIterator.hasNext()) {
+              final Map.Entry<String, TableSchema> entry = filteredTableSchemaIterator.next();
+              tableName = entry.getKey();
+              final TableSchema tableSchema = entry.getValue();
+              // The table name has changed, set to false
+              isSameTableName = false;
+
+              final MetadataIndexNode tableRoot =
+                  fileMetadata.getTableMetadataIndexNode(tableName);
+              deviceMetaIterator = metadataQuerier.deviceIterator(tableRoot, null);
+
+              final int columnSchemaSize = tableSchema.getColumnSchemas().size();
+              dataTypeList = new ArrayList<>();
+              columnTypes = new ArrayList<>();
+              measurementList = new ArrayList<>();
+
+              for (int i = 0; i < columnSchemaSize; i++) {
+                final IMeasurementSchema schema = tableSchema.getColumnSchemas().get(i);
+                final ColumnCategory columnCategory = tableSchema.getColumnTypes().get(i);
+                if (schema != null
+                    && schema.getMeasurementName() != null
+                    && !schema.getMeasurementName().isEmpty()) {
+                  final String measurementName = schema.getMeasurementName();
+                  if (ColumnCategory.TAG.equals(columnCategory)) {
+                    columnTypes.add(ColumnCategory.TAG);
+                    measurementList.add(measurementName);
+                    dataTypeList.add(schema.getType());
+                  }
                }
              }
+              deviceIdSize = dataTypeList.size();
+              state = State.INIT_CHUNK_METADATA;
+              break;
            }
-            deviceIdSize = dataTypeList.size();
-            state = State.INIT_CHUNK_METADATA;
-            break;
+            return false;
          }
-          return false;
      }
    }
  } catch (Exception e) {
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2Receiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2Receiver.java
index 27ce077252ef0..191e2bf3282e1 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2Receiver.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/iotconsensusv2/IoTConsensusV2Receiver.java
@@ -280,6 +280,7 @@ private TIoTConsensusV2TransferResp loadEvent(final TIoTConsensusV2TransferReq r
               IoTConsensusV2TsFileSealWithModReq.fromTIoTConsensusV2TransferReq(req));
       case TRANSFER_TABLET_BATCH:
         LOGGER.info("IoTConsensusV2 transfer batch hasn't been implemented yet.");
+        break;
       default:
         break;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java
index c10fdbc4f6720..a3dd749136d59 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/receiver/protocol/thrift/IoTDBDataNodeReceiver.java
@@ -207,231 +207,189 @@ public synchronized TPipeTransferResp receive(final TPipeTransferReq req) {
       }
       switch (requestType) {
        case HANDSHAKE_DATANODE_V1:
-          {
-            try {
-              if (PipeConfig.getInstance().isPipeEnableMemoryCheck()
-                  && PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes()
-                      < PipeConfig.getInstance().getPipeMinimumReceiverMemory()) {
-                return new TPipeTransferResp(
-                    RpcUtils.getStatus(
-                        TSStatusCode.PIPE_HANDSHAKE_ERROR.getStatusCode(),
-                        "The receiver memory is not enough to handle the handshake request from datanode."));
-              }
-              return handleTransferHandshakeV1(
-                  PipeTransferDataNodeHandshakeV1Req.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordHandshakeDatanodeV1Timer(System.nanoTime() - startTime);
+          try {
+            if (PipeConfig.getInstance().isPipeEnableMemoryCheck()
+                && PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes()
+                    < PipeConfig.getInstance().getPipeMinimumReceiverMemory()) {
+              return new TPipeTransferResp(
+                  RpcUtils.getStatus(
+                      TSStatusCode.PIPE_HANDSHAKE_ERROR.getStatusCode(),
+                      "The receiver memory is not enough to handle the handshake request from datanode."));
+            }
+            return handleTransferHandshakeV1(
+                PipeTransferDataNodeHandshakeV1Req.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordHandshakeDatanodeV1Timer(System.nanoTime() - startTime);
          }
        case HANDSHAKE_DATANODE_V2:
-          {
-            try {
-              if (PipeConfig.getInstance().isPipeEnableMemoryCheck()
-                  && PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes()
-                      < PipeConfig.getInstance().getPipeMinimumReceiverMemory()) {
-                return new TPipeTransferResp(
-                    RpcUtils.getStatus(
-                        TSStatusCode.PIPE_HANDSHAKE_ERROR.getStatusCode(),
-                        "The receiver memory is not enough to handle the handshake request from datanode."));
-              }
-              return handleTransferHandshakeV2(
-                  PipeTransferDataNodeHandshakeV2Req.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordHandshakeDatanodeV2Timer(System.nanoTime() - startTime);
+          try {
+            if (PipeConfig.getInstance().isPipeEnableMemoryCheck()
+                && PipeDataNodeResourceManager.memory().getFreeMemorySizeInBytes()
+                    < PipeConfig.getInstance().getPipeMinimumReceiverMemory()) {
+              return new TPipeTransferResp(
+                  RpcUtils.getStatus(
+                      TSStatusCode.PIPE_HANDSHAKE_ERROR.getStatusCode(),
+                      "The receiver memory is not enough to handle the handshake request from datanode."));
+            }
+            return handleTransferHandshakeV2(
+                PipeTransferDataNodeHandshakeV2Req.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordHandshakeDatanodeV2Timer(System.nanoTime() - startTime);
          }
        case TRANSFER_TABLET_INSERT_NODE:
-          {
-            try {
-              return handleTransferTabletInsertNode(
-                  PipeTransferTabletInsertNodeReq.fromTPipeTransferReq(req));
-
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTabletInsertNodeTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferTabletInsertNode(
+                PipeTransferTabletInsertNodeReq.fromTPipeTransferReq(req));
+
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTabletInsertNodeTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_TABLET_INSERT_NODE_V2:
-          {
-            try {
-              return handleTransferTabletInsertNode(
-                  PipeTransferTabletInsertNodeReqV2.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTabletInsertNodeV2Timer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferTabletInsertNode(
+                PipeTransferTabletInsertNodeReqV2.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTabletInsertNodeV2Timer(System.nanoTime() - startTime);
          }
        case TRANSFER_TABLET_RAW:
-          {
-            try {
-              return handleTransferTabletRaw(PipeTransferTabletRawReq.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTabletRawTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferTabletRaw(PipeTransferTabletRawReq.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTabletRawTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_TABLET_RAW_V2:
-          {
-            try {
-              return handleTransferTabletRaw(
-                  PipeTransferTabletRawReqV2.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTabletRawV2Timer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferTabletRaw(PipeTransferTabletRawReqV2.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTabletRawV2Timer(System.nanoTime() - startTime);
          }
        case TRANSFER_TABLET_BINARY:
-          {
-            try {
-              return handleTransferTabletBinary(
-                  PipeTransferTabletBinaryReq.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTabletBinaryTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferTabletBinary(
+                PipeTransferTabletBinaryReq.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTabletBinaryTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_TABLET_BINARY_V2:
-          {
-            try {
-              return handleTransferTabletBinary(
-                  PipeTransferTabletBinaryReqV2.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTabletBinaryV2Timer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferTabletBinary(
+                PipeTransferTabletBinaryReqV2.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTabletBinaryV2Timer(System.nanoTime() - startTime);
          }
        case TRANSFER_TABLET_BATCH:
-          {
-            try {
-              return handleTransferTabletBatch(
-                  PipeTransferTabletBatchReq.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTabletBatchTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferTabletBatch(
+                PipeTransferTabletBatchReq.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTabletBatchTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_TABLET_BATCH_V2:
-          {
-            try {
-              return handleTransferTabletBatchV2(
-                  PipeTransferTabletBatchReqV2.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTabletBatchV2Timer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferTabletBatchV2(
+                PipeTransferTabletBatchReqV2.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTabletBatchV2Timer(System.nanoTime() - startTime);
          }
        case TRANSFER_TS_FILE_PIECE:
-          {
-            try {
-              return handleTransferFilePiece(
-                  PipeTransferTsFilePieceReq.fromTPipeTransferReq(req),
-                  req instanceof AirGapPseudoTPipeTransferRequest,
-                  true);
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTsFilePieceTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferFilePiece(
+                PipeTransferTsFilePieceReq.fromTPipeTransferReq(req),
+                req instanceof AirGapPseudoTPipeTransferRequest,
+                true);
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTsFilePieceTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_TS_FILE_SEAL:
-          {
-            try {
-              return handleTransferFileSealV1(
-                  PipeTransferTsFileSealReq.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTsFileSealTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferFileSealV1(PipeTransferTsFileSealReq.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTsFileSealTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_TS_FILE_PIECE_WITH_MOD:
-          {
-            try {
-              return handleTransferFilePiece(
-                  PipeTransferTsFilePieceWithModReq.fromTPipeTransferReq(req),
-                  req instanceof AirGapPseudoTPipeTransferRequest,
-                  false);
-
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTsFilePieceWithModTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferFilePiece(
+                PipeTransferTsFilePieceWithModReq.fromTPipeTransferReq(req),
+                req instanceof AirGapPseudoTPipeTransferRequest,
+                false);
+
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTsFilePieceWithModTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_TS_FILE_SEAL_WITH_MOD:
-          {
-            try {
-              return handleTransferFileSealV2(
-                  PipeTransferTsFileSealWithModReq.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferTsFileSealWithModTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferFileSealV2(
+                PipeTransferTsFileSealWithModReq.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferTsFileSealWithModTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_PLAN_NODE:
-          {
-            try {
-              return handleTransferSchemaPlan(PipeTransferPlanNodeReq.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferSchemaPlanTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferSchemaPlan(PipeTransferPlanNodeReq.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferSchemaPlanTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_SCHEMA_SNAPSHOT_PIECE:
-          {
-            try {
-              return handleTransferFilePiece(
-                  PipeTransferSchemaSnapshotPieceReq.fromTPipeTransferReq(req),
-                  req instanceof AirGapPseudoTPipeTransferRequest,
-                  false);
-
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferSchemaSnapshotPieceTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferFilePiece(
+                PipeTransferSchemaSnapshotPieceReq.fromTPipeTransferReq(req),
+                req instanceof AirGapPseudoTPipeTransferRequest,
+                false);
+
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferSchemaSnapshotPieceTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_SCHEMA_SNAPSHOT_SEAL:
-          {
-            try {
-              return handleTransferFileSealV2(
-                  PipeTransferSchemaSnapshotSealReq.fromTPipeTransferReq(req));
-
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferSchemaSnapshotSealTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferFileSealV2(
+                PipeTransferSchemaSnapshotSealReq.fromTPipeTransferReq(req));
+
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferSchemaSnapshotSealTimer(System.nanoTime() - startTime);
          }
        case HANDSHAKE_CONFIGNODE_V1:
        case HANDSHAKE_CONFIGNODE_V2:
        case TRANSFER_CONFIG_PLAN:
        case TRANSFER_CONFIG_SNAPSHOT_PIECE:
        case TRANSFER_CONFIG_SNAPSHOT_SEAL:
-          {
-            try {
-              // Config requests will first be received by the DataNode receiver,
-              // then transferred to ConfigNode receiver to execute.
-              return handleTransferConfigPlan(req);
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferConfigPlanTimer(System.nanoTime() - startTime);
+          try {
+            // Config requests will first be received by the DataNode receiver,
+            // then transferred to ConfigNode receiver to execute.
+            return handleTransferConfigPlan(req);
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferConfigPlanTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_SLICE:
-          {
-            try {
-              return handleTransferSlice(PipeTransferSliceReq.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferSliceTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return handleTransferSlice(PipeTransferSliceReq.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferSliceTimer(System.nanoTime() - startTime);
          }
        case TRANSFER_COMPRESSED:
-          {
-            try {
-              return receive(PipeTransferCompressedReq.fromTPipeTransferReq(req));
-            } finally {
-              PipeDataNodeReceiverMetrics.getInstance()
-                  .recordTransferCompressedTimer(System.nanoTime() - startTime);
-            }
+          try {
+            return receive(PipeTransferCompressedReq.fromTPipeTransferReq(req));
+          } finally {
+            PipeDataNodeReceiverMetrics.getInstance()
+                .recordTransferCompressedTimer(System.nanoTime() - startTime);
          }
        default:
          break;
diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java
index 8412161e63e39..f4c207272c89c 100644
--- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java
+++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/SeriesScanUtil.java
@@ -1379,6 +1379,7 @@ private TsBlock getTransferedDataTypeTsBlock(TsBlock tsBlock) {
         break;
       case OBJECT:
         newValueColumns[i] = valueColumns[i];
+        break;
       case VECTOR:
       case UNKNOWN:
       default:

From 7ea4016ca1ef7d15d51de69c42df79cc5d74ae78 Mon Sep 17 00:00:00 2001
From: Anukalp
Date: Fri, 8 May 2026 22:18:11 +0530
Subject: [PATCH 2/2] Update switch case warning fixes

---
 .../unary/scalar/util/SpookyHashV2Utils.java  | 186 +++++++++---------
 1 file changed, 93 insertions(+), 93 deletions(-)

diff --git a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/transformation/dag/column/unary/scalar/util/SpookyHashV2Utils.java b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/transformation/dag/column/unary/scalar/util/SpookyHashV2Utils.java
index cf5c549cfc085..c1c0f42c64123 100644
--- a/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/transformation/dag/column/unary/scalar/util/SpookyHashV2Utils.java
+++ b/iotdb-core/calc-commons/src/main/java/org/apache/iotdb/calc/transformation/dag/column/unary/scalar/util/SpookyHashV2Utils.java
@@ -198,55 +198,55 @@ private static long shortHash64(byte[] data, int offset, int length, long seed)
     // last 15 bytes
     h3 += ((long) length) << 56;
     switch (remainder) {
-      case 0:
-        h2 += MAGIC_CONSTANT;
-        h3 += MAGIC_CONSTANT;
-        break;
-      case 1:
-        h2 += (data[current] & 0xFFL);
-        break;
-      case 2:
-        h2 += (data[current + 1] & 0xFFL) << 8;
-        break;
-      case 3:
-        h2 += (data[current + 2] & 0xFFL) << 16;
-        break;
-      case 4:
-        h2 += getUnsignedIntFromBytesWithLittleEndian(data, current);
-        break;
-      case 5:
-        h2 += (data[current + 4] & 0xFFL) << 32;
-        break;
-      case 6:
-        h2 += (data[current + 5] & 0xFFL) << 40;
-        break;
-      case 7:
-        h2 += (data[current + 6] & 0xFFL) << 48;
-        break;
-      case 8:
+      case 15:
+        h3 += (data[current + 14] & 0xFFL) << 48;
+        // fall through
+      case 14:
+        h3 += (data[current + 13] & 0xFFL) << 40;
+        // fall through
+      case 13:
+        h3 += (data[current + 12] & 0xFFL) << 32;
+        // fall through
+      case 12:
+        h3 += getUnsignedIntFromBytesWithLittleEndian(data, current + 8);
         h2 += getLongFromBytesWithLittleEndian(data, current);
         break;
-      case 9:
-        h3 += (data[current + 8] & 0xFFL);
-        break;
-      case 10:
-        h3 += (data[current + 9] & 0xFFL) << 8;
-        break;
       case 11:
         h3 += (data[current + 10] & 0xFFL) << 16;
-        break;
-      case 12:
-        h3 += getUnsignedIntFromBytesWithLittleEndian(data, current + 8);
+        // fall through
+      case 10:
+        h3 += (data[current + 9] & 0xFFL) << 8;
+        // fall through
+      case 9:
+        h3 += (data[current + 8] & 0xFFL);
+        // fall through
+      case 8:
         h2 += getLongFromBytesWithLittleEndian(data, current);
         break;
-      case 13:
-        h3 += (data[current + 12] & 0xFFL) << 32;
+      case 7:
+        h2 += (data[current + 6] & 0xFFL) << 48;
+        // fall through
+      case 6:
+        h2 += (data[current + 5] & 0xFFL) << 40;
+        // fall through
+      case 5:
+        h2 += (data[current + 4] & 0xFFL) << 32;
+        // fall through
+      case 4:
+        h2 += getUnsignedIntFromBytesWithLittleEndian(data, current);
        break;
-      case 14:
-        h3 += (data[current + 13] & 0xFFL) << 40;
+      case 3:
+        h2 += (data[current + 2] & 0xFFL) << 16;
+        // fall through
+      case 2:
+        h2 += (data[current + 1] & 0xFFL) << 8;
+        // fall through
+      case 1:
+        h2 += (data[current] & 0xFFL);
        break;
-      case 15:
-        h3 += (data[current + 14] & 0xFFL) << 48;
+      case 0:
+        h2 += MAGIC_CONSTANT;
+        h3 += MAGIC_CONSTANT;
        break;
      default:
        throw new AssertionError("Unexpected value for remainder: " + remainder);
@@ -398,40 +398,40 @@ private static long longHash64(byte[] data, int offset, int length, long seed) {
 
     // handle remaining whole 8-byte sequences
     switch (sequences) {
-      case 0:
-        break;
-      case 1:
-        h0 += getLongFromBytesWithLittleEndian(data, current);
-        break;
-      case 2:
-        h1 += getLongFromBytesWithLittleEndian(data, current + SIZE_OF_LONG);
-        break;
-      case 3:
-        h2 += getLongFromBytesWithLittleEndian(data, current + 2 * SIZE_OF_LONG);
-        break;
-      case 4:
-        h3 += getLongFromBytesWithLittleEndian(data, current + 3 * SIZE_OF_LONG);
-        break;
-      case 5:
-        h4 += getLongFromBytesWithLittleEndian(data, current + 4 * SIZE_OF_LONG);
-        break;
-      case 6:
-        h5 += getLongFromBytesWithLittleEndian(data, current + 5 * SIZE_OF_LONG);
-        break;
-      case 7:
-        h6 += getLongFromBytesWithLittleEndian(data, current + 6 * SIZE_OF_LONG);
-        break;
-      case 8:
-        h7 += getLongFromBytesWithLittleEndian(data, current + 7 * SIZE_OF_LONG);
-        break;
-      case 9:
-        h8 += getLongFromBytesWithLittleEndian(data, current + 8 * SIZE_OF_LONG);
-        break;
-      case 10:
-        h9 += getLongFromBytesWithLittleEndian(data, current + 9 * SIZE_OF_LONG);
-        break;
       case 11:
        h10 += getLongFromBytesWithLittleEndian(data, current + 10 * SIZE_OF_LONG);
+        // fall through
+      case 10:
+        h9 += getLongFromBytesWithLittleEndian(data, current + 9 * SIZE_OF_LONG);
+        // fall through
+      case 9:
+        h8 += getLongFromBytesWithLittleEndian(data, current + 8 * SIZE_OF_LONG);
+        // fall through
+      case 8:
+        h7 += getLongFromBytesWithLittleEndian(data, current + 7 * SIZE_OF_LONG);
+        // fall through
+      case 7:
+        h6 += getLongFromBytesWithLittleEndian(data, current + 6 * SIZE_OF_LONG);
+        // fall through
+      case 6:
+        h5 += getLongFromBytesWithLittleEndian(data, current + 5 * SIZE_OF_LONG);
+        // fall through
+      case 5:
+        h4 += getLongFromBytesWithLittleEndian(data, current + 4 * SIZE_OF_LONG);
+        // fall through
+      case 4:
+        h3 += getLongFromBytesWithLittleEndian(data, current + 3 * SIZE_OF_LONG);
+        // fall through
+      case 3:
+        h2 += getLongFromBytesWithLittleEndian(data, current + 2 * SIZE_OF_LONG);
+        // fall through
+      case 2:
+        h1 += getLongFromBytesWithLittleEndian(data, current + SIZE_OF_LONG);
+        // fall through
+      case 1:
+        h0 += getLongFromBytesWithLittleEndian(data, current);
+        // fall through
+      case 0:
        break;
      default:
        throw new AssertionError("Unexpected value for sequences: " + sequences);
@@ -442,28 +442,28 @@ private static long longHash64(byte[] data, int offset, int length, long seed) {
     // read the last sequence of 0-7 bytes
     long last = 0;
     switch (limit - current) {
-      case 0:
-        break;
-      case 1:
-        last |= (data[current] & 0xFFL);
-        break;
-      case 2:
-        last |= (data[current + 1] & 0xFFL) << 8;
-        break;
-      case 3:
-        last |= (data[current + 2] & 0xFFL) << 16;
-        break;
-      case 4:
-        last |= (data[current + 3] & 0xFFL) << 24;
-        break;
-      case 5:
-        last |= (data[current + 4] & 0xFFL) << 32;
-        break;
-      case 6:
-        last |= (data[current + 5] & 0xFFL) << 40;
-        break;
      case 7:
        last |= (data[current + 6] & 0xFFL) << 48;
+        // fall through
+      case 6:
+        last |= (data[current + 5] & 0xFFL) << 40;
+        // fall through
+      case 5:
+        last |= (data[current + 4] & 0xFFL) << 32;
+        // fall through
+      case 4:
+        last |= (data[current + 3] & 0xFFL) << 24;
+        // fall through
+      case 3:
+        last |= (data[current + 2] & 0xFFL) << 16;
+        // fall through
+      case 2:
+        last |= (data[current + 1] & 0xFFL) << 8;
+        // fall through
+      case 1:
+        last |= (data[current] & 0xFFL);
+        // fall through
+      case 0:
        break;
      default:
        throw new AssertionError("Unexpected size for last sequence: " + (limit - current));
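
A note on why PATCH 2/2 reverts the SpookyHashV2Utils changes from PATCH 1/2: the remainder/sequences/last switches are cumulative byte loaders, where case N is meant to run its own statement and then drop into case N-1 so that all N remaining bytes are accumulated before the shared break. The break-per-case rewrite in PATCH 1/2 changed the hash results (for a remainder of 3 it would load only byte 2 instead of bytes 2, 1, and 0), so PATCH 2/2 restores the fall-through order and documents each drop-through point with a // fall through comment. The following sketch is illustrative only, not part of the patch; the class, method, and parameter names are invented. It shows the restored pattern in isolation and one way to keep the compiler's fallthrough lint quiet:

// FallthroughDemo.java -- standalone sketch; this class does not exist in
// the IoTDB tree, and the names below are made up for illustration.
public final class FallthroughDemo {

  // Packs the trailing 0-7 bytes of `data` starting at `current` into a
  // little-endian long, mirroring the shape of the `last` switch restored
  // by PATCH 2/2. The fall-through is deliberate: case N must also execute
  // the bodies of cases N-1..1 so that every remaining byte is accumulated.
  @SuppressWarnings("fallthrough") // javac's -Xlint:fallthrough ignores comments
  static long packTail(byte[] data, int current, int remaining) {
    long last = 0;
    switch (remaining) {
      case 7:
        last |= (data[current + 6] & 0xFFL) << 48;
        // fall through
      case 6:
        last |= (data[current + 5] & 0xFFL) << 40;
        // fall through
      case 5:
        last |= (data[current + 4] & 0xFFL) << 32;
        // fall through
      case 4:
        last |= (data[current + 3] & 0xFFL) << 24;
        // fall through
      case 3:
        last |= (data[current + 2] & 0xFFL) << 16;
        // fall through
      case 2:
        last |= (data[current + 1] & 0xFFL) << 8;
        // fall through
      case 1:
        last |= (data[current] & 0xFFL);
        // fall through
      case 0:
        break;
      default:
        throw new AssertionError("remaining must be in [0, 7]: " + remaining);
    }
    return last;
  }

  public static void main(String[] args) {
    byte[] data = {0x01, 0x02, 0x03};
    // Prints 30201: bytes 2, 1, and 0 are all accumulated. With a break
    // after every case (the PATCH 1/2 shape) this would yield 30000.
    System.out.println(Long.toHexString(packTail(data, 0, 3)));
  }
}

If the build is linted with Checkstyle, its FallThrough check accepts a trailing "fall through" comment as relief, which is what the comments added in PATCH 2/2 provide; javac's own -Xlint:fallthrough does not read comments, so a method-level @SuppressWarnings("fallthrough"), as in the sketch above, is needed to silence it. The cumulative bodies here genuinely require the fall-through chain, so merging labels or switching to Java 14 arrow cases would not express the same logic.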