From c152285e69b0c02fca2e5cbd633c3553616083e7 Mon Sep 17 00:00:00 2001 From: Yuya Ebihara Date: Fri, 1 Nov 2024 10:45:58 +0900 Subject: [PATCH] Use AssertJ hasSize method --- .../execution/BaseTestSqlTaskManager.java | 4 +- .../io/trino/execution/TestCommitTask.java | 2 +- .../trino/execution/TestCreateTableTask.java | 2 +- .../io/trino/execution/TestNodeScheduler.java | 10 ++-- .../trino/execution/TestPageSplitterUtil.java | 2 +- .../io/trino/execution/TestQueryStats.java | 4 +- .../io/trino/execution/TestRollbackTask.java | 2 +- .../java/io/trino/execution/TestSqlTask.java | 4 +- .../execution/TestStageStateMachine.java | 2 +- .../execution/TestStartTransactionTask.java | 4 +- .../TestTaskExecutorStuckSplits.java | 2 +- .../buffer/TestArbitraryOutputBuffer.java | 2 +- .../resourcegroups/TestResourceGroups.java | 4 +- .../scheduler/TestFixedCountScheduler.java | 6 +- .../TestMultiSourcePartitionedScheduler.java | 32 +++++----- ...rtitionedPipelinedOutputBufferManager.java | 2 +- .../scheduler/TestScaledWriterScheduler.java | 24 ++++---- .../TestSourcePartitionedScheduler.java | 56 +++++++++--------- .../scheduler/TestUniformNodeSelector.java | 20 +++---- .../warnings/TestDefaultWarningCollector.java | 4 +- .../TestHeartbeatFailureDetector.java | 2 +- .../java/io/trino/memory/TestMemoryPools.java | 2 +- .../metadata/TestDiscoveryNodeManager.java | 2 +- .../TestPolymorphicScalarFunction.java | 2 +- .../operator/AnnotationEngineAssertions.java | 12 ++-- .../io/trino/operator/OperatorAssertion.java | 2 +- .../io/trino/operator/PageAssertions.java | 2 +- .../TestAnnotationEngineForAggregates.java | 12 ++-- .../TestAnnotationEngineForScalars.java | 40 ++++++------- .../operator/TestDirectExchangeClient.java | 6 +- .../io/trino/operator/TestDriverStats.java | 2 +- .../trino/operator/TestExchangeOperator.java | 2 +- .../operator/TestGroupedTopNRankBuilder.java | 6 +- .../TestGroupedTopNRowNumberBuilder.java | 6 +- .../operator/TestHashAggregationOperator.java | 2 +- 
.../operator/TestHttpPageBufferClient.java | 22 +++---- .../io/trino/operator/TestPipelineStats.java | 4 +- .../trino/operator/TestRowNumberOperator.java | 16 ++--- .../TestScanFilterAndProjectOperator.java | 2 +- .../operator/TestTableFinishOperator.java | 4 +- .../java/io/trino/operator/TestTaskStats.java | 2 +- .../io/trino/operator/TestTypeSignature.java | 4 +- .../TestingExchangeHttpClientHandler.java | 4 +- .../TestApproximateMostFrequentHistogram.java | 10 ++-- .../join/TestNestedLoopBuildOperator.java | 6 +- .../join/unspilled/TestHashJoinOperator.java | 2 +- .../operator/output/TestPagePartitioner.java | 2 +- .../operator/project/TestPageProcessor.java | 18 +++--- .../io/trino/server/TestQueryResource.java | 12 ++-- .../io/trino/server/TestQueryStateInfo.java | 2 +- .../server/TestQueryStateInfoResource.java | 8 +-- .../server/security/jwt/TestJwkDecoder.java | 20 +++---- .../server/security/jwt/TestJwkService.java | 4 +- .../trino/spiller/TestBinaryFileSpiller.java | 4 +- .../spiller/TestFileSingleStreamSpiller.java | 4 +- .../TestFileSingleStreamSpillerFactory.java | 48 +++++++-------- .../TestGenericPartitioningSpiller.java | 2 +- .../sql/BenchmarkExpressionInterpreter.java | 2 +- .../io/trino/sql/gen/TestColumnarFilters.java | 4 +- .../sql/planner/TestEqualityInference.java | 2 +- .../sql/planner/TestSymbolAllocator.java | 2 +- .../trino/sql/planner/iterative/TestMemo.java | 2 +- .../io/trino/sql/query/QueryAssertions.java | 2 +- .../transaction/TestTransactionManager.java | 12 ++-- .../java/io/trino/type/TestRowOperators.java | 2 +- .../java/io/trino/util/TestDisjointSet.java | 16 ++--- .../test/java/io/trino/util/TestFailures.java | 8 +-- .../util/TestLong2LongOpenBigHashMap.java | 6 +- .../TestConnectorViewDefinition.java | 2 +- .../spi/predicate/TestEquatableValueSet.java | 4 +- .../java/io/trino/geospatial/TestKdbTree.java | 16 ++--- .../trino/hdfs/s3/TestTrinoS3FileSystem.java | 6 +- .../trino/hive/formats/FormatTestUtils.java | 8 +-- 
.../trino/hive/formats/avro/TestAvroBase.java | 2 +- .../avro/TestHiveAvroTypeBlockHandler.java | 14 ++--- .../src/test/java/io/trino/orc/OrcTester.java | 8 +-- .../io/trino/orc/TestOrcBloomFilters.java | 4 +- .../io/trino/orc/TestStructColumnReader.java | 10 ++-- .../orc/stream/AbstractTestValueStream.java | 2 +- .../orc/stream/TestBooleanOutputStream.java | 2 +- .../parquet/reader/TestParquetReader.java | 2 +- .../parquet/writer/TestParquetWriter.java | 4 +- ...seFileBasedConnectorAccessControlTest.java | 2 +- .../BaseFileBasedSystemAccessControlTest.java | 6 +- .../trino/decoder/avro/TestAvroDecoder.java | 16 ++--- .../io/trino/decoder/csv/TestCsvDecoder.java | 8 +-- .../trino/decoder/json/TestJsonDecoder.java | 6 +- .../decoder/protobuf/TestProtobufDecoder.java | 8 +-- .../io/trino/decoder/raw/TestRawDecoder.java | 10 ++-- .../cassandra/TestCassandraConnectorTest.java | 2 +- .../BaseDeltaLakeConnectorSmokeTest.java | 4 +- .../BaseDeltaLakeTableWithCustomLocation.java | 2 +- .../TestDeltaLakeCreateTableStatistics.java | 2 +- .../deltalake/TestDeltaLakePageSink.java | 4 +- .../deltalake/TestTransactionLogAccess.java | 8 +-- ...aLakeFileBasedTableStatisticsProvider.java | 2 +- .../TestDeltaLakeSchemaSupport.java | 4 +- .../checkpoint/TestTransactionLogTail.java | 4 +- .../plugin/hive/BaseHiveConnectorTest.java | 38 ++++++------ .../hive/TestBackgroundHiveSplitLoader.java | 26 ++++----- .../plugin/hive/TestHiveFileFormats.java | 2 +- .../plugin/hive/TestHiveSplitSource.java | 14 ++--- .../cache/TestCachingHiveMetastore.java | 14 ++--- .../glue/v1/TestGlueInputConverter.java | 4 +- .../plugin/hive/orc/TestOrcDeletedRows.java | 6 +- .../hive/orc/TestOrcPageSourceFactory.java | 4 +- .../predicate/TestParquetPredicateUtils.java | 4 +- .../TestMetastoreHiveStatisticsProvider.java | 2 +- .../plugin/hive/util/TestAcidTables.java | 28 ++++----- .../plugin/hive/util/TestHiveAcidUtils.java | 58 +++++++++---------- .../iceberg/BaseIcebergConnectorTest.java | 6 +- 
.../BaseIcebergMaterializedViewTest.java | 8 +-- .../TestIcebergOrcMetricsCollection.java | 38 ++++++------ .../iceberg/TestIcebergSplitSource.java | 2 +- .../trino/plugin/iceberg/TestIcebergV2.java | 2 +- .../plugin/iceberg/TestParquetPredicates.java | 2 +- .../plugin/iceberg/TestPartitionFields.java | 2 +- .../plugin/iceberg/TestSortFieldUtils.java | 2 +- .../iceberg/catalog/BaseTrinoCatalogTest.java | 10 ++-- .../catalog/glue/TestTrinoGlueCatalog.java | 2 +- .../catalog/rest/TestTrinoRestCatalog.java | 4 +- .../kafka/TestKafkaEventListenerConfig.java | 2 +- .../TestKinesisTableDescriptionSupplier.java | 4 +- .../plugin/kinesis/TestRecordAccess.java | 4 +- .../s3config/TestS3TableConfigClient.java | 2 +- .../plugin/memory/TestMemoryPagesStore.java | 4 +- .../plugin/pinot/TestPinotSplitManager.java | 2 +- ...seTestDbResourceGroupsFlywayMigration.java | 6 +- ...stDbResourceGroupConfigurationManager.java | 14 ++--- .../db/TestResourceGroupsDao.java | 6 +- .../db/TestDbSessionPropertyManager.java | 2 +- .../dateformat/TestDateFormatParser.java | 6 +- .../java/io/trino/verifier/TestShadowing.java | 16 ++--- .../verifier/TestVerifierRewriteQueries.java | 8 +-- .../io/trino/tests/product/TpcTestUtils.java | 2 +- ...TestIcebergFormatVersionCompatibility.java | 2 +- .../TestIcebergSparkCompatibility.java | 4 +- .../testing/AbstractTestAggregations.java | 4 +- .../AbstractTestEngineOnlyQueries.java | 42 +++++++------- .../io/trino/testing/AbstractTestQueries.java | 10 ++-- .../io/trino/testing/BaseConnectorTest.java | 4 +- .../execution/TestEventListenerBasic.java | 4 +- .../TestEventListenerWithSplits.java | 6 +- .../execution/TestPendingStageState.java | 2 +- .../trino/execution/TestStatementStats.java | 2 +- .../TestResourceGroupIntegration.java | 2 +- .../resourcegroups/db/TestQueuesDb.java | 2 +- .../io/trino/tests/TestMetadataManager.java | 6 +- .../trino/tests/TestMinWorkerRequirement.java | 10 ++-- 149 files changed, 587 insertions(+), 587 deletions(-) diff --git 
a/core/trino-main/src/test/java/io/trino/execution/BaseTestSqlTaskManager.java b/core/trino-main/src/test/java/io/trino/execution/BaseTestSqlTaskManager.java index 2fc4476fb9b9d3..10b09cd12e45d0 100644 --- a/core/trino-main/src/test/java/io/trino/execution/BaseTestSqlTaskManager.java +++ b/core/trino-main/src/test/java/io/trino/execution/BaseTestSqlTaskManager.java @@ -146,14 +146,14 @@ public void testSimpleQuery() BufferResult results = sqlTaskManager.getTaskResults(taskId, OUT, 0, DataSize.of(1, Unit.MEGABYTE)).getResultsFuture().get(); assertThat(results.isBufferComplete()).isFalse(); - assertThat(results.getSerializedPages().size()).isEqualTo(1); + assertThat(results.getSerializedPages()).hasSize(1); assertThat(getSerializedPagePositionCount(results.getSerializedPages().get(0))).isEqualTo(1); for (boolean moreResults = true; moreResults; moreResults = !results.isBufferComplete()) { results = sqlTaskManager.getTaskResults(taskId, OUT, results.getToken() + results.getSerializedPages().size(), DataSize.of(1, Unit.MEGABYTE)).getResultsFuture().get(); } assertThat(results.isBufferComplete()).isTrue(); - assertThat(results.getSerializedPages().size()).isEqualTo(0); + assertThat(results.getSerializedPages()).hasSize(0); // complete the task by calling destroy on it TaskInfo info = sqlTaskManager.destroyTaskResults(taskId, OUT); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestCommitTask.java b/core/trino-main/src/test/java/io/trino/execution/TestCommitTask.java index d95c9783e803eb..1e49a2a7150c49 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestCommitTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestCommitTask.java @@ -81,7 +81,7 @@ public void testCommit() .build(); QueryStateMachine stateMachine = createQueryStateMachine("COMMIT", session, transactionManager); assertThat(stateMachine.getSession().getTransactionId()).isPresent(); - assertThat(transactionManager.getAllTransactionInfos().size()).isEqualTo(1); + 
assertThat(transactionManager.getAllTransactionInfos()).hasSize(1); getFutureValue(new CommitTask(transactionManager).execute(new Commit(new NodeLocation(1, 1)), stateMachine, emptyList(), WarningCollector.NOOP)); assertThat(stateMachine.getQueryInfo(Optional.empty()).isClearTransactionId()).isTrue(); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestCreateTableTask.java b/core/trino-main/src/test/java/io/trino/execution/TestCreateTableTask.java index e672cdc1637f89..4d8ac85672a86f 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestCreateTableTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestCreateTableTask.java @@ -222,7 +222,7 @@ void testCreateWithNotNullColumns() getFutureValue(createTableTask.internalExecute(statement, transactionSession, emptyList(), output -> {})); assertThat(metadata.getCreateTableCallCount()).isEqualTo(1); List columns = metadata.getReceivedTableMetadata().get(0).getColumns(); - assertThat(columns.size()).isEqualTo(3); + assertThat(columns).hasSize(3); assertThat(columns.get(0).getName()).isEqualTo("a"); assertThat(columns.get(0).getType().getDisplayName().toUpperCase(ROOT)).isEqualTo("DATE"); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestNodeScheduler.java b/core/trino-main/src/test/java/io/trino/execution/TestNodeScheduler.java index 33be3b44747fdd..2393aa87958fed 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestNodeScheduler.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestNodeScheduler.java @@ -216,7 +216,7 @@ public void testTopologyAwareScheduling() } nonRackLocalSplits = Sets.difference(nonRackLocalSplits, new HashSet<>(assignments.values())); // Check that 3 of the splits were rejected, since they're non-local - assertThat(nonRackLocalSplits.size()).isEqualTo(3); + assertThat(nonRackLocalSplits).hasSize(3); // Assign rack-local splits ImmutableSet.Builder rackLocalSplits = ImmutableSet.builder(); @@ -246,7 +246,7 @@ public void 
testTopologyAwareScheduling() .build()); } unassigned = Sets.difference(unassigned, new HashSet<>(assignments.values())); - assertThat(unassigned.size()).isEqualTo(3); + assertThat(unassigned).hasSize(3); int rack1 = 0; int rack2 = 0; for (Split split : unassigned) { @@ -297,7 +297,7 @@ public void testBasicAssignment() splits.add(new Split(TEST_CATALOG_HANDLE, new TestSplitRemote())); } Multimap assignments = nodeSelector.computeAssignments(splits, ImmutableList.copyOf(taskMap.values())).getAssignments(); - assertThat(assignments.entries().size()).isEqualTo(assignments.size()); + assertThat(assignments.entries()).hasSize(assignments.size()); for (InternalNode node : activeCatalogNodes) { assertThat(assignments.keySet()).contains(node); } @@ -356,7 +356,7 @@ public void testBasicAssignmentMaxUnacknowledgedSplitsPerTask() splits.add(new Split(TEST_CATALOG_HANDLE, new TestSplitRemote())); } Multimap assignments = nodeSelector.computeAssignments(splits, ImmutableList.copyOf(taskMap.values())).getAssignments(); - assertThat(assignments.entries().size()).isEqualTo(activeCatalogNodes.size()); + assertThat(assignments.entries()).hasSize(activeCatalogNodes.size()); for (InternalNode node : activeCatalogNodes) { assertThat(assignments.keySet()).contains(node); } @@ -399,7 +399,7 @@ public void testMaxSplitsPerNodePerTask() // no split should be assigned to the newNode, as it already has // maxSplitsPerNode + maxSplitsPerNodePerTask assigned to it - assertThat(assignments.keySet().size()).isEqualTo(3); // Splits should be scheduled on the other three nodes + assertThat(assignments.keySet()).hasSize(3); // Splits should be scheduled on the other three nodes assertThat(assignments.keySet().contains(newNode)).isFalse(); // No splits scheduled on the maxed out node for (RemoteTask task : tasks) { diff --git a/core/trino-main/src/test/java/io/trino/execution/TestPageSplitterUtil.java b/core/trino-main/src/test/java/io/trino/execution/TestPageSplitterUtil.java index 
f25c83c2e87bf2..bcfa8360c44ada 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestPageSplitterUtil.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestPageSplitterUtil.java @@ -87,7 +87,7 @@ public void testSplitPageNonDecreasingPageSize() // the page should only be split in half as the recursion should terminate // after seeing that the size of the Page doesn't decrease - assertThat(pages.size()).isEqualTo(2); + assertThat(pages).hasSize(2); Page first = pages.get(0); Page second = pages.get(1); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestQueryStats.java b/core/trino-main/src/test/java/io/trino/execution/TestQueryStats.java index 8a296fd77d5fcb..c389d31bc6805c 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestQueryStats.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestQueryStats.java @@ -375,7 +375,7 @@ public static void assertExpectedQueryStats(QueryStats actual) assertThat(actual.getPhysicalWrittenDataSize()).isEqualTo(DataSize.ofBytes(47)); assertThat(actual.getFailedPhysicalWrittenDataSize()).isEqualTo(DataSize.ofBytes(48)); - assertThat(actual.getStageGcStatistics().size()).isEqualTo(1); + assertThat(actual.getStageGcStatistics()).hasSize(1); StageGcStatistics gcStatistics = actual.getStageGcStatistics().get(0); assertThat(gcStatistics.getStageId()).isEqualTo(101); assertThat(gcStatistics.getTasks()).isEqualTo(102); @@ -389,7 +389,7 @@ public static void assertExpectedQueryStats(QueryStats actual) assertThat(58).isEqualTo(actual.getLogicalWrittenDataSize().toBytes()); assertThat(DynamicFiltersStats.EMPTY).isEqualTo(actual.getDynamicFiltersStats()); - assertThat(actual.getOptimizerRulesSummaries().size()).isEqualTo(optimizerRulesSummaries.size()); + assertThat(actual.getOptimizerRulesSummaries()).hasSize(optimizerRulesSummaries.size()); for (int i = 0, end = optimizerRulesSummaries.size(); i < end; i++) { QueryPlanOptimizerStatistics actualRule = 
actual.getOptimizerRulesSummaries().get(i); QueryPlanOptimizerStatistics expectedRule = optimizerRulesSummaries.get(i); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestRollbackTask.java b/core/trino-main/src/test/java/io/trino/execution/TestRollbackTask.java index e382207af9c847..8a6f94660185c9 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestRollbackTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestRollbackTask.java @@ -74,7 +74,7 @@ public void testRollback() .build(); QueryStateMachine stateMachine = createQueryStateMachine("ROLLBACK", session, transactionManager); assertThat(stateMachine.getSession().getTransactionId()).isPresent(); - assertThat(transactionManager.getAllTransactionInfos().size()).isEqualTo(1); + assertThat(transactionManager.getAllTransactionInfos()).hasSize(1); getFutureValue(new RollbackTask(transactionManager).execute(new Rollback(new NodeLocation(1, 1)), stateMachine, emptyList(), WarningCollector.NOOP)); assertThat(stateMachine.getQueryInfo(Optional.empty()).isClearTransactionId()).isTrue(); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestSqlTask.java b/core/trino-main/src/test/java/io/trino/execution/TestSqlTask.java index a9f325cf926ffd..b4af2f6b6f2b58 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestSqlTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestSqlTask.java @@ -192,13 +192,13 @@ public void testSimpleQuery() BufferResult results = sqlTask.getTaskResults(OUT, 0, DataSize.of(1, MEGABYTE)).get(); assertThat(results.isBufferComplete()).isFalse(); - assertThat(results.getSerializedPages().size()).isEqualTo(1); + assertThat(results.getSerializedPages()).hasSize(1); assertThat(getSerializedPagePositionCount(results.getSerializedPages().get(0))).isEqualTo(1); for (boolean moreResults = true; moreResults; moreResults = !results.isBufferComplete()) { results = sqlTask.getTaskResults(OUT, results.getToken() + 
results.getSerializedPages().size(), DataSize.of(1, MEGABYTE)).get(); } - assertThat(results.getSerializedPages().size()).isEqualTo(0); + assertThat(results.getSerializedPages()).hasSize(0); // complete the task by calling destroy on it TaskInfo info = sqlTask.destroyTaskResults(OUT); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestStageStateMachine.java b/core/trino-main/src/test/java/io/trino/execution/TestStageStateMachine.java index d74bc56d4cd4c1..b0f6114c08e409 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestStageStateMachine.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestStageStateMachine.java @@ -232,7 +232,7 @@ public void testGetBasicStageInfo() assertThat(stageInfo.getState()).isEqualTo(PLANNED); assertThat(stageInfo.isCoordinatorOnly()).isFalse(); assertThat(stageInfo.getSubStages()).isEmpty(); - assertThat(stageInfo.getTasks().size()).isEqualTo(taskInfos.size()); + assertThat(stageInfo.getTasks()).hasSize(taskInfos.size()); BasicStageStats stats = stageInfo.getStageStats(); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestStartTransactionTask.java b/core/trino-main/src/test/java/io/trino/execution/TestStartTransactionTask.java index 1964d23510d7e0..e6cd335f0f0ede 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestStartTransactionTask.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestStartTransactionTask.java @@ -137,7 +137,7 @@ public void testStartTransaction() getFutureValue(new StartTransactionTask(transactionManager).execute(new StartTransaction(new NodeLocation(1, 1), ImmutableList.of()), stateMachine, emptyList(), WarningCollector.NOOP)); assertThat(stateMachine.getQueryInfo(Optional.empty()).isClearTransactionId()).isFalse(); assertThat(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId()).isPresent(); - assertThat(transactionManager.getAllTransactionInfos().size()).isEqualTo(1); + 
assertThat(transactionManager.getAllTransactionInfos()).hasSize(1); TransactionInfo transactionInfo = transactionManager.getTransactionInfo(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId().get()); assertThat(transactionInfo.isAutoCommitContext()).isFalse(); @@ -160,7 +160,7 @@ public void testStartTransactionExplicitModes() WarningCollector.NOOP)); assertThat(stateMachine.getQueryInfo(Optional.empty()).isClearTransactionId()).isFalse(); assertThat(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId()).isPresent(); - assertThat(transactionManager.getAllTransactionInfos().size()).isEqualTo(1); + assertThat(transactionManager.getAllTransactionInfos()).hasSize(1); TransactionInfo transactionInfo = transactionManager.getTransactionInfo(stateMachine.getQueryInfo(Optional.empty()).getStartedTransactionId().get()); assertThat(transactionInfo.getIsolationLevel()).isEqualTo(IsolationLevel.SERIALIZABLE); diff --git a/core/trino-main/src/test/java/io/trino/execution/TestTaskExecutorStuckSplits.java b/core/trino-main/src/test/java/io/trino/execution/TestTaskExecutorStuckSplits.java index 1e0f6e4853a49b..8f207aa261eb9e 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TestTaskExecutorStuckSplits.java +++ b/core/trino-main/src/test/java/io/trino/execution/TestTaskExecutorStuckSplits.java @@ -103,7 +103,7 @@ public void testFailStuckSplitTasks() mockSplitRunner.waitForFinish(); List taskInfos = sqlTaskManager.getAllTaskInfo(); - assertThat(taskInfos.size()).isEqualTo(1); + assertThat(taskInfos).hasSize(1); TaskInfo taskInfo = pollTerminatingTaskInfoUntilDone(sqlTaskManager, taskInfos.get(0)); assertThat(taskInfo.taskStatus().getState()).isEqualTo(TaskState.FAILED); diff --git a/core/trino-main/src/test/java/io/trino/execution/buffer/TestArbitraryOutputBuffer.java b/core/trino-main/src/test/java/io/trino/execution/buffer/TestArbitraryOutputBuffer.java index da9f80173c64fa..68a7904f7cf020 100644 --- 
a/core/trino-main/src/test/java/io/trino/execution/buffer/TestArbitraryOutputBuffer.java +++ b/core/trino-main/src/test/java/io/trino/execution/buffer/TestArbitraryOutputBuffer.java @@ -464,7 +464,7 @@ public void testResumeFromPreviousPosition() secondReads.add(buffer.get(completed, result.getNextToken(), sizeOfPages(1))); } // Test sanity - assertThat(secondReads.size()).isEqualTo(ids.length); + assertThat(secondReads).hasSize(ids.length); // Completion order should be identical to the first iteration at this point for (int i = 0; i < ids.length; i++) { diff --git a/core/trino-main/src/test/java/io/trino/execution/resourcegroups/TestResourceGroups.java b/core/trino-main/src/test/java/io/trino/execution/resourcegroups/TestResourceGroups.java index 18211ca8a463a0..e1099a81323125 100644 --- a/core/trino-main/src/test/java/io/trino/execution/resourcegroups/TestResourceGroups.java +++ b/core/trino-main/src/test/java/io/trino/execution/resourcegroups/TestResourceGroups.java @@ -1262,7 +1262,7 @@ public void testGetResourceGroupStateInfo() assertThat(rootInfo.memoryUsage()).isEqualTo(DataSize.ofBytes(0)); assertThat(rootInfo.cpuUsage().toMillis()).isEqualTo(0); List subGroups = rootInfo.subGroups().get(); - assertThat(subGroups.size()).isEqualTo(2); + assertThat(subGroups).hasSize(2); assertGroupInfoEquals(subGroups.get(0), rootA.getInfo()); assertThat(subGroups.get(0).id()).isEqualTo(rootA.getId()); assertThat(subGroups.get(0).state()).isEqualTo(CAN_QUEUE); @@ -1286,7 +1286,7 @@ public void testGetResourceGroupStateInfo() assertThat(rootInfo.maxQueuedQueries()).isEqualTo(root.getMaxQueuedQueries()); assertThat(rootInfo.numQueuedQueries()).isEqualTo(19); List runningQueries = rootInfo.runningQueries().get(); - assertThat(runningQueries.size()).isEqualTo(1); + assertThat(runningQueries).hasSize(1); QueryStateInfo queryInfo = runningQueries.get(0); assertThat(queryInfo.getResourceGroupId()).isEqualTo(Optional.of(rootB.getId())); } diff --git 
a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestFixedCountScheduler.java b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestFixedCountScheduler.java index 239f9334fcfd7f..c2d8d3e14fb014 100644 --- a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestFixedCountScheduler.java +++ b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestFixedCountScheduler.java @@ -77,7 +77,7 @@ public void testSingleNode() ScheduleResult result = nodeScheduler.schedule(); assertThat(result.isFinished()).isTrue(); assertThat(result.getBlocked().isDone()).isTrue(); - assertThat(result.getNewTasks().size()).isEqualTo(1); + assertThat(result.getNewTasks()).hasSize(1); assertThat(result.getNewTasks().iterator().next().getNodeId()).isEqualTo("other 0"); } @@ -94,8 +94,8 @@ public void testMultipleNodes() ScheduleResult result = nodeScheduler.schedule(); assertThat(result.isFinished()).isTrue(); assertThat(result.getBlocked().isDone()).isTrue(); - assertThat(result.getNewTasks().size()).isEqualTo(5); - assertThat(result.getNewTasks().stream().map(RemoteTask::getNodeId).collect(toImmutableSet()).size()).isEqualTo(5); + assertThat(result.getNewTasks()).hasSize(5); + assertThat(result.getNewTasks().stream().map(RemoteTask::getNodeId).collect(toImmutableSet())).hasSize(5); } private static List generateRandomNodes(int count) diff --git a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestMultiSourcePartitionedScheduler.java b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestMultiSourcePartitionedScheduler.java index 23161d0a4ac7e6..50c0f97fe0eaba 100644 --- a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestMultiSourcePartitionedScheduler.java +++ b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestMultiSourcePartitionedScheduler.java @@ -193,7 +193,7 @@ public void testScheduleSplitsBatchedNoBlocking() assertThat(scheduleResult.getBlocked().isDone()).isTrue(); // first three splits create 
new tasks - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(i == 0 ? 3 : 0); + assertThat(scheduleResult.getNewTasks()).hasSize(i == 0 ? 3 : 0); } for (RemoteTask remoteTask : stage.getAllTasks()) { @@ -221,12 +221,12 @@ public void testScheduleSplitsBatchedBlockingSplitSource() ScheduleResult scheduleResult = scheduler.schedule(); assertThat(scheduleResult.isFinished()).isFalse(); assertThat(scheduleResult.getBlocked().isDone()).isTrue(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(3); + assertThat(scheduleResult.getNewTasks()).hasSize(3); scheduleResult = scheduler.schedule(); assertThat(scheduleResult.isFinished()).isFalse(); assertThat(scheduleResult.getBlocked().isDone()).isFalse(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(0); + assertThat(scheduleResult.getNewTasks()).hasSize(0); assertThat(scheduleResult.getBlockedReason()).isEqualTo(Optional.of(WAITING_FOR_SOURCE)); blockingSplitSource.addSplits(2, true); @@ -234,7 +234,7 @@ public void testScheduleSplitsBatchedBlockingSplitSource() scheduleResult = scheduler.schedule(); assertThat(scheduleResult.getBlocked().isDone()).isTrue(); assertThat(scheduleResult.getSplitsScheduled()).isEqualTo(2); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(0); + assertThat(scheduleResult.getNewTasks()).hasSize(0); assertThat(scheduleResult.getBlockedReason()).isEqualTo(Optional.empty()); assertThat(scheduleResult.isFinished()).isTrue(); @@ -266,7 +266,7 @@ public void testScheduleSplitsTasksAreFull() assertThat(scheduleResult.getSplitsScheduled()).isEqualTo(300); assertThat(scheduleResult.isFinished()).isFalse(); assertThat(scheduleResult.getBlocked().isDone()).isFalse(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(3); + assertThat(scheduleResult.getNewTasks()).hasSize(3); assertThat(scheduleResult.getBlockedReason()).isEqualTo(Optional.of(SPLIT_QUEUES_FULL)); assertThat(stage.getAllTasks().stream().mapToInt(task -> 
task.getPartitionedSplitsInfo().getCount()).sum()).isEqualTo(300); @@ -301,8 +301,8 @@ public void testBalancedSplitAssignment() ScheduleResult scheduleResult = scheduler.schedule(); assertThat(scheduleResult.getBlocked().isDone()).isFalse(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(3); - assertThat(firstStage.getAllTasks().size()).isEqualTo(3); + assertThat(scheduleResult.getNewTasks()).hasSize(3); + assertThat(firstStage.getAllTasks()).hasSize(3); for (RemoteTask remoteTask : firstStage.getAllTasks()) { PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo(); // All splits were balanced between nodes @@ -321,8 +321,8 @@ public void testBalancedSplitAssignment() assertEffectivelyFinished(scheduleResult, scheduler); assertThat(scheduleResult.getBlocked().isDone()).isTrue(); assertThat(scheduleResult.isFinished()).isTrue(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(1); - assertThat(firstStage.getAllTasks().size()).isEqualTo(4); + assertThat(scheduleResult.getNewTasks()).hasSize(1); + assertThat(firstStage.getAllTasks()).hasSize(4); assertThat(firstStage.getAllTasks().get(0).getPartitionedSplitsInfo().getCount()).isEqualTo(5); assertThat(firstStage.getAllTasks().get(1).getPartitionedSplitsInfo().getCount()).isEqualTo(5); @@ -342,8 +342,8 @@ public void testBalancedSplitAssignment() assertEffectivelyFinished(scheduleResult, secondScheduler); assertThat(scheduleResult.getBlocked().isDone()).isTrue(); assertThat(scheduleResult.isFinished()).isTrue(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(4); - assertThat(secondStage.getAllTasks().size()).isEqualTo(4); + assertThat(scheduleResult.getNewTasks()).hasSize(4); + assertThat(secondStage.getAllTasks()).hasSize(4); for (RemoteTask task : secondStage.getAllTasks()) { assertThat(task.getPartitionedSplitsInfo().getCount()).isEqualTo(5); @@ -368,7 +368,7 @@ public void testScheduleEmptySources() ScheduleResult scheduleResult = scheduler.schedule(); // If 
both split sources produce no splits then internal schedulers add one split - it can be expected by some operators e.g. AggregationOperator - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(2); + assertThat(scheduleResult.getNewTasks()).hasSize(2); assertEffectivelyFinished(scheduleResult, scheduler); stage.abort(); @@ -406,7 +406,7 @@ public void testDynamicFiltersUnblockedOnBlockedBuildSource() // make sure dynamic filtering collecting task was created immediately assertThat(stage.getState()).isEqualTo(PLANNED); scheduler.start(); - assertThat(stage.getAllTasks().size()).isEqualTo(1); + assertThat(stage.getAllTasks()).hasSize(1); assertThat(stage.getState()).isEqualTo(SCHEDULING); // make sure dynamic filter is initially blocked @@ -442,7 +442,7 @@ public void testNoNewTaskScheduledWhenChildStageBufferIsOverUtilized() // the queues of 3 running nodes should be full ScheduleResult scheduleResult = scheduler.schedule(); assertThat(scheduleResult.getBlockedReason()).isEqualTo(Optional.of(SPLIT_QUEUES_FULL)); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(3); + assertThat(scheduleResult.getNewTasks()).hasSize(3); assertThat(scheduleResult.getSplitsScheduled()).isEqualTo(300); for (RemoteTask remoteTask : scheduleResult.getNewTasks()) { PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo(); @@ -453,7 +453,7 @@ public void testNoNewTaskScheduledWhenChildStageBufferIsOverUtilized() nodeManager.addNodes(new InternalNode("other4", URI.create("http://127.0.0.4:14"), NodeVersion.UNKNOWN, false)); scheduleResult = scheduler.schedule(); assertThat(scheduleResult.getBlockedReason()).isEqualTo(Optional.of(SPLIT_QUEUES_FULL)); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(0); + assertThat(scheduleResult.getNewTasks()).hasSize(0); assertThat(scheduleResult.getSplitsScheduled()).isEqualTo(0); } @@ -473,7 +473,7 @@ private static void assertEffectivelyFinished(ScheduleResult scheduleResult, Sta ScheduleResult 
nextScheduleResult = scheduler.schedule(); assertThat(nextScheduleResult.isFinished()).isTrue(); assertThat(nextScheduleResult.getBlocked().isDone()).isTrue(); - assertThat(nextScheduleResult.getNewTasks().size()).isEqualTo(0); + assertThat(nextScheduleResult.getNewTasks()).hasSize(0); assertThat(nextScheduleResult.getSplitsScheduled()).isEqualTo(0); } diff --git a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestPartitionedPipelinedOutputBufferManager.java b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestPartitionedPipelinedOutputBufferManager.java index 7ef9f4c3587908..fb4ef929b020e5 100644 --- a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestPartitionedPipelinedOutputBufferManager.java +++ b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestPartitionedPipelinedOutputBufferManager.java @@ -57,7 +57,7 @@ private static void assertOutputBuffers(PipelinedOutputBuffers outputBuffers) assertThat(outputBuffers.getVersion() > 0).isTrue(); assertThat(outputBuffers.isNoMoreBufferIds()).isTrue(); Map buffers = outputBuffers.getBuffers(); - assertThat(buffers.size()).isEqualTo(4); + assertThat(buffers).hasSize(4); for (int partition = 0; partition < 4; partition++) { assertThat(buffers).containsEntry(new OutputBufferId(partition), Integer.valueOf(partition)); } diff --git a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestScaledWriterScheduler.java b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestScaledWriterScheduler.java index 33d79c931c2f65..677f448203baef 100644 --- a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestScaledWriterScheduler.java +++ b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestScaledWriterScheduler.java @@ -77,7 +77,7 @@ public void testGetNewTaskCountWithUnderutilizedTasksWithoutSkewness() TaskStatus taskStatus3 = buildTaskStatus(false, 12345L); try (ScaledWriterScheduler scaledWriterScheduler = 
buildScaleWriterSchedulerWithInitialTasks(taskStatus1, taskStatus2, taskStatus3)) { - assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(0); + assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(0); } } @@ -89,7 +89,7 @@ public void testGetNewTaskCountWithOverutilizedTasksWithoutSkewness() TaskStatus taskStatus3 = buildTaskStatus(false, 12345L); try (ScaledWriterScheduler scaledWriterScheduler = buildScaleWriterSchedulerWithInitialTasks(taskStatus1, taskStatus2, taskStatus3)) { - assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(1); + assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(1); } } @@ -101,7 +101,7 @@ public void testGetNewTaskCountWithOverutilizedSkewedTaskAndUnderutilizedNonSkew TaskStatus taskStatus3 = buildTaskStatus(false, 123456L); try (ScaledWriterScheduler scaledWriterScheduler = buildScaleWriterSchedulerWithInitialTasks(taskStatus1, taskStatus2, taskStatus3)) { - assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(1); + assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(1); } } @@ -113,7 +113,7 @@ public void testGetNewTaskCountWithUnderutilizedSkewedTaskAndOverutilizedNonSkew TaskStatus taskStatus3 = buildTaskStatus(false, 1234567L); try (ScaledWriterScheduler scaledWriterScheduler = buildScaleWriterSchedulerWithInitialTasks(taskStatus1, taskStatus2, taskStatus3)) { - assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(1); + assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(1); } } @@ -126,7 +126,7 @@ public void testGetNewTaskCountWhenWriterDataProcessedIsGreaterThanMinForScaleUp try (ScaledWriterScheduler scaledWriterScheduler = buildScaleWriterSchedulerWithInitialTasks(taskStatus1, taskStatus2, taskStatus3)) { // Scale up will happen - assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(1); + 
assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(1); } } @@ -140,7 +140,7 @@ public void testGetNewTaskCountWhenWriterDataProcessedIsLessThanMinForScaleUp() try (ScaledWriterScheduler scaledWriterScheduler = buildScaleWriterSchedulerWithInitialTasks(taskStatus1, taskStatus2, taskStatus3)) { // Scale up will not happen because for one of the task there are two local writers which makes the // minWrittenBytes for scaling up to (2 * writerScalingMinDataProcessed) that is greater than writerInputDataSize. - assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(0); + assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(0); } } @@ -153,7 +153,7 @@ public void testGetNewTaskCountWhenExistingWriterTaskMaxWriterCountIsEmpty() try (ScaledWriterScheduler scaledWriterScheduler = buildScaleWriterSchedulerWithInitialTasks(taskStatus1, taskStatus2, taskStatus3)) { // Scale up will not happen because one of the existing writer task isn't initialized yet with maxWriterCount. 
- assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(0); + assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(0); } } @@ -164,7 +164,7 @@ public void testNewTaskCountWhenNodesUpperLimitIsNotExceeded() AtomicReference> taskStatusProvider = new AtomicReference<>(ImmutableList.of(taskStatus)); try (ScaledWriterScheduler scaledWriterScheduler = buildScaledWriterScheduler(taskStatusProvider, 2)) { scaledWriterScheduler.schedule(); - assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(1); + assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(1); } } @@ -175,7 +175,7 @@ public void testNewTaskCountWhenNodesUpperLimitIsExceeded() AtomicReference> taskStatusProvider = new AtomicReference<>(ImmutableList.of(taskStatus)); try (ScaledWriterScheduler scaledWriterScheduler = buildScaledWriterScheduler(taskStatusProvider, 1)) { scaledWriterScheduler.schedule(); - assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(0); + assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(0); } } @@ -184,13 +184,13 @@ private ScaledWriterScheduler buildScaleWriterSchedulerWithInitialTasks(TaskStat AtomicReference> taskStatusProvider = new AtomicReference<>(ImmutableList.of()); ScaledWriterScheduler scaledWriterScheduler = buildScaledWriterScheduler(taskStatusProvider, 100); - assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(1); + assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(1); taskStatusProvider.set(ImmutableList.of(taskStatus1)); - assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(1); + assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(1); taskStatusProvider.set(ImmutableList.of(taskStatus1, taskStatus2)); - assertThat(scaledWriterScheduler.schedule().getNewTasks().size()).isEqualTo(1); + assertThat(scaledWriterScheduler.schedule().getNewTasks()).hasSize(1); 
taskStatusProvider.set(ImmutableList.of(taskStatus1, taskStatus2, taskStatus3)); return scaledWriterScheduler; diff --git a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestSourcePartitionedScheduler.java b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestSourcePartitionedScheduler.java index dbbb58dde43490..6e5fda0c0cf93c 100644 --- a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestSourcePartitionedScheduler.java +++ b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestSourcePartitionedScheduler.java @@ -165,7 +165,7 @@ public void testScheduleNoSplits() ScheduleResult scheduleResult = scheduler.schedule(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(1); + assertThat(scheduleResult.getNewTasks()).hasSize(1); assertEffectivelyFinished(scheduleResult, scheduler); stage.abort(); @@ -181,13 +181,13 @@ public void testDoesNotScheduleEmptySplit() ConnectorSplitSource splitSource = createFixedSplitSource(2, TestingSplit::createRemoteSplit); StageScheduler scheduler = getSourcePartitionedScheduler(splitSource, stage, nodeManager, nodeTaskMap, 1, STAGE); - assertThat(scheduler.schedule().getNewTasks().size()).isEqualTo(1); + assertThat(scheduler.schedule().getNewTasks()).hasSize(1); // ensure that next batch size fetched by scheduler will be empty and last splitSource.getNextBatch(1); ScheduleResult scheduleResult = scheduler.schedule(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(0); + assertThat(scheduleResult.getNewTasks()).hasSize(0); assertEffectivelyFinished(scheduleResult, scheduler); @@ -218,8 +218,8 @@ public void testScheduleSplitsOneAtATime() assertThat(scheduleResult.getBlocked().isDone()).isTrue(); // first three splits create new tasks - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(i < 3 ? 1 : 0); - assertThat(stage.getAllTasks().size()).isEqualTo(i < 3 ? i + 1 : 3); + assertThat(scheduleResult.getNewTasks()).hasSize(i < 3 ? 
1 : 0); + assertThat(stage.getAllTasks()).hasSize(i < 3 ? i + 1 : 3); assertPartitionedSplitCount(stage, min(i + 1, 60)); } @@ -256,8 +256,8 @@ public void testScheduleSplitsBatched() assertThat(scheduleResult.getBlocked().isDone()).isTrue(); // first three splits create new tasks - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(i == 0 ? 3 : 0); - assertThat(stage.getAllTasks().size()).isEqualTo(3); + assertThat(scheduleResult.getNewTasks()).hasSize(i == 0 ? 3 : 0); + assertThat(stage.getAllTasks()).hasSize(3); assertPartitionedSplitCount(stage, min((i + 1) * 7, 60)); } @@ -289,8 +289,8 @@ public void testScheduleSplitsBlock() assertThat(scheduleResult.getBlocked().isDone()).isEqualTo(i != 60); // first three splits create new tasks - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(i < 3 ? 1 : 0); - assertThat(stage.getAllTasks().size()).isEqualTo(i < 3 ? i + 1 : 3); + assertThat(scheduleResult.getNewTasks()).hasSize(i < 3 ? 1 : 0); + assertThat(stage.getAllTasks()).hasSize(i < 3 ? 
i + 1 : 3); assertPartitionedSplitCount(stage, min(i + 1, 60)); } @@ -321,8 +321,8 @@ public void testScheduleSplitsBlock() assertThat(scheduleResult.getBlocked().isDone()).isTrue(); // no additional tasks will be created - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(0); - assertThat(stage.getAllTasks().size()).isEqualTo(3); + assertThat(scheduleResult.getNewTasks()).hasSize(0); + assertThat(stage.getAllTasks()).hasSize(3); // we dropped 20 splits so start at 40 and count to 60 assertPartitionedSplitCount(stage, min(i + 41, 60)); @@ -350,8 +350,8 @@ public void testScheduleSlowSplitSource() ScheduleResult scheduleResult = scheduler.schedule(); assertThat(scheduleResult.isFinished()).isFalse(); assertThat(scheduleResult.getBlocked().isDone()).isFalse(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(0); - assertThat(stage.getAllTasks().size()).isEqualTo(0); + assertThat(scheduleResult.getNewTasks()).hasSize(0); + assertThat(stage.getAllTasks()).hasSize(0); queuedSplitSource.addSplits(1); assertThat(scheduleResult.getBlocked().isDone()).isTrue(); @@ -399,8 +399,8 @@ public void testWorkerBalancedSplitAssignment() ScheduleResult scheduleResult = firstScheduler.schedule(); assertEffectivelyFinished(scheduleResult, firstScheduler); assertThat(scheduleResult.getBlocked().isDone()).isTrue(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(3); - assertThat(firstStage.getAllTasks().size()).isEqualTo(3); + assertThat(scheduleResult.getNewTasks()).hasSize(3); + assertThat(firstStage.getAllTasks()).hasSize(3); for (RemoteTask remoteTask : firstStage.getAllTasks()) { PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo(); assertThat(splitsInfo.getCount()).isEqualTo(5); @@ -418,8 +418,8 @@ public void testWorkerBalancedSplitAssignment() scheduleResult = secondScheduler.schedule(); assertEffectivelyFinished(scheduleResult, secondScheduler); assertThat(scheduleResult.getBlocked().isDone()).isTrue(); - 
assertThat(scheduleResult.getNewTasks().size()).isEqualTo(1); - assertThat(secondStage.getAllTasks().size()).isEqualTo(1); + assertThat(scheduleResult.getNewTasks()).hasSize(1); + assertThat(secondStage.getAllTasks()).hasSize(1); RemoteTask task = secondStage.getAllTasks().get(0); assertThat(task.getPartitionedSplitsInfo().getCount()).isEqualTo(5); @@ -446,8 +446,8 @@ public void testStageBalancedSplitAssignment() ScheduleResult scheduleResult = firstScheduler.schedule(); assertThat(scheduleResult.getBlocked().isDone()).isTrue(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(3); - assertThat(firstStage.getAllTasks().size()).isEqualTo(3); + assertThat(scheduleResult.getNewTasks()).hasSize(3); + assertThat(firstStage.getAllTasks()).hasSize(3); for (RemoteTask remoteTask : firstStage.getAllTasks()) { PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo(); assertThat(splitsInfo.getCount()).isEqualTo(5); @@ -463,8 +463,8 @@ public void testStageBalancedSplitAssignment() scheduleResult = firstScheduler.schedule(); assertEffectivelyFinished(scheduleResult, firstScheduler); assertThat(scheduleResult.getBlocked().isDone()).isTrue(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(1); - assertThat(firstStage.getAllTasks().size()).isEqualTo(4); + assertThat(scheduleResult.getNewTasks()).hasSize(1); + assertThat(firstStage.getAllTasks()).hasSize(4); for (RemoteTask remoteTask : firstStage.getAllTasks()) { PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo(); assertThat(splitsInfo.getCount()).isEqualTo(5); @@ -481,7 +481,7 @@ public void testStageBalancedSplitAssignment() scheduleResult = secondScheduler.schedule(); assertEffectivelyFinished(scheduleResult, secondScheduler); - assertThat(secondStage.getAllTasks().size()).isEqualTo(5); + assertThat(secondStage.getAllTasks()).hasSize(5); for (RemoteTask remoteTask : secondStage.getAllTasks()) { PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo(); 
assertThat(splitsInfo.getCount()).isEqualTo(1); @@ -519,7 +519,7 @@ public void testNewTaskScheduledWhenChildStageBufferIsUnderutilized() // the queues of 3 running nodes should be full ScheduleResult scheduleResult = scheduler.schedule(); assertThat(scheduleResult.getBlockedReason().get()).isEqualTo(SPLIT_QUEUES_FULL); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(3); + assertThat(scheduleResult.getNewTasks()).hasSize(3); assertThat(scheduleResult.getSplitsScheduled()).isEqualTo(3 * 256); for (RemoteTask remoteTask : scheduleResult.getNewTasks()) { PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo(); @@ -529,7 +529,7 @@ public void testNewTaskScheduledWhenChildStageBufferIsUnderutilized() // new node added - the pending splits should go to it since the child tasks are not blocked nodeManager.addNodes(new InternalNode("other4", URI.create("http://127.0.0.4:14"), NodeVersion.UNKNOWN, false)); scheduleResult = scheduler.schedule(); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(1); + assertThat(scheduleResult.getNewTasks()).hasSize(1); assertThat(scheduleResult.getBlockedReason().get()).isEqualTo(SPLIT_QUEUES_FULL); // split queue is full but still the source task creation isn't blocked assertThat(scheduleResult.getSplitsScheduled()).isEqualTo(256); } @@ -562,7 +562,7 @@ public void testNoNewTaskScheduledWhenChildStageBufferIsOverutilized() // the queues of 3 running nodes should be full ScheduleResult scheduleResult = scheduler.schedule(); assertThat(scheduleResult.getBlockedReason().get()).isEqualTo(SPLIT_QUEUES_FULL); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(3); + assertThat(scheduleResult.getNewTasks()).hasSize(3); assertThat(scheduleResult.getSplitsScheduled()).isEqualTo(768); for (RemoteTask remoteTask : scheduleResult.getNewTasks()) { PartitionedSplitsInfo splitsInfo = remoteTask.getPartitionedSplitsInfo(); @@ -573,7 +573,7 @@ public void 
testNoNewTaskScheduledWhenChildStageBufferIsOverutilized() nodeManager.addNodes(new InternalNode("other4", URI.create("http://127.0.0.4:14"), NodeVersion.UNKNOWN, false)); scheduleResult = scheduler.schedule(); assertThat(scheduleResult.getBlockedReason().get()).isEqualTo(SPLIT_QUEUES_FULL); - assertThat(scheduleResult.getNewTasks().size()).isEqualTo(0); + assertThat(scheduleResult.getNewTasks()).hasSize(0); assertThat(scheduleResult.getSplitsScheduled()).isEqualTo(0); } @@ -611,7 +611,7 @@ public void testDynamicFiltersUnblockedOnBlockedBuildSource() // make sure dynamic filtering collecting task was created immediately assertThat(stage.getState()).isEqualTo(PLANNED); scheduler.start(); - assertThat(stage.getAllTasks().size()).isEqualTo(1); + assertThat(stage.getAllTasks()).hasSize(1); assertThat(stage.getState()).isEqualTo(SCHEDULING); // make sure dynamic filter is initially blocked @@ -641,7 +641,7 @@ private static void assertEffectivelyFinished(ScheduleResult scheduleResult, Sta ScheduleResult nextScheduleResult = scheduler.schedule(); assertThat(nextScheduleResult.isFinished()).isTrue(); assertThat(nextScheduleResult.getBlocked().isDone()).isTrue(); - assertThat(nextScheduleResult.getNewTasks().size()).isEqualTo(0); + assertThat(nextScheduleResult.getNewTasks()).hasSize(0); assertThat(nextScheduleResult.getSplitsScheduled()).isEqualTo(0); } diff --git a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestUniformNodeSelector.java b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestUniformNodeSelector.java index 2d4af1f754bae2..7d2cae31ddcc08 100644 --- a/core/trino-main/src/test/java/io/trino/execution/scheduler/TestUniformNodeSelector.java +++ b/core/trino-main/src/test/java/io/trino/execution/scheduler/TestUniformNodeSelector.java @@ -158,7 +158,7 @@ public void testQueueSizeAdjustmentScaleDown() taskMap.put(node, remoteTask); } Set unassignedSplits = Sets.difference(splits, new HashSet<>(assignments1.values())); - 
assertThat(unassignedSplits.size()).isEqualTo(18); + assertThat(unassignedSplits).hasSize(18); // It's possible to add new assignments because split queue was upscaled Multimap assignments2 = nodeSelector.computeAssignments(unassignedSplits, ImmutableList.copyOf(taskMap.values())).getAssignments(); assertThat(assignments2.size()).isEqualTo(2); @@ -206,7 +206,7 @@ public void testQueueSizeAdjustmentAllNodes() taskMap.put(node, remoteTask); } Set unassignedSplits = Sets.difference(splits, new HashSet<>(assignments1.values())); - assertThat(unassignedSplits.size()).isEqualTo(140); + assertThat(unassignedSplits).hasSize(140); // assign splits, mark all splits running to trigger adjustment Multimap assignments2 = nodeSelector.computeAssignments(unassignedSplits, ImmutableList.copyOf(taskMap.values())).getAssignments(); @@ -218,7 +218,7 @@ public void testQueueSizeAdjustmentAllNodes() remoteTask.startSplits(remoteTask.getPartitionedSplitsInfo().getCount()); // mark all task running } unassignedSplits = Sets.difference(unassignedSplits, new HashSet<>(assignments2.values())); - assertThat(unassignedSplits.size()).isEqualTo(100); // 140 (unassigned splits) - (2 (queue size adjustment) * 10 (minPendingSplitsPerTask)) * 2 (nodes) + assertThat(unassignedSplits).hasSize(100); // 140 (unassigned splits) - (2 (queue size adjustment) * 10 (minPendingSplitsPerTask)) * 2 (nodes) // assign splits without setting all splits running Multimap assignments3 = nodeSelector.computeAssignments(unassignedSplits, ImmutableList.copyOf(taskMap.values())).getAssignments(); @@ -229,12 +229,12 @@ public void testQueueSizeAdjustmentAllNodes() .build()); } unassignedSplits = Sets.difference(unassignedSplits, new HashSet<>(assignments3.values())); - assertThat(unassignedSplits.size()).isEqualTo(20); // 100 (unassigned splits) - (4 (queue size adjustment) * 10 (minPendingSplitsPerTask)) * 2 (nodes) + assertThat(unassignedSplits).hasSize(20); // 100 (unassigned splits) - (4 (queue size adjustment) * 10 
(minPendingSplitsPerTask)) * 2 (nodes) // compute assignments with exhausted nodes Multimap assignments4 = nodeSelector.computeAssignments(unassignedSplits, ImmutableList.copyOf(taskMap.values())).getAssignments(); unassignedSplits = Sets.difference(unassignedSplits, new HashSet<>(assignments4.values())); - assertThat(unassignedSplits.size()).isEqualTo(20); // no new split assignments, queued are more than 0 + assertThat(unassignedSplits).hasSize(20); // no new split assignments, queued are more than 0 } @Test @@ -260,7 +260,7 @@ public void testQueueSizeAdjustmentOneOfAll() taskMap.put(node, remoteTask); } Set unassignedSplits = Sets.difference(splits, new HashSet<>(assignments1.values())); - assertThat(unassignedSplits.size()).isEqualTo(140); + assertThat(unassignedSplits).hasSize(140); // assign splits, mark all splits for node1 running to trigger adjustment Multimap assignments2 = nodeSelector.computeAssignments(unassignedSplits, ImmutableList.copyOf(taskMap.values())).getAssignments(); @@ -274,8 +274,8 @@ public void testQueueSizeAdjustmentOneOfAll() } } unassignedSplits = Sets.difference(unassignedSplits, new HashSet<>(assignments2.values())); - assertThat(unassignedSplits.size()).isEqualTo(120); - assertThat(assignments2.get(node1).size()).isEqualTo(20); // 2x max pending + assertThat(unassignedSplits).hasSize(120); + assertThat(assignments2.get(node1)).hasSize(20); // 2x max pending assertThat(assignments2.containsKey(node2)).isFalse(); // assign splits, mark all splits for node1 running to trigger adjustment @@ -290,8 +290,8 @@ public void testQueueSizeAdjustmentOneOfAll() } } unassignedSplits = Sets.difference(unassignedSplits, new HashSet<>(assignments3.values())); - assertThat(unassignedSplits.size()).isEqualTo(80); - assertThat(assignments3.get(node1).size()).isEqualTo(40); // 4x max pending + assertThat(unassignedSplits).hasSize(80); + assertThat(assignments3.get(node1)).hasSize(40); // 4x max pending 
assertThat(assignments2.containsKey(node2)).isFalse(); } diff --git a/core/trino-main/src/test/java/io/trino/execution/warnings/TestDefaultWarningCollector.java b/core/trino-main/src/test/java/io/trino/execution/warnings/TestDefaultWarningCollector.java index 5a5c82cb43e4a0..8a9ebae1354692 100644 --- a/core/trino-main/src/test/java/io/trino/execution/warnings/TestDefaultWarningCollector.java +++ b/core/trino-main/src/test/java/io/trino/execution/warnings/TestDefaultWarningCollector.java @@ -26,7 +26,7 @@ public void testNoWarnings() { WarningCollector warningCollector = new DefaultWarningCollector(new WarningCollectorConfig().setMaxWarnings(0)); warningCollector.add(new TrinoWarning(new WarningCode(1, "1"), "warning 1")); - assertThat(warningCollector.getWarnings().size()).isEqualTo(0); + assertThat(warningCollector.getWarnings()).hasSize(0); } @Test @@ -36,6 +36,6 @@ public void testMaxWarnings() warningCollector.add(new TrinoWarning(new WarningCode(1, "1"), "warning 1")); warningCollector.add(new TrinoWarning(new WarningCode(2, "2"), "warning 2")); warningCollector.add(new TrinoWarning(new WarningCode(3, "3"), "warning 3")); - assertThat(warningCollector.getWarnings().size()).isEqualTo(2); + assertThat(warningCollector.getWarnings()).hasSize(2); } } diff --git a/core/trino-main/src/test/java/io/trino/failuredetector/TestHeartbeatFailureDetector.java b/core/trino-main/src/test/java/io/trino/failuredetector/TestHeartbeatFailureDetector.java index 2ef5824f44e978..dc98f687816442 100644 --- a/core/trino-main/src/test/java/io/trino/failuredetector/TestHeartbeatFailureDetector.java +++ b/core/trino-main/src/test/java/io/trino/failuredetector/TestHeartbeatFailureDetector.java @@ -74,7 +74,7 @@ public void testExcludesCurrentNode() .initialize(); ServiceSelector selector = injector.getInstance(Key.get(ServiceSelector.class, serviceType("trino"))); - assertThat(selector.selectAllServices().size()).isEqualTo(1); + assertThat(selector.selectAllServices()).hasSize(1); 
HeartbeatFailureDetector detector = injector.getInstance(HeartbeatFailureDetector.class); detector.updateMonitoredServices(); diff --git a/core/trino-main/src/test/java/io/trino/memory/TestMemoryPools.java b/core/trino-main/src/test/java/io/trino/memory/TestMemoryPools.java index f7bfe6194f0d10..1a06a26aacf7d2 100644 --- a/core/trino-main/src/test/java/io/trino/memory/TestMemoryPools.java +++ b/core/trino-main/src/test/java/io/trino/memory/TestMemoryPools.java @@ -212,7 +212,7 @@ void testTaggedAllocations() // free all for test_tag2 testPool.free(testTask, "test_tag2", 20); - assertThat(testPool.getTaggedMemoryAllocations().size()).isEqualTo(0); + assertThat(testPool.getTaggedMemoryAllocations()).hasSize(0); } @Test diff --git a/core/trino-main/src/test/java/io/trino/metadata/TestDiscoveryNodeManager.java b/core/trino-main/src/test/java/io/trino/metadata/TestDiscoveryNodeManager.java index 563ab3b87233d9..9668670ccbc1f7 100644 --- a/core/trino-main/src/test/java/io/trino/metadata/TestDiscoveryNodeManager.java +++ b/core/trino-main/src/test/java/io/trino/metadata/TestDiscoveryNodeManager.java @@ -110,7 +110,7 @@ public void testGetAllNodes() AllNodes allNodes = manager.getAllNodes(); Set connectorNodes = manager.getActiveCatalogNodes(GlobalSystemConnector.CATALOG_HANDLE); - assertThat(connectorNodes.size()).isEqualTo(4); + assertThat(connectorNodes).hasSize(4); assertThat(connectorNodes.stream().anyMatch(InternalNode::isCoordinator)).isTrue(); Set activeNodes = allNodes.getActiveNodes(); diff --git a/core/trino-main/src/test/java/io/trino/metadata/TestPolymorphicScalarFunction.java b/core/trino-main/src/test/java/io/trino/metadata/TestPolymorphicScalarFunction.java index bf161249d6664c..7c716b617a69db 100644 --- a/core/trino-main/src/test/java/io/trino/metadata/TestPolymorphicScalarFunction.java +++ b/core/trino-main/src/test/java/io/trino/metadata/TestPolymorphicScalarFunction.java @@ -109,7 +109,7 @@ public void testSelectsMultipleChoiceWithBlockPosition() 
shortDecimalBoundSignature, new InternalFunctionDependencies(FUNCTION_MANAGER::getScalarFunctionImplementation, ImmutableMap.of(), ImmutableSet.of())); - assertThat(specializedFunction.getChoices().size()).isEqualTo(2); + assertThat(specializedFunction.getChoices()).hasSize(2); assertThat(specializedFunction.getChoices().get(0).getInvocationConvention()).isEqualTo(new InvocationConvention(ImmutableList.of(NULL_FLAG, NULL_FLAG), FAIL_ON_NULL, false, false)); assertThat(specializedFunction.getChoices().get(1).getInvocationConvention()).isEqualTo(new InvocationConvention(ImmutableList.of(BLOCK_POSITION, BLOCK_POSITION), FAIL_ON_NULL, false, false)); Block block1 = new LongArrayBlock(0, Optional.empty(), new long[0]); diff --git a/core/trino-main/src/test/java/io/trino/operator/AnnotationEngineAssertions.java b/core/trino-main/src/test/java/io/trino/operator/AnnotationEngineAssertions.java index 905929ce6562f6..571f7580048622 100644 --- a/core/trino-main/src/test/java/io/trino/operator/AnnotationEngineAssertions.java +++ b/core/trino-main/src/test/java/io/trino/operator/AnnotationEngineAssertions.java @@ -29,15 +29,15 @@ public static void assertImplementationCount(ParametricScalar scalar, int exact, public static void assertImplementationCount(ParametricImplementationsGroup implementations, int exact, int specialized, int generic) { - assertThat(implementations.getExactImplementations().size()).isEqualTo(exact); - assertThat(implementations.getSpecializedImplementations().size()).isEqualTo(specialized); - assertThat(implementations.getGenericImplementations().size()).isEqualTo(generic); + assertThat(implementations.getExactImplementations()).hasSize(exact); + assertThat(implementations.getSpecializedImplementations()).hasSize(specialized); + assertThat(implementations.getGenericImplementations()).hasSize(generic); } public static void assertDependencyCount(ParametricAggregationImplementation implementation, int input, int combine, int output) { - 
assertThat(implementation.getInputDependencies().size()).isEqualTo(input); - assertThat(implementation.getCombineDependencies().size()).isEqualTo(combine); - assertThat(implementation.getOutputDependencies().size()).isEqualTo(output); + assertThat(implementation.getInputDependencies()).hasSize(input); + assertThat(implementation.getCombineDependencies()).hasSize(combine); + assertThat(implementation.getOutputDependencies()).hasSize(output); } } diff --git a/core/trino-main/src/test/java/io/trino/operator/OperatorAssertion.java b/core/trino-main/src/test/java/io/trino/operator/OperatorAssertion.java index 639c5d4d961d33..8e9d4c4a6fda15 100644 --- a/core/trino-main/src/test/java/io/trino/operator/OperatorAssertion.java +++ b/core/trino-main/src/test/java/io/trino/operator/OperatorAssertion.java @@ -211,7 +211,7 @@ public static SqlRow toRow(List parameterTypes, Object... values) public static void assertOperatorEquals(OperatorFactory operatorFactory, List types, DriverContext driverContext, List input, List expected) { List actual = toPages(operatorFactory, driverContext, input); - assertThat(actual.size()).isEqualTo(expected.size()); + assertThat(actual).hasSize(expected.size()); for (int i = 0; i < actual.size(); i++) { assertPageEquals(types, actual.get(i), expected.get(i)); } diff --git a/core/trino-main/src/test/java/io/trino/operator/PageAssertions.java b/core/trino-main/src/test/java/io/trino/operator/PageAssertions.java index 31af1bd63b326b..c5783cb089205f 100644 --- a/core/trino-main/src/test/java/io/trino/operator/PageAssertions.java +++ b/core/trino-main/src/test/java/io/trino/operator/PageAssertions.java @@ -27,7 +27,7 @@ private PageAssertions() {} public static void assertPageEquals(List types, Page actualPage, Page expectedPage) { - assertThat(types.size()).isEqualTo(actualPage.getChannelCount()); + assertThat(types).hasSize(actualPage.getChannelCount()); assertThat(actualPage.getChannelCount()).isEqualTo(expectedPage.getChannelCount()); 
assertThat(actualPage.getPositionCount()).isEqualTo(expectedPage.getPositionCount()); for (int i = 0; i < actualPage.getChannelCount(); i++) { diff --git a/core/trino-main/src/test/java/io/trino/operator/TestAnnotationEngineForAggregates.java b/core/trino-main/src/test/java/io/trino/operator/TestAnnotationEngineForAggregates.java index eb432ea4ef7415..00465ba3fd85d2 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestAnnotationEngineForAggregates.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestAnnotationEngineForAggregates.java @@ -667,7 +667,7 @@ public void testMultiOutputAggregationParse() .build(); List aggregations = parseFunctionDefinitions(MultiOutputAggregationFunction.class); - assertThat(aggregations.size()).isEqualTo(2); + assertThat(aggregations).hasSize(2); ParametricAggregation aggregation1 = aggregations.stream().filter(aggregate -> aggregate.getFunctionMetadata().getCanonicalName().equals("multi_output_aggregate_1")).collect(toImmutableList()).get(0); assertThat(aggregation1.getFunctionMetadata().getSignature()).isEqualTo(expectedSignature1); @@ -817,7 +817,7 @@ public void testInjectTypeAggregateParse() assertThat(aggregation.getFunctionMetadata().getSignature()).isEqualTo(expectedSignature); ParametricImplementationsGroup implementations = aggregation.getImplementations(); - assertThat(implementations.getGenericImplementations().size()).isEqualTo(1); + assertThat(implementations.getGenericImplementations()).hasSize(1); ParametricAggregationImplementation implementation = implementations.getGenericImplementations().get(0); assertThat(implementation.getDefinitionClass()).isEqualTo(InjectTypeAggregateFunction.class); assertDependencyCount(implementation, 1, 1, 1); @@ -880,7 +880,7 @@ public void testInjectLiteralAggregateParse() assertThat(aggregation.getFunctionMetadata().getSignature()).isEqualTo(expectedSignature); ParametricImplementationsGroup implementations = aggregation.getImplementations(); - 
assertThat(implementations.getGenericImplementations().size()).isEqualTo(1); + assertThat(implementations.getGenericImplementations()).hasSize(1); ParametricAggregationImplementation implementation = implementations.getGenericImplementations().get(0); assertThat(implementation.getDefinitionClass()).isEqualTo(InjectLiteralAggregateFunction.class); assertDependencyCount(implementation, 1, 1, 1); @@ -947,7 +947,7 @@ public void testLongConstraintAggregateFunctionParse() assertThat(aggregation.getFunctionMetadata().getSignature()).isEqualTo(expectedSignature); ParametricImplementationsGroup implementations = aggregation.getImplementations(); - assertThat(implementations.getGenericImplementations().size()).isEqualTo(1); + assertThat(implementations.getGenericImplementations()).hasSize(1); ParametricAggregationImplementation implementation = implementations.getGenericImplementations().get(0); assertThat(implementation.getDefinitionClass()).isEqualTo(LongConstraintAggregateFunction.class); assertDependencyCount(implementation, 0, 0, 0); @@ -1133,13 +1133,13 @@ public static void output(@AggregationState TriStateBooleanState state, BlockBui public void testAggregateFunctionGetCanonicalName() { List aggregationFunctions = parseFunctionDefinitions(AggregationOutputFunctionWithAlias.class); - assertThat(aggregationFunctions.size()).isEqualTo(1); + assertThat(aggregationFunctions).hasSize(1); ParametricAggregation aggregation = getOnlyElement(aggregationFunctions); assertThat(aggregation.getFunctionMetadata().getCanonicalName()).isEqualTo("aggregation_output"); assertThat(aggregation.getFunctionMetadata().getNames()).containsExactlyInAnyOrder("aggregation_output", "aggregation_output_alias_1", "aggregation_output_alias_2"); aggregationFunctions = parseFunctionDefinitions(AggregationFunctionWithAlias.class); - assertThat(aggregationFunctions.size()).isEqualTo(1); + assertThat(aggregationFunctions).hasSize(1); aggregation = getOnlyElement(aggregationFunctions); 
assertThat(aggregation.getFunctionMetadata().getCanonicalName()).isEqualTo("aggregation"); assertThat(aggregation.getFunctionMetadata().getNames()).containsExactlyInAnyOrder("aggregation", "aggregation_alias_1", "aggregation_alias_2"); diff --git a/core/trino-main/src/test/java/io/trino/operator/TestAnnotationEngineForScalars.java b/core/trino-main/src/test/java/io/trino/operator/TestAnnotationEngineForScalars.java index d5fa5fdec3a8a2..134da275ecbb7a 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestAnnotationEngineForScalars.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestAnnotationEngineForScalars.java @@ -83,7 +83,7 @@ public void testSingleImplementationScalarParse() .build(); List functions = ScalarFromAnnotationsParser.parseFunctionDefinition(SingleImplementationScalarFunction.class); - assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); FunctionMetadata functionMetadata = scalar.getFunctionMetadata(); @@ -117,7 +117,7 @@ public static double fun(@SqlType(StandardTypes.DOUBLE) double v) public void testHiddenScalarParse() { List functions = ScalarFromAnnotationsParser.parseFunctionDefinition(HiddenScalarFunction.class); - assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); FunctionMetadata functionMetadata = scalar.getFunctionMetadata(); @@ -140,7 +140,7 @@ public static double fun(@SqlType(StandardTypes.DOUBLE) double v) public void testNonDeterministicScalarParse() { List functions = ScalarFromAnnotationsParser.parseFunctionDefinition(NonDeterministicScalarFunction.class); - assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); FunctionMetadata functionMetadata = scalar.getFunctionMetadata(); @@ -172,7 +172,7 @@ public void 
testWithNullablePrimitiveArgScalarParse() .build(); List functions = ScalarFromAnnotationsParser.parseFunctionDefinition(WithNullablePrimitiveArgScalarFunction.class); - assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); FunctionMetadata functionMetadata = scalar.getFunctionMetadata(); @@ -213,7 +213,7 @@ public void testWithNullableComplexArgScalarParse() .build(); List functions = ScalarFromAnnotationsParser.parseFunctionDefinition(WithNullableComplexArgScalarFunction.class); - assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); FunctionMetadata functionMetadata = scalar.getFunctionMetadata(); @@ -251,7 +251,7 @@ public void testStaticMethodScalarParse() .build(); List functions = ScalarFromAnnotationsParser.parseFunctionDefinitions(StaticMethodScalarFunction.class); - assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); FunctionMetadata functionMetadata = scalar.getFunctionMetadata(); @@ -294,7 +294,7 @@ public void testMultiScalarParse() .build(); List functions = ScalarFromAnnotationsParser.parseFunctionDefinitions(MultiScalarFunction.class); - assertThat(functions.size()).isEqualTo(2); + assertThat(functions).hasSize(2); ParametricScalar scalar1 = (ParametricScalar) functions.stream().filter(function -> function.getFunctionMetadata().getSignature().equals(expectedSignature1)).collect(toImmutableList()).get(0); ParametricScalar scalar2 = (ParametricScalar) functions.stream().filter(function -> function.getFunctionMetadata().getSignature().equals(expectedSignature2)).collect(toImmutableList()).get(0); @@ -343,7 +343,7 @@ public void testParametricScalarParse() .build(); List functions = ScalarFromAnnotationsParser.parseFunctionDefinition(ParametricScalarFunction.class); - 
assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); assertImplementationCount(scalar, 0, 2, 0); @@ -386,7 +386,7 @@ public void testComplexParametricScalarParse() .build(); List functions = ScalarFromAnnotationsParser.parseFunctionDefinition(ComplexParametricScalarFunction.class); - assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); assertImplementationCount(scalar.getImplementations(), 1, 0, 1); assertThat(getOnlyElement(scalar.getImplementations().getExactImplementations().keySet())).isEqualTo(exactSignature); @@ -421,13 +421,13 @@ public void testSimpleInjectionScalarParse() .build(); List functions = ScalarFromAnnotationsParser.parseFunctionDefinition(SimpleInjectionScalarFunction.class); - assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); assertImplementationCount(scalar, 0, 0, 1); List parametricScalarImplementationChoices = scalar.getImplementations().getGenericImplementations().get(0).getChoices(); - assertThat(parametricScalarImplementationChoices.size()).isEqualTo(1); + assertThat(parametricScalarImplementationChoices).hasSize(1); List dependencies = parametricScalarImplementationChoices.get(0).getDependencies(); - assertThat(dependencies.size()).isEqualTo(1); + assertThat(dependencies).hasSize(1); assertThat(dependencies.get(0)).isInstanceOf(LiteralImplementationDependency.class); FunctionMetadata functionMetadata = scalar.getFunctionMetadata(); @@ -474,15 +474,15 @@ public void testConstructorInjectionScalarParse() .build(); List functions = ScalarFromAnnotationsParser.parseFunctionDefinition(ConstructorInjectionScalarFunction.class); - assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); 
assertImplementationCount(scalar, 2, 0, 1); List parametricScalarImplementationChoices = scalar.getImplementations().getGenericImplementations().get(0).getChoices(); - assertThat(parametricScalarImplementationChoices.size()).isEqualTo(1); + assertThat(parametricScalarImplementationChoices).hasSize(1); List dependencies = parametricScalarImplementationChoices.get(0).getDependencies(); - assertThat(dependencies.size()).isEqualTo(0); + assertThat(dependencies).hasSize(0); List constructorDependencies = parametricScalarImplementationChoices.get(0).getConstructorDependencies(); - assertThat(constructorDependencies.size()).isEqualTo(1); + assertThat(constructorDependencies).hasSize(1); assertThat(constructorDependencies.get(0)).isInstanceOf(TypeImplementationDependency.class); FunctionMetadata functionMetadata = scalar.getFunctionMetadata(); @@ -514,7 +514,7 @@ public void testFixedTypeParameterParse() .build(); List functions = ScalarFromAnnotationsParser.parseFunctionDefinition(FixedTypeParameterScalarFunction.class); - assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); assertImplementationCount(scalar, 1, 0, 0); @@ -551,13 +551,13 @@ public void testPartiallyFixedTypeParameterParse() .build(); List functions = ScalarFromAnnotationsParser.parseFunctionDefinition(PartiallyFixedTypeParameterScalarFunction.class); - assertThat(functions.size()).isEqualTo(1); + assertThat(functions).hasSize(1); ParametricScalar scalar = (ParametricScalar) functions.get(0); assertImplementationCount(scalar, 0, 0, 1); List parametricScalarImplementationChoices = scalar.getImplementations().getGenericImplementations().get(0).getChoices(); - assertThat(parametricScalarImplementationChoices.size()).isEqualTo(1); + assertThat(parametricScalarImplementationChoices).hasSize(1); List dependencies = parametricScalarImplementationChoices.get(0).getDependencies(); - assertThat(dependencies.size()).isEqualTo(1); 
+ assertThat(dependencies).hasSize(1); FunctionMetadata functionMetadata = scalar.getFunctionMetadata(); assertThat(functionMetadata.getSignature()).isEqualTo(expectedSignature); diff --git a/core/trino-main/src/test/java/io/trino/operator/TestDirectExchangeClient.java b/core/trino-main/src/test/java/io/trino/operator/TestDirectExchangeClient.java index 054678e1d1bcef..3e304077f90f0b 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestDirectExchangeClient.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestDirectExchangeClient.java @@ -1015,7 +1015,7 @@ public void testScheduleWhenOneClientFilledBuffer() int clientCount = exchangeClient.scheduleRequestIfNecessary(); // The first client filled the buffer. There is no place for the another one assertThat(clientCount).isEqualTo(1); - assertThat(exchangeClient.getRunningClients().size()).isEqualTo(1); + assertThat(exchangeClient.getRunningClients()).hasSize(1); } @Test @@ -1049,7 +1049,7 @@ public void testScheduleWhenAllClientsAreEmpty() int clientCount = exchangeClient.scheduleRequestIfNecessary(); assertThat(clientCount).isEqualTo(2); - assertThat(exchangeClient.getRunningClients().size()).isEqualTo(2); + assertThat(exchangeClient.getRunningClients()).hasSize(2); } @Test @@ -1088,7 +1088,7 @@ public void testScheduleWhenThereIsPendingClient() int clientCount = exchangeClient.scheduleRequestIfNecessary(); // The first client is pending and it reserved the space in the buffer. 
There is no place for the another one assertThat(clientCount).isEqualTo(0); - assertThat(exchangeClient.getRunningClients().size()).isEqualTo(1); + assertThat(exchangeClient.getRunningClients()).hasSize(1); } private HttpPageBufferClient createHttpPageBufferClient(TestingHttpClient.Processor processor, DataSize expectedMaxSize, URI location, HttpPageBufferClient.ClientCallback callback) diff --git a/core/trino-main/src/test/java/io/trino/operator/TestDriverStats.java b/core/trino-main/src/test/java/io/trino/operator/TestDriverStats.java index 4ebcc82e11fff0..58f671a05a1cd6 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestDriverStats.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestDriverStats.java @@ -119,7 +119,7 @@ public static void assertExpectedDriverStats(DriverStats actual) assertThat(actual.getPhysicalWrittenDataSize()).isEqualTo(DataSize.ofBytes(20)); - assertThat(actual.getOperatorStats().size()).isEqualTo(1); + assertThat(actual.getOperatorStats()).hasSize(1); assertExpectedOperatorStats(actual.getOperatorStats().get(0)); } } diff --git a/core/trino-main/src/test/java/io/trino/operator/TestExchangeOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestExchangeOperator.java index ca3b23d1f7cf23..7fbe4e30553684 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestExchangeOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestExchangeOperator.java @@ -324,7 +324,7 @@ private static List waitForPages(Operator operator, int expectedPageCount) assertThat(operator.getOutput()).isNull(); // verify pages - assertThat(outputPages.size()).isEqualTo(expectedPageCount); + assertThat(outputPages).hasSize(expectedPageCount); for (Page page : outputPages) { assertPageEquals(TYPES, page, PAGE); } diff --git a/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRankBuilder.java b/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRankBuilder.java index 
b5df9ed0cd9e4b..6dd1fe966a2844 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRankBuilder.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRankBuilder.java @@ -114,7 +114,7 @@ private void testSingleGroupTopN(boolean produceRanking) .build()).process()).isTrue(); List output = ImmutableList.copyOf(groupedTopNBuilder.buildResult()); - assertThat(output.size()).isEqualTo(1); + assertThat(output).hasSize(1); List outputTypes = ImmutableList.of(DOUBLE, BIGINT); Page expected = rowPageBuilder(outputTypes) @@ -190,7 +190,7 @@ private void testMultiGroupTopN(boolean produceRanking) .build()).process()).isTrue(); List output = ImmutableList.copyOf(groupedTopNBuilder.buildResult()); - assertThat(output.size()).isEqualTo(1); + assertThat(output).hasSize(1); List outputTypes = ImmutableList.of(BIGINT, DOUBLE, BIGINT); Page expected = rowPageBuilder(outputTypes) @@ -242,7 +242,7 @@ public void testYield() unblock.set(true); assertThat(work.process()).isTrue(); List output = ImmutableList.copyOf(groupedTopNBuilder.buildResult()); - assertThat(output.size()).isEqualTo(1); + assertThat(output).hasSize(1); Page expected = rowPagesBuilder(types) .row(1L, 0.1) diff --git a/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRowNumberBuilder.java b/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRowNumberBuilder.java index cdbe12510220e5..5411887b471dd5 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRowNumberBuilder.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestGroupedTopNRowNumberBuilder.java @@ -102,7 +102,7 @@ private void testMultiGroupTopN(boolean produceRowNumbers) assertThat(groupedTopNBuilder.processPage(input.get(3)).process()).isTrue(); List output = ImmutableList.copyOf(groupedTopNBuilder.buildResult()); - assertThat(output.size()).isEqualTo(1); + assertThat(output).hasSize(1); Page expected = rowPagesBuilder(BIGINT, DOUBLE, BIGINT) .row(1L, 0.3, 1) 
@@ -174,7 +174,7 @@ private void testSingleGroupTopN(boolean produceRowNumbers) assertThat(groupedTopNBuilder.processPage(input.get(3)).process()).isTrue(); List output = ImmutableList.copyOf(groupedTopNBuilder.buildResult()); - assertThat(output.size()).isEqualTo(1); + assertThat(output).hasSize(1); Page expected = rowPagesBuilder(BIGINT, DOUBLE, BIGINT) .row(3L, 0.1, 1) @@ -221,7 +221,7 @@ public void testYield() unblock.set(true); assertThat(work.process()).isTrue(); List output = ImmutableList.copyOf(groupedTopNBuilder.buildResult()); - assertThat(output.size()).isEqualTo(1); + assertThat(output).hasSize(1); Page expected = rowPagesBuilder(types) .row(1L, 0.1) diff --git a/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java index 52f6b8e89a281f..17f39a9693eb8c 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestHashAggregationOperator.java @@ -552,7 +552,7 @@ private void testMultiSliceAggregationOutput(boolean hashEnabled) typeOperators, Optional.empty()); - assertThat(toPages(operatorFactory, createDriverContext(), input).size()).isEqualTo(2); + assertThat(toPages(operatorFactory, createDriverContext(), input)).hasSize(2); } @Test diff --git a/core/trino-main/src/test/java/io/trino/operator/TestHttpPageBufferClient.java b/core/trino-main/src/test/java/io/trino/operator/TestHttpPageBufferClient.java index a2c349f2340a8c..4e9c1bdcd39768 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestHttpPageBufferClient.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestHttpPageBufferClient.java @@ -137,7 +137,7 @@ public void testHappyPath() client.scheduleRequest(); requestComplete.await(10, TimeUnit.SECONDS); - assertThat(callback.getPages().size()).isEqualTo(1); + assertThat(callback.getPages()).hasSize(1); 
assertPageEquals(expectedPage, callback.getPages().get(0)); assertThat(callback.getCompletedRequests()).isEqualTo(1); assertThat(callback.getFinishedBuffers()).isEqualTo(0); @@ -148,7 +148,7 @@ public void testHappyPath() client.scheduleRequest(); requestComplete.await(10, TimeUnit.SECONDS); - assertThat(callback.getPages().size()).isEqualTo(0); + assertThat(callback.getPages()).hasSize(0); assertThat(callback.getCompletedRequests()).isEqualTo(1); assertThat(callback.getFinishedBuffers()).isEqualTo(0); assertStatus(client, location, "queued", 1, 2, 2, 0, "not scheduled"); @@ -160,7 +160,7 @@ public void testHappyPath() client.scheduleRequest(); requestComplete.await(10, TimeUnit.SECONDS); - assertThat(callback.getPages().size()).isEqualTo(2); + assertThat(callback.getPages()).hasSize(2); assertPageEquals(expectedPage, callback.getPages().get(0)); assertPageEquals(expectedPage, callback.getPages().get(1)); assertThat(callback.getCompletedRequests()).isEqualTo(1); @@ -176,7 +176,7 @@ public void testHappyPath() requestComplete.await(10, TimeUnit.SECONDS); // get the buffer complete signal - assertThat(callback.getPages().size()).isEqualTo(0); + assertThat(callback.getPages()).hasSize(0); assertThat(callback.getCompletedRequests()).isEqualTo(1); // schedule the delete call to the buffer @@ -185,7 +185,7 @@ public void testHappyPath() requestComplete.await(10, TimeUnit.SECONDS); assertThat(callback.getFinishedBuffers()).isEqualTo(1); - assertThat(callback.getPages().size()).isEqualTo(0); + assertThat(callback.getPages()).hasSize(0); assertThat(callback.getCompletedRequests()).isEqualTo(0); assertThat(callback.getFailedBuffers()).isEqualTo(0); @@ -268,7 +268,7 @@ public void testInvalidResponses() processor.setResponse(new TestingResponse(HttpStatus.NOT_FOUND, ImmutableListMultimap.of(CONTENT_TYPE, TRINO_PAGES), new byte[0])); client.scheduleRequest(); requestComplete.await(10, TimeUnit.SECONDS); - assertThat(callback.getPages().size()).isEqualTo(0); + 
assertThat(callback.getPages()).hasSize(0); assertThat(callback.getCompletedRequests()).isEqualTo(1); assertThat(callback.getFinishedBuffers()).isEqualTo(0); assertThat(callback.getFailedBuffers()).isEqualTo(1); @@ -281,7 +281,7 @@ public void testInvalidResponses() processor.setResponse(new TestingResponse(HttpStatus.OK, ImmutableListMultimap.of(CONTENT_TYPE, "INVALID_TYPE"), new byte[0])); client.scheduleRequest(); requestComplete.await(10, TimeUnit.SECONDS); - assertThat(callback.getPages().size()).isEqualTo(0); + assertThat(callback.getPages()).hasSize(0); assertThat(callback.getCompletedRequests()).isEqualTo(1); assertThat(callback.getFinishedBuffers()).isEqualTo(0); assertThat(callback.getFailedBuffers()).isEqualTo(1); @@ -294,7 +294,7 @@ public void testInvalidResponses() processor.setResponse(new TestingResponse(HttpStatus.OK, ImmutableListMultimap.of(CONTENT_TYPE, "text/plain"), new byte[0])); client.scheduleRequest(); requestComplete.await(10, TimeUnit.SECONDS); - assertThat(callback.getPages().size()).isEqualTo(0); + assertThat(callback.getPages()).hasSize(0); assertThat(callback.getCompletedRequests()).isEqualTo(1); assertThat(callback.getFinishedBuffers()).isEqualTo(0); assertThat(callback.getFailedBuffers()).isEqualTo(1); @@ -400,7 +400,7 @@ public void testExceptionFromResponseHandler() // this starts the error stopwatch client.scheduleRequest(); requestComplete.await(10, TimeUnit.SECONDS); - assertThat(callback.getPages().size()).isEqualTo(0); + assertThat(callback.getPages()).hasSize(0); assertThat(callback.getCompletedRequests()).isEqualTo(1); assertThat(callback.getFinishedBuffers()).isEqualTo(0); assertThat(callback.getFailedBuffers()).isEqualTo(0); @@ -412,7 +412,7 @@ public void testExceptionFromResponseHandler() // verify that the client has not failed client.scheduleRequest(); requestComplete.await(10, TimeUnit.SECONDS); - assertThat(callback.getPages().size()).isEqualTo(0); + assertThat(callback.getPages()).hasSize(0); 
assertThat(callback.getCompletedRequests()).isEqualTo(2); assertThat(callback.getFinishedBuffers()).isEqualTo(0); assertThat(callback.getFailedBuffers()).isEqualTo(0); @@ -424,7 +424,7 @@ public void testExceptionFromResponseHandler() // verify that the client has failed client.scheduleRequest(); requestComplete.await(10, TimeUnit.SECONDS); - assertThat(callback.getPages().size()).isEqualTo(0); + assertThat(callback.getPages()).hasSize(0); assertThat(callback.getCompletedRequests()).isEqualTo(3); assertThat(callback.getFinishedBuffers()).isEqualTo(0); assertThat(callback.getFailedBuffers()).isEqualTo(1); diff --git a/core/trino-main/src/test/java/io/trino/operator/TestPipelineStats.java b/core/trino-main/src/test/java/io/trino/operator/TestPipelineStats.java index bb4a39459c9fc3..7c622f61a95cce 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestPipelineStats.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestPipelineStats.java @@ -149,10 +149,10 @@ public static void assertExpectedPipelineStats(PipelineStats actual) assertThat(actual.getPhysicalWrittenDataSize()).isEqualTo(DataSize.ofBytes(20)); - assertThat(actual.getOperatorSummaries().size()).isEqualTo(1); + assertThat(actual.getOperatorSummaries()).hasSize(1); assertExpectedOperatorStats(actual.getOperatorSummaries().get(0)); - assertThat(actual.getDrivers().size()).isEqualTo(1); + assertThat(actual.getDrivers()).hasSize(1); assertExpectedDriverStats(actual.getDrivers().get(0)); } diff --git a/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java index 97b2e641cbbd63..82337cd82accf5 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestRowNumberOperator.java @@ -235,9 +235,9 @@ public void testRowNumberPartitioned() ImmutableSet expectedPartition1Set = 
ImmutableSet.copyOf(expectedPartition1.getMaterializedRows()); ImmutableSet expectedPartition2Set = ImmutableSet.copyOf(expectedPartition2.getMaterializedRows()); ImmutableSet expectedPartition3Set = ImmutableSet.copyOf(expectedPartition3.getMaterializedRows()); - assertThat(Sets.intersection(expectedPartition1Set, actualSet).size()).isEqualTo(4); - assertThat(Sets.intersection(expectedPartition2Set, actualSet).size()).isEqualTo(4); - assertThat(Sets.intersection(expectedPartition3Set, actualSet).size()).isEqualTo(2); + assertThat(Sets.intersection(expectedPartition1Set, actualSet)).hasSize(4); + assertThat(Sets.intersection(expectedPartition2Set, actualSet)).hasSize(4); + assertThat(Sets.intersection(expectedPartition3Set, actualSet)).hasSize(2); } } @@ -307,9 +307,9 @@ public void testRowNumberPartitionedLimit() ImmutableSet expectedPartition1Set = ImmutableSet.copyOf(expectedPartition1.getMaterializedRows()); ImmutableSet expectedPartition2Set = ImmutableSet.copyOf(expectedPartition2.getMaterializedRows()); ImmutableSet expectedPartition3Set = ImmutableSet.copyOf(expectedPartition3.getMaterializedRows()); - assertThat(Sets.intersection(expectedPartition1Set, actualSet).size()).isEqualTo(3); - assertThat(Sets.intersection(expectedPartition2Set, actualSet).size()).isEqualTo(3); - assertThat(Sets.intersection(expectedPartition3Set, actualSet).size()).isEqualTo(2); + assertThat(Sets.intersection(expectedPartition1Set, actualSet)).hasSize(3); + assertThat(Sets.intersection(expectedPartition2Set, actualSet)).hasSize(3); + assertThat(Sets.intersection(expectedPartition3Set, actualSet)).hasSize(2); } } @@ -363,10 +363,10 @@ public void testRowNumberUnpartitionedLimit() pages = stripRowNumberColumn(pages); MaterializedResult actual = toMaterializedResult(driverContext.getSession(), ImmutableList.of(DOUBLE, BIGINT), pages); - assertThat(actual.getMaterializedRows().size()).isEqualTo(3); + assertThat(actual.getMaterializedRows()).hasSize(3); ImmutableSet actualSet = 
ImmutableSet.copyOf(actual.getMaterializedRows()); ImmutableSet expectedRowsSet = ImmutableSet.copyOf(expectedRows.getMaterializedRows()); - assertThat(Sets.intersection(expectedRowsSet, actualSet).size()).isEqualTo(3); + assertThat(Sets.intersection(expectedRowsSet, actualSet)).hasSize(3); } private static Block getRowNumberColumn(List pages) diff --git a/core/trino-main/src/test/java/io/trino/operator/TestScanFilterAndProjectOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestScanFilterAndProjectOperator.java index 8cf4e70b5aaac1..edfd0a91706cdc 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestScanFilterAndProjectOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestScanFilterAndProjectOperator.java @@ -188,7 +188,7 @@ public void testPageSourceMergeOutput() operator.noMoreSplits(); List actual = toPages(operator); - assertThat(actual.size()).isEqualTo(1); + assertThat(actual).hasSize(1); List expected = rowPagesBuilder(BIGINT) .row(10L) diff --git a/core/trino-main/src/test/java/io/trino/operator/TestTableFinishOperator.java b/core/trino-main/src/test/java/io/trino/operator/TestTableFinishOperator.java index 2c9d78750fb8ac..eac368d8ecfe49 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestTableFinishOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestTableFinishOperator.java @@ -154,8 +154,8 @@ public void testStatisticsAggregation() operator.close(); assertThat(tableFinisher.getFragments()).isEqualTo(ImmutableList.of(Slices.wrappedBuffer(new byte[] {1}), Slices.wrappedBuffer(new byte[] {2}))); - assertThat(tableFinisher.getComputedStatistics().size()).isEqualTo(1); - assertThat(getOnlyElement(tableFinisher.getComputedStatistics()).getColumnStatistics().size()).isEqualTo(1); + assertThat(tableFinisher.getComputedStatistics()).hasSize(1); + assertThat(getOnlyElement(tableFinisher.getComputedStatistics()).getColumnStatistics()).hasSize(1); LongArrayBlockBuilder expectedStatistics = 
new LongArrayBlockBuilder(null, 1); BIGINT.writeLong(expectedStatistics, 7); diff --git a/core/trino-main/src/test/java/io/trino/operator/TestTaskStats.java b/core/trino-main/src/test/java/io/trino/operator/TestTaskStats.java index 92b6770ee006ef..bffb372a199ba0 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestTaskStats.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestTaskStats.java @@ -151,7 +151,7 @@ public static void assertExpectedTaskStats(TaskStats actual) assertThat(actual.getPhysicalWrittenDataSize()).isEqualTo(DataSize.ofBytes(25)); - assertThat(actual.getPipelines().size()).isEqualTo(1); + assertThat(actual.getPipelines()).hasSize(1); assertExpectedPipelineStats(actual.getPipelines().get(0)); } } diff --git a/core/trino-main/src/test/java/io/trino/operator/TestTypeSignature.java b/core/trino-main/src/test/java/io/trino/operator/TestTypeSignature.java index a31db515c86637..1db6e5ceae6fae 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestTypeSignature.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestTypeSignature.java @@ -51,7 +51,7 @@ public class TestTypeSignature public void parseSignatureWithLiterals() { TypeSignature result = new TypeSignature("decimal", typeVariable("X"), numericParameter(42)); - assertThat(result.getParameters().size()).isEqualTo(2); + assertThat(result.getParameters()).hasSize(2); assertThat(result.getParameters().get(0).isVariable()).isEqualTo(true); assertThat(result.getParameters().get(1).isLongLiteral()).isEqualTo(true); } @@ -315,7 +315,7 @@ private static void assertSignature( { TypeSignature signature = parseTypeSignature(typeName, ImmutableSet.of()); assertThat(signature.getBase()).isEqualTo(base); - assertThat(signature.getParameters().size()).isEqualTo(parameters.size()); + assertThat(signature.getParameters()).hasSize(parameters.size()); for (int i = 0; i < signature.getParameters().size(); i++) { 
assertThat(signature.getParameters().get(i).toString()).isEqualTo(parameters.get(i)); } diff --git a/core/trino-main/src/test/java/io/trino/operator/TestingExchangeHttpClientHandler.java b/core/trino-main/src/test/java/io/trino/operator/TestingExchangeHttpClientHandler.java index 2ddf0ef9aaaf49..816d635a6be009 100644 --- a/core/trino-main/src/test/java/io/trino/operator/TestingExchangeHttpClientHandler.java +++ b/core/trino-main/src/test/java/io/trino/operator/TestingExchangeHttpClientHandler.java @@ -59,11 +59,11 @@ public Response handle(Request request) { ImmutableList parts = ImmutableList.copyOf(Splitter.on("/").omitEmptyStrings().split(request.getUri().getPath())); if (request.getMethod().equals("DELETE")) { - assertThat(parts.size()).isEqualTo(1); + assertThat(parts).hasSize(1); return new TestingResponse(HttpStatus.NO_CONTENT, ImmutableListMultimap.of(), new byte[0]); } - assertThat(parts.size()).isEqualTo(2); + assertThat(parts).hasSize(2); TaskId taskId = TaskId.valueOf(parts.get(0)); int pageToken = Integer.parseInt(parts.get(1)); diff --git a/core/trino-main/src/test/java/io/trino/operator/aggregation/TestApproximateMostFrequentHistogram.java b/core/trino-main/src/test/java/io/trino/operator/aggregation/TestApproximateMostFrequentHistogram.java index 54a98b565b0c56..8d3ee140455422 100644 --- a/core/trino-main/src/test/java/io/trino/operator/aggregation/TestApproximateMostFrequentHistogram.java +++ b/core/trino-main/src/test/java/io/trino/operator/aggregation/TestApproximateMostFrequentHistogram.java @@ -37,7 +37,7 @@ public void testLongHistogram() Map buckets = histogram.getBuckets(); - assertThat(buckets.size()).isEqualTo(3); + assertThat(buckets).hasSize(3); assertThat(buckets).isEqualTo(ImmutableMap.of(1L, 2L, 2L, 1L, 3L, 1L)); } @@ -75,7 +75,7 @@ public void testMerge() histogram1.merge(histogram2); Map buckets = histogram1.getBuckets(); - assertThat(buckets.size()).isEqualTo(3); + assertThat(buckets).hasSize(3); 
assertThat(buckets).isEqualTo(ImmutableMap.of(1L, 2L, 2L, 1L, 3L, 1L)); } @@ -105,7 +105,7 @@ public void testLongMergeOverMaxbuckets() histogram1.merge(histogram2); Map buckets = histogram1.getBuckets(); - assertThat(buckets.size()).isEqualTo(3); + assertThat(buckets).hasSize(3); assertThat(buckets).isEqualTo(ImmutableMap.of(1L, 4L, 2L, 4L, 3L, 4L)); } @@ -122,7 +122,7 @@ public void testStringHistogram() Map buckets = histogram.getBuckets(); - assertThat(buckets.size()).isEqualTo(3); + assertThat(buckets).hasSize(3); assertThat(buckets).isEqualTo(ImmutableMap.of(Slices.utf8Slice("A"), 2L, Slices.utf8Slice("B"), 1L, Slices.utf8Slice("C"), 1L)); } @@ -170,7 +170,7 @@ public void testStringMergeOverMaxbuckets() histogram1.merge(histogram2); Map buckets = histogram1.getBuckets(); - assertThat(buckets.size()).isEqualTo(3); + assertThat(buckets).hasSize(3); assertThat(buckets).isEqualTo(ImmutableMap.of(Slices.utf8Slice("A"), 4L, Slices.utf8Slice("B"), 4L, Slices.utf8Slice("C"), 4L)); } } diff --git a/core/trino-main/src/test/java/io/trino/operator/join/TestNestedLoopBuildOperator.java b/core/trino-main/src/test/java/io/trino/operator/join/TestNestedLoopBuildOperator.java index 1ba4ee97490649..3e3d5246c8a67e 100644 --- a/core/trino-main/src/test/java/io/trino/operator/join/TestNestedLoopBuildOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/join/TestNestedLoopBuildOperator.java @@ -95,7 +95,7 @@ public void testNestedLoopBuild() assertThat(buildPages.get(0)).isEqualTo(buildPage1); assertThat(buildPages.get(1)).isEqualTo(buildPage2); - assertThat(buildPages.size()).isEqualTo(2); + assertThat(buildPages).hasSize(2); } @Test @@ -128,7 +128,7 @@ public void testNestedLoopBuildNoBlock() assertThat(nestedLoopJoinBridge.getPagesFuture().isDone()).isTrue(); List buildPages = nestedLoopJoinBridge.getPagesFuture().get().getPages(); - assertThat(buildPages.size()).isEqualTo(1); + assertThat(buildPages).hasSize(1); 
assertThat(buildPages.get(0).getPositionCount()).isEqualTo(3003); } @@ -157,7 +157,7 @@ public void testNestedLoopNoBlocksMaxSizeLimit() assertThat(nestedLoopJoinBridge.getPagesFuture().isDone()).isTrue(); List buildPages = nestedLoopJoinBridge.getPagesFuture().get().getPages(); - assertThat(buildPages.size()).isEqualTo(2); + assertThat(buildPages).hasSize(2); assertThat(buildPages.get(0).getPositionCount()).isEqualTo(PageProcessor.MAX_BATCH_SIZE); assertThat(buildPages.get(1).getPositionCount()).isEqualTo(100); } diff --git a/core/trino-main/src/test/java/io/trino/operator/join/unspilled/TestHashJoinOperator.java b/core/trino-main/src/test/java/io/trino/operator/join/unspilled/TestHashJoinOperator.java index 1caf44a01cb629..321452dfda6458 100644 --- a/core/trino-main/src/test/java/io/trino/operator/join/unspilled/TestHashJoinOperator.java +++ b/core/trino-main/src/test/java/io/trino/operator/join/unspilled/TestHashJoinOperator.java @@ -221,7 +221,7 @@ private void testInnerJoinWithRunLengthEncodedProbe(boolean withFilter, boolean pages = dropChannel(pages, getHashChannels(probePagesBuilder, buildPages)); } - assertThat(pages.size()).isEqualTo(2); + assertThat(pages).hasSize(2); if (withFilter) { assertThat(pages.get(0).getBlock(2)).isInstanceOf(VariableWidthBlock.class); assertThat(pages.get(0).getBlock(3)).isInstanceOf(LongArrayBlock.class); diff --git a/core/trino-main/src/test/java/io/trino/operator/output/TestPagePartitioner.java b/core/trino-main/src/test/java/io/trino/operator/output/TestPagePartitioner.java index 3d6b8f7f071148..53e502dff7fdc1 100644 --- a/core/trino-main/src/test/java/io/trino/operator/output/TestPagePartitioner.java +++ b/core/trino-main/src/test/java/io/trino/operator/output/TestPagePartitioner.java @@ -548,7 +548,7 @@ public void testOutputBytesWhenReused() pagePartitioner.close(); List output = outputBuffer.getEnqueued(); // only a single page was flushed after the partitioner is closed, all output bytes were reported eagerly on 
release - assertThat(output.size()).isEqualTo(1); + assertThat(output).hasSize(1); } @Test diff --git a/core/trino-main/src/test/java/io/trino/operator/project/TestPageProcessor.java b/core/trino-main/src/test/java/io/trino/operator/project/TestPageProcessor.java index 8cb360d10eff9f..e9283616d1d5f3 100644 --- a/core/trino-main/src/test/java/io/trino/operator/project/TestPageProcessor.java +++ b/core/trino-main/src/test/java/io/trino/operator/project/TestPageProcessor.java @@ -99,7 +99,7 @@ public void testProjectNoColumns() Iterator> output = processAndAssertRetainedPageSize(pageProcessor, inputPage); List> outputPages = ImmutableList.copyOf(output); - assertThat(outputPages.size()).isEqualTo(1); + assertThat(outputPages).hasSize(1); Page outputPage = outputPages.get(0).orElse(null); assertThat(outputPage.getChannelCount()).isEqualTo(0); assertThat(outputPage.getPositionCount()).isEqualTo(inputPage.getPositionCount()); @@ -117,7 +117,7 @@ public void testFilterNoColumns() assertThat(memoryContext.getBytes()).isEqualTo(0); List> outputPages = ImmutableList.copyOf(output); - assertThat(outputPages.size()).isEqualTo(1); + assertThat(outputPages).hasSize(1); Page outputPage = outputPages.get(0).orElse(null); assertThat(outputPage.getChannelCount()).isEqualTo(0); assertThat(outputPage.getPositionCount()).isEqualTo(50); @@ -137,7 +137,7 @@ public void testPartialFilter() Iterator> output = processAndAssertRetainedPageSize(pageProcessor, inputPage); List> outputPages = ImmutableList.copyOf(output); - assertThat(outputPages.size()).isEqualTo(1); + assertThat(outputPages).hasSize(1); assertPageEquals(ImmutableList.of(BIGINT), outputPages.get(0).orElse(null), new Page(createLongSequenceBlock(25, 75))); } @@ -155,7 +155,7 @@ public void testSelectAllFilter() Iterator> output = processAndAssertRetainedPageSize(pageProcessor, inputPage); List> outputPages = ImmutableList.copyOf(output); - assertThat(outputPages.size()).isEqualTo(1); + assertThat(outputPages).hasSize(1); 
assertPageEquals(ImmutableList.of(BIGINT), outputPages.get(0).orElse(null), new Page(createLongSequenceBlock(0, 100))); } @@ -171,7 +171,7 @@ public void testSelectNoneFilter() assertThat(memoryContext.getBytes()).isEqualTo(0); List> outputPages = ImmutableList.copyOf(output); - assertThat(outputPages.size()).isEqualTo(0); + assertThat(outputPages).hasSize(0); } @Test @@ -187,7 +187,7 @@ public void testProjectEmptyPage() // output should be one page containing no columns (only a count) List> outputPages = ImmutableList.copyOf(output); - assertThat(outputPages.size()).isEqualTo(0); + assertThat(outputPages).hasSize(0); } @Test @@ -204,7 +204,7 @@ public void testSelectNoneFilterLazyLoad() Iterator> output = pageProcessor.process(SESSION, new DriverYieldSignal(), memoryContext, inputPage); assertThat(memoryContext.getBytes()).isEqualTo(0); List> outputPages = ImmutableList.copyOf(output); - assertThat(outputPages.size()).isEqualTo(0); + assertThat(outputPages).hasSize(0); } @Test @@ -225,7 +225,7 @@ public void testProjectLazyLoad() Iterator> output = pageProcessor.process(SESSION, new DriverYieldSignal(), memoryContext, inputPage); List> outputPages = ImmutableList.copyOf(output); - assertThat(outputPages.size()).isEqualTo(1); + assertThat(outputPages).hasSize(1); assertPageEquals(ImmutableList.of(BIGINT), outputPages.get(0).orElse(null), new Page(createLongSequenceBlock(0, 100))); } @@ -243,7 +243,7 @@ public void testBatchedOutput() Iterator> output = processAndAssertRetainedPageSize(pageProcessor, inputPage); List> outputPages = ImmutableList.copyOf(output); - assertThat(outputPages.size()).isEqualTo(3); + assertThat(outputPages).hasSize(3); for (int i = 0; i < outputPages.size(); i++) { Page actualPage = outputPages.get(i).orElse(null); int offset = i * MAX_BATCH_SIZE; diff --git a/core/trino-main/src/test/java/io/trino/server/TestQueryResource.java b/core/trino-main/src/test/java/io/trino/server/TestQueryResource.java index 8fb55c7bf5634c..a019cdc6d3e03a 
100644 --- a/core/trino-main/src/test/java/io/trino/server/TestQueryResource.java +++ b/core/trino-main/src/test/java/io/trino/server/TestQueryResource.java @@ -148,19 +148,19 @@ public void testGetQueryInfos() runToCompletion("SELECT x FROM y"); List infos = getQueryInfos("/v1/query"); - assertThat(infos.size()).isEqualTo(3); + assertThat(infos).hasSize(3); assertStateCounts(infos, 2, 1, 0); infos = getQueryInfos("/v1/query?state=finished"); - assertThat(infos.size()).isEqualTo(2); + assertThat(infos).hasSize(2); assertStateCounts(infos, 2, 0, 0); infos = getQueryInfos("/v1/query?state=failed"); - assertThat(infos.size()).isEqualTo(1); + assertThat(infos).hasSize(1); assertStateCounts(infos, 0, 1, 0); infos = getQueryInfos("/v1/query?state=running"); - assertThat(infos.size()).isEqualTo(0); + assertThat(infos).hasSize(0); assertStateCounts(infos, 0, 0, 0); server.getAccessControl().deny(privilege("query", VIEW_QUERY)); @@ -183,8 +183,8 @@ public void testGetQueryInfoPruned() QueryInfo queryInfoPruned = getQueryInfo(queryId, true); QueryInfo queryInfoNotPruned = getQueryInfo(queryId); - assertThat(queryInfoPruned.getRoutines().size()).isEqualTo(1); - assertThat(queryInfoNotPruned.getRoutines().size()).isEqualTo(1); + assertThat(queryInfoPruned.getRoutines()).hasSize(1); + assertThat(queryInfoNotPruned.getRoutines()).hasSize(1); assertThat(queryInfoPruned.getRoutines().get(0).getRoutine()).isEqualTo("now"); assertThat(queryInfoNotPruned.getRoutines().get(0).getRoutine()).isEqualTo("now"); diff --git a/core/trino-main/src/test/java/io/trino/server/TestQueryStateInfo.java b/core/trino-main/src/test/java/io/trino/server/TestQueryStateInfo.java index d6b7c46349a0c6..9e7ec6e246d7d2 100644 --- a/core/trino-main/src/test/java/io/trino/server/TestQueryStateInfo.java +++ b/core/trino-main/src/test/java/io/trino/server/TestQueryStateInfo.java @@ -80,7 +80,7 @@ public void testQueryStateInfo() List chainInfo = query.getPathToRoot().get(); - 
assertThat(chainInfo.size()).isEqualTo(3); + assertThat(chainInfo).hasSize(3); ResourceGroupInfo rootAInfo = chainInfo.get(1); ResourceGroupInfo expectedRootAInfo = rootA.getInfo(); diff --git a/core/trino-main/src/test/java/io/trino/server/TestQueryStateInfoResource.java b/core/trino-main/src/test/java/io/trino/server/TestQueryStateInfoResource.java index 061ac0b4c9a910..09bc72e3b61f62 100644 --- a/core/trino-main/src/test/java/io/trino/server/TestQueryStateInfoResource.java +++ b/core/trino-main/src/test/java/io/trino/server/TestQueryStateInfoResource.java @@ -143,7 +143,7 @@ public void testGetAllQueryStateInfos() .build(), createJsonResponseHandler(listJsonCodec(QueryStateInfo.class))); - assertThat(infos.size()).isEqualTo(2); + assertThat(infos).hasSize(2); } @Test @@ -156,7 +156,7 @@ public void testGetQueryStateInfosForUser() .build(), createJsonResponseHandler(listJsonCodec(QueryStateInfo.class))); - assertThat(infos.size()).isEqualTo(1); + assertThat(infos).hasSize(1); } @Test @@ -194,7 +194,7 @@ public void testGetAllQueryStateInfosDenied() .setHeader(TRINO_HEADERS.requestUser(), "any-other-user") .build(), createJsonResponseHandler(listJsonCodec(QueryStateInfo.class))); - assertThat(infos.size()).isEqualTo(2); + assertThat(infos).hasSize(2); testGetAllQueryStateInfosDenied("user1", 1); testGetAllQueryStateInfosDenied("any-other-user", 0); @@ -211,7 +211,7 @@ private void testGetAllQueryStateInfosDenied(String executionUser, int expectedC .build(), createJsonResponseHandler(listJsonCodec(QueryStateInfo.class))); - assertThat(infos.size()).isEqualTo(expectedCount); + assertThat(infos).hasSize(expectedCount); } finally { server.getAccessControl().reset(); diff --git a/core/trino-main/src/test/java/io/trino/server/security/jwt/TestJwkDecoder.java b/core/trino-main/src/test/java/io/trino/server/security/jwt/TestJwkDecoder.java index c22b1c5bc38904..93a585f786e96b 100644 --- a/core/trino-main/src/test/java/io/trino/server/security/jwt/TestJwkDecoder.java +++ 
b/core/trino-main/src/test/java/io/trino/server/security/jwt/TestJwkDecoder.java @@ -68,7 +68,7 @@ public void testReadRsaKeys() " }\n" + " ]\n" + "}"); - assertThat(keys.size()).isEqualTo(2); + assertThat(keys).hasSize(2); assertThat(keys.get("example-rsa")).isInstanceOf(JwkRsaPublicKey.class); assertThat(keys.get("example-ec")).isInstanceOf(JwkEcPublicKey.class); } @@ -96,7 +96,7 @@ public void testNoKeyId() " }\n" + " ]\n" + "}"); - assertThat(keys.size()).isEqualTo(0); + assertThat(keys).hasSize(0); } @Test @@ -114,7 +114,7 @@ public void testRsaNoModulus() " }\n" + " ]\n" + "}"); - assertThat(keys.size()).isEqualTo(0); + assertThat(keys).hasSize(0); } @Test @@ -132,7 +132,7 @@ public void testRsaNoExponent() " }\n" + " ]\n" + "}"); - assertThat(keys.size()).isEqualTo(0); + assertThat(keys).hasSize(0); } @Test @@ -151,7 +151,7 @@ public void testRsaInvalidModulus() " }\n" + " ]\n" + "}"); - assertThat(keys.size()).isEqualTo(0); + assertThat(keys).hasSize(0); } @Test @@ -170,7 +170,7 @@ public void testRsaInvalidExponent() " }\n" + " ]\n" + "}"); - assertThat(keys.size()).isEqualTo(0); + assertThat(keys).hasSize(0); } @Test @@ -227,7 +227,7 @@ public void testEcKey() " }\n" + " ]\n" + "}"); - assertThat(keys.size()).isEqualTo(1); + assertThat(keys).hasSize(1); assertThat(keys.get("test-ec")).isInstanceOf(JwkEcPublicKey.class); } @@ -246,7 +246,7 @@ public void testEcInvalidCurve() " }\n" + " ]\n" + "}"); - assertThat(keys.size()).isEqualTo(0); + assertThat(keys).hasSize(0); } @Test @@ -264,7 +264,7 @@ public void testEcInvalidX() " }\n" + " ]\n" + "}"); - assertThat(keys.size()).isEqualTo(0); + assertThat(keys).hasSize(0); } @Test @@ -282,7 +282,7 @@ public void testEcInvalidY() " }\n" + " ]\n" + "}"); - assertThat(keys.size()).isEqualTo(0); + assertThat(keys).hasSize(0); } @Test diff --git a/core/trino-main/src/test/java/io/trino/server/security/jwt/TestJwkService.java b/core/trino-main/src/test/java/io/trino/server/security/jwt/TestJwkService.java index 
3cd018eefd23fd..37db859121f826 100644 --- a/core/trino-main/src/test/java/io/trino/server/security/jwt/TestJwkService.java +++ b/core/trino-main/src/test/java/io/trino/server/security/jwt/TestJwkService.java @@ -190,13 +190,13 @@ public void testBadResponse() private static void assertEmptyKeys(JwkService service) { - assertThat(service.getKeys().size()).isEqualTo(0); + assertThat(service.getKeys()).hasSize(0); } private static void assertTestKeys(JwkService service) { Map keys = service.getKeys(); - assertThat(keys.size()).isEqualTo(3); + assertThat(keys).hasSize(3); assertThat(keys).containsKey("test-rsa"); assertThat(keys).containsKey("test-ec"); assertThat(keys).containsKey("test-certificate-chain"); diff --git a/core/trino-main/src/test/java/io/trino/spiller/TestBinaryFileSpiller.java b/core/trino-main/src/test/java/io/trino/spiller/TestBinaryFileSpiller.java index cb6826090797a4..07bef0f650db02 100644 --- a/core/trino-main/src/test/java/io/trino/spiller/TestBinaryFileSpiller.java +++ b/core/trino-main/src/test/java/io/trino/spiller/TestBinaryFileSpiller.java @@ -164,13 +164,13 @@ private void testSpiller(List types, Spiller spiller, List... 
spills assertThat(memoryContext.getBytes()).isEqualTo((long) spills.length * FileSingleStreamSpiller.BUFFER_SIZE); List> actualSpills = spiller.getSpills(); - assertThat(actualSpills.size()).isEqualTo(spills.length); + assertThat(actualSpills).hasSize(spills.length); for (int i = 0; i < actualSpills.size(); i++) { List actualSpill = ImmutableList.copyOf(actualSpills.get(i)); List expectedSpill = spills[i]; - assertThat(actualSpill.size()).isEqualTo(expectedSpill.size()); + assertThat(actualSpill).hasSize(expectedSpill.size()); for (int j = 0; j < actualSpill.size(); j++) { assertPageEquals(types, actualSpill.get(j), expectedSpill.get(j)); } diff --git a/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpiller.java b/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpiller.java index b3c4241de32498..0d113d74b2b33d 100644 --- a/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpiller.java +++ b/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpiller.java @@ -123,7 +123,7 @@ private void assertSpill(CompressionCodec compressionCodec, boolean encryption) assertThat(memoryContext.getBytes()).isEqualTo(4096); spiller.spill(page).get(); spiller.spill(Iterators.forArray(page, page, page)).get(); - assertThat(listFiles(spillPath.toPath()).size()).isEqualTo(1); + assertThat(listFiles(spillPath.toPath())).hasSize(1); // Assert the spill codec flags match the expected configuration try (InputStream is = newInputStream(listFiles(spillPath.toPath()).get(0))) { @@ -158,7 +158,7 @@ private void assertSpill(CompressionCodec compressionCodec, boolean encryption) .hasMessage("Repeated reads are disallowed to prevent potential resource leaks"); spiller.close(); - assertThat(listFiles(spillPath.toPath()).size()).isEqualTo(0); + assertThat(listFiles(spillPath.toPath())).hasSize(0); assertThat(memoryContext.getBytes()).isEqualTo(0); } finally { diff --git 
a/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpillerFactory.java b/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpillerFactory.java index b8f89cc228973e..1f7dbcc9b8bfbd 100644 --- a/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpillerFactory.java +++ b/core/trino-main/src/test/java/io/trino/spiller/TestFileSingleStreamSpillerFactory.java @@ -90,8 +90,8 @@ public void testDistributesSpillOverPaths() List spillPaths = ImmutableList.of(spillPath1.toPath(), spillPath2.toPath()); FileSingleStreamSpillerFactory spillerFactory = spillerFactoryFactory(spillPaths); - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(0); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(0); + assertThat(listFiles(spillPath1.toPath())).hasSize(0); + assertThat(listFiles(spillPath2.toPath())).hasSize(0); Page page = buildPage(); List spillers = new ArrayList<>(); @@ -100,12 +100,12 @@ public void testDistributesSpillOverPaths() getUnchecked(singleStreamSpiller.spill(page)); spillers.add(singleStreamSpiller); } - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(5); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(5); + assertThat(listFiles(spillPath1.toPath())).hasSize(5); + assertThat(listFiles(spillPath2.toPath())).hasSize(5); spillers.forEach(SingleStreamSpiller::close); - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(0); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(0); + assertThat(listFiles(spillPath1.toPath())).hasSize(0); + assertThat(listFiles(spillPath2.toPath())).hasSize(0); } @Test @@ -116,8 +116,8 @@ public void testDistributesSpillOverPathsBadDisk() List spillPaths = ImmutableList.of(spillPath1.toPath(), spillPath2.toPath()); FileSingleStreamSpillerFactory spillerFactory = spillerFactoryFactory(spillPaths); - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(0); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(0); + 
assertThat(listFiles(spillPath1.toPath())).hasSize(0); + assertThat(listFiles(spillPath2.toPath())).hasSize(0); // Set first spiller path to read-only after initialization to emulate a disk failing during runtime setPosixFilePermissions(spillPath1.toPath(), ImmutableSet.of(PosixFilePermission.OWNER_READ)); @@ -132,12 +132,12 @@ public void testDistributesSpillOverPathsBadDisk() } // bad disk should receive no spills, with the good disk taking the remainder - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(0); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(numberOfSpills); + assertThat(listFiles(spillPath1.toPath())).hasSize(0); + assertThat(listFiles(spillPath2.toPath())).hasSize(numberOfSpills); spillers.forEach(SingleStreamSpiller::close); - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(0); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(0); + assertThat(listFiles(spillPath1.toPath())).hasSize(0); + assertThat(listFiles(spillPath2.toPath())).hasSize(0); } private Page buildPage() @@ -186,14 +186,14 @@ public void testCleanupOldSpillFiles() java.nio.file.Files.createTempFile(spillPath2.toPath(), "blah", SPILL_FILE_SUFFIX); java.nio.file.Files.createTempFile(spillPath2.toPath(), "blah", "blah"); - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(3); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(3); + assertThat(listFiles(spillPath1.toPath())).hasSize(3); + assertThat(listFiles(spillPath2.toPath())).hasSize(3); FileSingleStreamSpillerFactory spillerFactory = spillerFactoryFactory(spillPaths); spillerFactory.cleanupOldSpillFiles(); - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(1); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(2); + assertThat(listFiles(spillPath1.toPath())).hasSize(1); + assertThat(listFiles(spillPath2.toPath())).hasSize(2); } @Test @@ -205,8 +205,8 @@ public void testCacheInvalidatedOnBadDisk() FileSingleStreamSpillerFactory 
spillerFactory = spillerFactoryFactory(spillPaths); - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(0); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(0); + assertThat(listFiles(spillPath1.toPath())).hasSize(0); + assertThat(listFiles(spillPath2.toPath())).hasSize(0); Page page = buildPage(); List spillers = new ArrayList<>(); @@ -231,8 +231,8 @@ public void testCacheInvalidatedOnBadDisk() // restore permissions to allow cleanup setPosixFilePermissions(spillPath2.toPath(), ImmutableSet.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE, PosixFilePermission.OWNER_EXECUTE)); spillers.forEach(SingleStreamSpiller::close); - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(0); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(0); + assertThat(listFiles(spillPath1.toPath())).hasSize(0); + assertThat(listFiles(spillPath2.toPath())).hasSize(0); } @Test @@ -244,8 +244,8 @@ public void testCacheFull() FileSingleStreamSpillerFactory spillerFactory = spillerFactoryFactory(spillPaths); - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(0); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(0); + assertThat(listFiles(spillPath1.toPath())).hasSize(0); + assertThat(listFiles(spillPath2.toPath())).hasSize(0); Page page = buildPage(); List spillers = new ArrayList<>(); @@ -263,8 +263,8 @@ public void testCacheFull() .isEqualTo(2); spillers.forEach(SingleStreamSpiller::close); - assertThat(listFiles(spillPath1.toPath()).size()).isEqualTo(0); - assertThat(listFiles(spillPath2.toPath()).size()).isEqualTo(0); + assertThat(listFiles(spillPath1.toPath())).hasSize(0); + assertThat(listFiles(spillPath2.toPath())).hasSize(0); } private FileSingleStreamSpillerFactory spillerFactoryFactory(List paths) diff --git a/core/trino-main/src/test/java/io/trino/spiller/TestGenericPartitioningSpiller.java b/core/trino-main/src/test/java/io/trino/spiller/TestGenericPartitioningSpiller.java index 
17214e24145fa8..7523170eeeac41 100644 --- a/core/trino-main/src/test/java/io/trino/spiller/TestGenericPartitioningSpiller.java +++ b/core/trino-main/src/test/java/io/trino/spiller/TestGenericPartitioningSpiller.java @@ -214,7 +214,7 @@ private void assertSpilledPages( List actualSpill = ImmutableList.copyOf(spiller.getSpilledPages(partition)); List expectedSpill = expectedPartitions.get(partition); - assertThat(actualSpill.size()).isEqualTo(expectedSpill.size()); + assertThat(actualSpill).hasSize(expectedSpill.size()); for (int j = 0; j < actualSpill.size(); j++) { assertPageEquals(types, actualSpill.get(j), expectedSpill.get(j)); } diff --git a/core/trino-main/src/test/java/io/trino/sql/BenchmarkExpressionInterpreter.java b/core/trino-main/src/test/java/io/trino/sql/BenchmarkExpressionInterpreter.java index 883fc5e98a946c..a2669edd6ed1c3 100644 --- a/core/trino-main/src/test/java/io/trino/sql/BenchmarkExpressionInterpreter.java +++ b/core/trino-main/src/test/java/io/trino/sql/BenchmarkExpressionInterpreter.java @@ -83,7 +83,7 @@ public void verify() BenchmarkData data = new BenchmarkData(); data.setup(); BenchmarkExpressionInterpreter benchmark = new BenchmarkExpressionInterpreter(); - assertThat(benchmark.optimize(data).size()).isEqualTo(data.expressions.size()); + assertThat(benchmark.optimize(data)).hasSize(data.expressions.size()); } public static void main(String[] args) diff --git a/core/trino-main/src/test/java/io/trino/sql/gen/TestColumnarFilters.java b/core/trino-main/src/test/java/io/trino/sql/gen/TestColumnarFilters.java index 06065f35f4f8ad..336bca5a9989d3 100644 --- a/core/trino-main/src/test/java/io/trino/sql/gen/TestColumnarFilters.java +++ b/core/trino-main/src/test/java/io/trino/sql/gen/TestColumnarFilters.java @@ -919,7 +919,7 @@ private static void verifyFilterInternal(List inputPages, RowExpression fi { List outputPagesExpected = processFilter(inputPages, false, filter); List outputPagesActual = processFilter(inputPages, true, filter); - 
assertThat(outputPagesExpected.size()).isEqualTo(outputPagesActual.size()); + assertThat(outputPagesExpected).hasSize(outputPagesActual.size()); for (int pageCount = 0; pageCount < outputPagesActual.size(); pageCount++) { assertPageEquals(ImmutableList.of(BIGINT), outputPagesActual.get(pageCount), outputPagesExpected.get(pageCount)); @@ -930,7 +930,7 @@ private static void assertPageEquals(List types, Page actual, Page expecte { assertThat(actual.getChannelCount()).isEqualTo(expected.getChannelCount()); assertThat(actual.getPositionCount()).isEqualTo(expected.getPositionCount()); - assertThat(types.size()).isEqualTo(actual.getChannelCount()); + assertThat(types).hasSize(actual.getChannelCount()); for (int channel = 0; channel < types.size(); channel++) { assertBlockEquals(types.get(channel), actual.getBlock(channel), expected.getBlock(channel)); diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/TestEqualityInference.java b/core/trino-main/src/test/java/io/trino/sql/planner/TestEqualityInference.java index 2d4e5279511df9..07f53c5780266d 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/TestEqualityInference.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/TestEqualityInference.java @@ -323,7 +323,7 @@ public void testExpressionsThatMayReturnNullOnNonNullInput() equals(new Reference(candidate.type(), "a"), candidate)); List equalities = inference.generateEqualitiesPartitionedBy(symbols("b")).getScopeStraddlingEqualities(); - assertThat(equalities.size()).isEqualTo(1); + assertThat(equalities).hasSize(1); assertThat(equalities.get(0).equals(equals(new Reference(BIGINT, "x"), new Reference(BIGINT, "b"))) || equalities.get(0).equals(equals(new Reference(BIGINT, "b"), new Reference(BIGINT, "x")))).isTrue(); } } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/TestSymbolAllocator.java b/core/trino-main/src/test/java/io/trino/sql/planner/TestSymbolAllocator.java index ca06f2d3f08476..4a7410a8553c78 100644 --- 
a/core/trino-main/src/test/java/io/trino/sql/planner/TestSymbolAllocator.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/TestSymbolAllocator.java @@ -34,7 +34,7 @@ public void testUnique() .add(allocator.newSymbol("foo", BigintType.BIGINT)) .build(); - assertThat(symbols.size()).isEqualTo(4); + assertThat(symbols).hasSize(4); } @Test diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/TestMemo.java b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/TestMemo.java index 460391517227e9..f164a04de037d3 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/iterative/TestMemo.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/iterative/TestMemo.java @@ -257,7 +257,7 @@ private static void assertMatchesStructure(PlanNode actual, PlanNode expected) { assertThat(actual.getClass()).isEqualTo(expected.getClass()); assertThat(actual.getId()).isEqualTo(expected.getId()); - assertThat(actual.getSources().size()).isEqualTo(expected.getSources().size()); + assertThat(actual.getSources()).hasSize(expected.getSources().size()); for (int i = 0; i < actual.getSources().size(); i++) { assertMatchesStructure(actual.getSources().get(i), expected.getSources().get(i)); diff --git a/core/trino-main/src/test/java/io/trino/sql/query/QueryAssertions.java b/core/trino-main/src/test/java/io/trino/sql/query/QueryAssertions.java index 7348000fb8af44..537d5f06477dac 100644 --- a/core/trino-main/src/test/java/io/trino/sql/query/QueryAssertions.java +++ b/core/trino-main/src/test/java/io/trino/sql/query/QueryAssertions.java @@ -235,7 +235,7 @@ public void assertQueryReturnsEmptyResult(@Language("SQL") String actual) fail("Execution of 'actual' query failed: " + actual, ex); } List actualRows = actualResults.getMaterializedRows(); - assertThat(actualRows.size()).isEqualTo(0); + assertThat(actualRows).hasSize(0); } public MaterializedResult execute(@Language("SQL") String query) diff --git 
a/core/trino-main/src/test/java/io/trino/transaction/TestTransactionManager.java b/core/trino-main/src/test/java/io/trino/transaction/TestTransactionManager.java index a7c2a99c0eab85..d092003261c159 100644 --- a/core/trino-main/src/test/java/io/trino/transaction/TestTransactionManager.java +++ b/core/trino-main/src/test/java/io/trino/transaction/TestTransactionManager.java @@ -69,7 +69,7 @@ public void testTransactionWorkflow() TransactionId transactionId = transactionManager.beginTransaction(false); - assertThat(transactionManager.getAllTransactionInfos().size()).isEqualTo(1); + assertThat(transactionManager.getAllTransactionInfos()).hasSize(1); TransactionInfo transactionInfo = transactionManager.getTransactionInfo(transactionId); assertThat(transactionInfo.isAutoCommitContext()).isFalse(); assertThat(transactionInfo.getCatalogNames()).isEmpty(); @@ -98,7 +98,7 @@ public void testAbortedTransactionWorkflow() TransactionId transactionId = transactionManager.beginTransaction(false); - assertThat(transactionManager.getAllTransactionInfos().size()).isEqualTo(1); + assertThat(transactionManager.getAllTransactionInfos()).hasSize(1); TransactionInfo transactionInfo = transactionManager.getTransactionInfo(transactionId); assertThat(transactionInfo.isAutoCommitContext()).isFalse(); assertThat(transactionInfo.getCatalogNames()).isEmpty(); @@ -127,7 +127,7 @@ public void testFailedTransactionWorkflow() TransactionId transactionId = transactionManager.beginTransaction(false); - assertThat(transactionManager.getAllTransactionInfos().size()).isEqualTo(1); + assertThat(transactionManager.getAllTransactionInfos()).hasSize(1); TransactionInfo transactionInfo = transactionManager.getTransactionInfo(transactionId); assertThat(transactionInfo.isAutoCommitContext()).isFalse(); assertThat(transactionInfo.getCatalogNames()).isEmpty(); @@ -140,12 +140,12 @@ public void testFailedTransactionWorkflow() assertThat(transactionInfo.getWrittenCatalogName()).isEmpty(); 
transactionManager.fail(transactionId); - assertThat(transactionManager.getAllTransactionInfos().size()).isEqualTo(1); + assertThat(transactionManager.getAllTransactionInfos()).hasSize(1); assertTrinoExceptionThrownBy(() -> transactionManager.getCatalogMetadata(transactionId, TEST_CATALOG_HANDLE)) .hasErrorCode(TRANSACTION_ALREADY_ABORTED); - assertThat(transactionManager.getAllTransactionInfos().size()).isEqualTo(1); + assertThat(transactionManager.getAllTransactionInfos()).hasSize(1); getFutureValue(transactionManager.asyncAbort(transactionId)); @@ -167,7 +167,7 @@ public void testExpiration() TransactionId transactionId = transactionManager.beginTransaction(false); - assertThat(transactionManager.getAllTransactionInfos().size()).isEqualTo(1); + assertThat(transactionManager.getAllTransactionInfos()).hasSize(1); TransactionInfo transactionInfo = transactionManager.getTransactionInfo(transactionId); assertThat(transactionInfo.isAutoCommitContext()).isFalse(); assertThat(transactionInfo.getCatalogNames()).isEmpty(); diff --git a/core/trino-main/src/test/java/io/trino/type/TestRowOperators.java b/core/trino-main/src/test/java/io/trino/type/TestRowOperators.java index 30012895be9851..80330b577d018f 100644 --- a/core/trino-main/src/test/java/io/trino/type/TestRowOperators.java +++ b/core/trino-main/src/test/java/io/trino/type/TestRowOperators.java @@ -115,7 +115,7 @@ public void testRowTypeLookup() { TypeSignature signature = RowType.from(ImmutableList.of(field("b", BIGINT))).getTypeSignature(); Type type = assertions.getQueryRunner().getPlannerContext().getTypeManager().getType(signature); - assertThat(type.getTypeSignature().getParameters().size()).isEqualTo(1); + assertThat(type.getTypeSignature().getParameters()).hasSize(1); assertThat(type.getTypeSignature().getParameters().get(0).getNamedTypeSignature().getName().get()).isEqualTo("b"); } diff --git a/core/trino-main/src/test/java/io/trino/util/TestDisjointSet.java 
b/core/trino-main/src/test/java/io/trino/util/TestDisjointSet.java index 2ab1d8167cc166..84338e0ca1baf8 100644 --- a/core/trino-main/src/test/java/io/trino/util/TestDisjointSet.java +++ b/core/trino-main/src/test/java/io/trino/util/TestDisjointSet.java @@ -37,7 +37,7 @@ public void testInitial() for (int i = 0; i < 100; i++) { assertThat(disjoint.find(i).intValue()).isEqualTo(i); } - assertThat(disjoint.getEquivalentClasses().size()).isEqualTo(100); + assertThat(disjoint.getEquivalentClasses()).hasSize(100); } @Test @@ -64,8 +64,8 @@ public void testMergeAllSequentially() } } Collection> equivalentClasses = disjoint.getEquivalentClasses(); - assertThat(equivalentClasses.size()).isEqualTo(1); - assertThat(Iterables.getOnlyElement(equivalentClasses).size()).isEqualTo(101); + assertThat(equivalentClasses).hasSize(1); + assertThat(Iterables.getOnlyElement(equivalentClasses)).hasSize(101); } @Test @@ -92,8 +92,8 @@ public void testMergeAllBackwardsSequentially() } } Collection> equivalentClasses = disjoint.getEquivalentClasses(); - assertThat(equivalentClasses.size()).isEqualTo(1); - assertThat(Iterables.getOnlyElement(equivalentClasses).size()).isEqualTo(101); + assertThat(equivalentClasses).hasSize(1); + assertThat(Iterables.getOnlyElement(equivalentClasses)).hasSize(101); } @Test @@ -121,8 +121,8 @@ public void testMergeFourGroups() } } Collection> equivalentClasses = disjoint.getEquivalentClasses(); - assertThat(equivalentClasses.size()).isEqualTo(4); - equivalentClasses.forEach(equivalentClass -> assertThat(equivalentClass.size()).isEqualTo(25)); + assertThat(equivalentClasses).hasSize(4); + equivalentClasses.forEach(equivalentClass -> assertThat(equivalentClass).hasSize(25)); } @Test @@ -145,7 +145,7 @@ public void testMergeRandomly() if (newEquivalence) { groupCount--; } - assertThat(disjoint.getEquivalentClasses().size()).isEqualTo(groupCount); + assertThat(disjoint.getEquivalentClasses()).hasSize(groupCount); } } } diff --git 
a/core/trino-main/src/test/java/io/trino/util/TestFailures.java b/core/trino-main/src/test/java/io/trino/util/TestFailures.java index 0fb2898fdee80f..4c096ba3a0924b 100644 --- a/core/trino-main/src/test/java/io/trino/util/TestFailures.java +++ b/core/trino-main/src/test/java/io/trino/util/TestFailures.java @@ -35,7 +35,7 @@ public void testToFailureLoop() ExecutionFailureInfo failure = toFailure(exception1); assertThat(failure.getMessage()).isEqualTo("fake exception 1"); assertThat(failure.getCause()).isNull(); - assertThat(failure.getSuppressed().size()).isEqualTo(1); + assertThat(failure.getSuppressed()).hasSize(1); assertThat(failure.getSuppressed().get(0).getMessage()).isEqualTo("fake exception 2"); assertThat(failure.getErrorCode()).isEqualTo(TOO_MANY_REQUESTS_FAILED.toErrorCode()); @@ -44,7 +44,7 @@ public void testToFailureLoop() assertThat(failure.getMessage()).isEqualTo("fake exception 2"); assertThat(failure.getCause()).isNotNull(); assertThat(failure.getCause().getMessage()).isEqualTo("fake exception 1"); - assertThat(failure.getSuppressed().size()).isEqualTo(0); + assertThat(failure.getSuppressed()).hasSize(0); assertThat(failure.getErrorCode()).isEqualTo(TOO_MANY_REQUESTS_FAILED.toErrorCode()); // add exception 1 --> add suppress (exception 2) --> add suppress (exception 1) @@ -55,7 +55,7 @@ public void testToFailureLoop() failure = toFailure(exception1); assertThat(failure.getMessage()).isEqualTo("fake exception 1"); assertThat(failure.getCause()).isNull(); - assertThat(failure.getSuppressed().size()).isEqualTo(1); + assertThat(failure.getSuppressed()).hasSize(1); assertThat(failure.getSuppressed().get(0).getMessage()).isEqualTo("fake exception 2"); assertThat(failure.getErrorCode()).isEqualTo(TOO_MANY_REQUESTS_FAILED.toErrorCode()); @@ -67,7 +67,7 @@ public void testToFailureLoop() assertThat(failure.getMessage()).isEqualTo("fake exception 2"); assertThat(failure.getCause()).isNotNull(); assertThat(failure.getCause().getMessage()).isEqualTo("fake 
exception 1"); - assertThat(failure.getSuppressed().size()).isEqualTo(0); + assertThat(failure.getSuppressed()).hasSize(0); assertThat(failure.getErrorCode()).isEqualTo(GENERIC_INTERNAL_ERROR.toErrorCode()); } } diff --git a/core/trino-main/src/test/java/io/trino/util/TestLong2LongOpenBigHashMap.java b/core/trino-main/src/test/java/io/trino/util/TestLong2LongOpenBigHashMap.java index 8ce1863670d4d2..996b3a758023a5 100644 --- a/core/trino-main/src/test/java/io/trino/util/TestLong2LongOpenBigHashMap.java +++ b/core/trino-main/src/test/java/io/trino/util/TestLong2LongOpenBigHashMap.java @@ -30,7 +30,7 @@ public void testBasicOps() map.defaultReturnValue(-1); assertThat(map).isEmpty(); - assertThat(map.size()).isEqualTo(0); + assertThat(map).hasSize(0); assertThat(map.get(0)).isEqualTo(-1); assertThat(map.get(1)).isEqualTo(-1); @@ -42,7 +42,7 @@ public void testBasicOps() count++; assertThat(map.put(key, count - 1)).isEqualTo(-1); assertThat(map).isNotEmpty(); - assertThat(map.size()).isEqualTo(count); + assertThat(map).hasSize(count); } // Replace @@ -51,7 +51,7 @@ public void testBasicOps() count++; assertThat(map.replace(key, count - 1, count)).isTrue(); assertThat(map).isNotEmpty(); - assertThat(map.size()).isEqualTo(values.size()); + assertThat(map).hasSize(values.size()); } // Get diff --git a/core/trino-spi/src/test/java/io/trino/spi/connector/TestConnectorViewDefinition.java b/core/trino-spi/src/test/java/io/trino/spi/connector/TestConnectorViewDefinition.java index a6b5a9eb0eff8c..2f336a5589491b 100644 --- a/core/trino-spi/src/test/java/io/trino/spi/connector/TestConnectorViewDefinition.java +++ b/core/trino-spi/src/test/java/io/trino/spi/connector/TestConnectorViewDefinition.java @@ -112,7 +112,7 @@ public void testRoundTrip() private static void assertBaseView(ConnectorViewDefinition view) { assertThat(view.getOriginalSql()).isEqualTo("SELECT 42 x"); - assertThat(view.getColumns().size()).isEqualTo(1); + assertThat(view.getColumns()).hasSize(1); ViewColumn 
column = getOnlyElement(view.getColumns()); assertThat(column.getName()).isEqualTo("x"); assertThat(column.getType()).isEqualTo(BIGINT.getTypeId()); diff --git a/core/trino-spi/src/test/java/io/trino/spi/predicate/TestEquatableValueSet.java b/core/trino-spi/src/test/java/io/trino/spi/predicate/TestEquatableValueSet.java index 9c2773b4806eb5..56bf49764f96a3 100644 --- a/core/trino-spi/src/test/java/io/trino/spi/predicate/TestEquatableValueSet.java +++ b/core/trino-spi/src/test/java/io/trino/spi/predicate/TestEquatableValueSet.java @@ -47,7 +47,7 @@ public void testEmptySet() assertThat(equatables.isAll()).isFalse(); assertThat(equatables.isSingleValue()).isFalse(); assertThat(equatables.inclusive()).isTrue(); - assertThat(equatables.getValues().size()).isEqualTo(0); + assertThat(equatables.getValues()).hasSize(0); assertThat(equatables.complement()).isEqualTo(EquatableValueSet.all(ID)); assertThat(equatables.containsValue(0L)).isFalse(); assertThat(equatables.containsValue(1L)).isFalse(); @@ -63,7 +63,7 @@ public void testEntireSet() assertThat(equatables.isAll()).isTrue(); assertThat(equatables.isSingleValue()).isFalse(); assertThat(equatables.inclusive()).isFalse(); - assertThat(equatables.getValues().size()).isEqualTo(0); + assertThat(equatables.getValues()).hasSize(0); assertThat(equatables.complement()).isEqualTo(EquatableValueSet.none(ID)); assertThat(equatables.containsValue(0L)).isTrue(); assertThat(equatables.containsValue(1L)).isTrue(); diff --git a/lib/trino-geospatial-toolkit/src/test/java/io/trino/geospatial/TestKdbTree.java b/lib/trino-geospatial-toolkit/src/test/java/io/trino/geospatial/TestKdbTree.java index f58b580acdcad0..4145803dbbcaf0 100644 --- a/lib/trino-geospatial-toolkit/src/test/java/io/trino/geospatial/TestKdbTree.java +++ b/lib/trino-geospatial-toolkit/src/test/java/io/trino/geospatial/TestKdbTree.java @@ -67,7 +67,7 @@ private void testSinglePartition(double width, double height) KdbTree tree = buildKdbTree(100, extent, 
rectangles.build()); - assertThat(tree.getLeaves().size()).isEqualTo(1); + assertThat(tree.getLeaves()).hasSize(1); Map.Entry entry = Iterables.getOnlyElement(tree.getLeaves().entrySet()); assertThat(entry.getKey().intValue()).isEqualTo(0); @@ -94,7 +94,7 @@ private void testSplitVertically(double width, double height) KdbTree treeCopy = buildKdbTree(25, extent, rectangles.build()); Map leafNodes = treeCopy.getLeaves(); - assertThat(leafNodes.size()).isEqualTo(2); + assertThat(leafNodes).hasSize(2); assertThat(leafNodes.keySet()).isEqualTo(ImmutableSet.of(0, 1)); assertThat(leafNodes).containsEntry(0, new Rectangle(0, 0, 4.5, 4)); assertThat(leafNodes).containsEntry(1, new Rectangle(4.5, 0, 9, 4)); @@ -123,7 +123,7 @@ private void testSplitHorizontally(double width, double height) KdbTree tree = buildKdbTree(25, extent, rectangles.build()); Map leafNodes = tree.getLeaves(); - assertThat(leafNodes.size()).isEqualTo(2); + assertThat(leafNodes).hasSize(2); assertThat(leafNodes.keySet()).isEqualTo(ImmutableSet.of(0, 1)); assertThat(leafNodes).containsEntry(0, new Rectangle(0, 0, 4, 4.5)); assertThat(leafNodes).containsEntry(1, new Rectangle(0, 4.5, 4, 9)); @@ -146,7 +146,7 @@ private void testSplitHorizontally(double width, double height) private void assertPartitions(KdbTree kdbTree, Rectangle envelope, Set partitions) { Map matchingNodes = kdbTree.findIntersectingLeaves(envelope); - assertThat(matchingNodes.size()).isEqualTo(partitions.size()); + assertThat(matchingNodes).hasSize(partitions.size()); assertThat(matchingNodes.keySet()).isEqualTo(partitions); } @@ -170,7 +170,7 @@ private void testEvenDistribution(double width, double height) KdbTree tree = buildKdbTree(10, extent, rectangles.build()); Map leafNodes = tree.getLeaves(); - assertThat(leafNodes.size()).isEqualTo(6); + assertThat(leafNodes).hasSize(6); assertThat(leafNodes.keySet()).isEqualTo(ImmutableSet.of(0, 1, 2, 3, 4, 5)); assertThat(leafNodes).containsEntry(0, new Rectangle(0, 0, 2.5, 2.5)); 
assertThat(leafNodes).containsEntry(1, new Rectangle(0, 2.5, 2.5, 4)); @@ -206,7 +206,7 @@ private void testSkewedDistribution(double width, double height) KdbTree tree = buildKdbTree(10, extent, rectangles.build()); Map leafNodes = tree.getLeaves(); - assertThat(leafNodes.size()).isEqualTo(9); + assertThat(leafNodes).hasSize(9); assertThat(leafNodes.keySet()).isEqualTo(ImmutableSet.of(0, 1, 2, 3, 4, 5, 6, 7, 8)); assertThat(leafNodes).containsEntry(0, new Rectangle(0, 0, 1.5, 2.5)); assertThat(leafNodes).containsEntry(1, new Rectangle(1.5, 0, 3.5, 2.5)); @@ -240,7 +240,7 @@ private void testCantSplitVertically(double width, double height) KdbTree tree = buildKdbTree(10, extent, rectangles.build()); Map leafNodes = tree.getLeaves(); - assertThat(leafNodes.size()).isEqualTo(10); + assertThat(leafNodes).hasSize(10); assertThat(leafNodes.keySet()).isEqualTo(ImmutableSet.of(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)); assertThat(leafNodes).containsEntry(0, new Rectangle(0, 0, 4.5, 0.5)); assertThat(leafNodes).containsEntry(1, new Rectangle(0, 0.5, 4.5, 1.5)); @@ -275,7 +275,7 @@ private void testCantSplit(double width, double height) KdbTree tree = buildKdbTree(10, extent, rectangles.build()); Map leafNodes = tree.getLeaves(); - assertThat(leafNodes.size()).isEqualTo(2); + assertThat(leafNodes).hasSize(2); assertThat(leafNodes.keySet()).isEqualTo(ImmutableSet.of(0, 1)); assertThat(leafNodes).containsEntry(0, new Rectangle(0, 0, 4.5, 4 + height)); assertThat(leafNodes).containsEntry(1, new Rectangle(4.5, 0, 9 + width, 4 + height)); diff --git a/lib/trino-hdfs/src/test/java/io/trino/hdfs/s3/TestTrinoS3FileSystem.java b/lib/trino-hdfs/src/test/java/io/trino/hdfs/s3/TestTrinoS3FileSystem.java index 23444224d8a3a3..731cb5d8b6d13c 100644 --- a/lib/trino-hdfs/src/test/java/io/trino/hdfs/s3/TestTrinoS3FileSystem.java +++ b/lib/trino-hdfs/src/test/java/io/trino/hdfs/s3/TestTrinoS3FileSystem.java @@ -965,19 +965,19 @@ public ListObjectsV2Result listObjectsV2(ListObjectsV2Request 
listObjectsV2Reque fs.setS3Client(s3); List shallowAll = remoteIteratorToList(fs.listLocatedStatus(rootPath)); - assertThat(shallowAll.size()).isEqualTo(2); + assertThat(shallowAll).hasSize(2); assertThat(shallowAll.get(0).isDirectory()).isTrue(); assertThat(shallowAll.get(1).isDirectory()).isFalse(); assertThat(shallowAll.get(0).getPath()).isEqualTo(new Path(rootPath, "prefix")); assertThat(shallowAll.get(1).getPath()).isEqualTo(new Path(rootPath, rootObject.getKey())); List shallowFiles = remoteIteratorToList(fs.listFiles(rootPath, false)); - assertThat(shallowFiles.size()).isEqualTo(1); + assertThat(shallowFiles).hasSize(1); assertThat(shallowFiles.get(0).isDirectory()).isFalse(); assertThat(shallowFiles.get(0).getPath()).isEqualTo(new Path(rootPath, rootObject.getKey())); List recursiveFiles = remoteIteratorToList(fs.listFiles(rootPath, true)); - assertThat(recursiveFiles.size()).isEqualTo(2); + assertThat(recursiveFiles).hasSize(2); assertThat(recursiveFiles.get(0).isDirectory()).isFalse(); assertThat(recursiveFiles.get(1).isDirectory()).isFalse(); assertThat(recursiveFiles.get(0).getPath()).isEqualTo(new Path(rootPath, childObject.getKey())); diff --git a/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/FormatTestUtils.java b/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/FormatTestUtils.java index 0e5096909b2f1a..8efdf1e816e0c5 100644 --- a/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/FormatTestUtils.java +++ b/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/FormatTestUtils.java @@ -404,7 +404,7 @@ public static void assertColumnValueEquals(Type type, Object actual, Object expe if (type instanceof ArrayType) { List actualArray = (List) actual; List expectedArray = (List) expected; - assertThat(actualArray.size()).isEqualTo(expectedArray.size()); + assertThat(actualArray).hasSize(expectedArray.size()); Type elementType = type.getTypeParameters().get(0); for (int i = 0; i < actualArray.size(); i++) { @@ -416,7 
+416,7 @@ public static void assertColumnValueEquals(Type type, Object actual, Object expe else if (type instanceof MapType) { Map actualMap = (Map) actual; Map expectedMap = (Map) expected; - assertThat(actualMap.size()).isEqualTo(expectedMap.size()); + assertThat(actualMap).hasSize(expectedMap.size()); Type keyType = type.getTypeParameters().get(0); Type valueType = type.getTypeParameters().get(1); @@ -442,8 +442,8 @@ else if (type instanceof RowType) { List actualRow = (List) actual; List expectedRow = (List) expected; - assertThat(actualRow.size()).isEqualTo(fieldTypes.size()); - assertThat(actualRow.size()).isEqualTo(expectedRow.size()); + assertThat(actualRow).hasSize(fieldTypes.size()); + assertThat(actualRow).hasSize(expectedRow.size()); for (int fieldId = 0; fieldId < actualRow.size(); fieldId++) { Type fieldType = fieldTypes.get(fieldId); diff --git a/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestAvroBase.java b/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestAvroBase.java index 02473d28ae4eb3..d5b39abbba6fdc 100644 --- a/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestAvroBase.java +++ b/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestAvroBase.java @@ -330,7 +330,7 @@ private void testSerdeCycles(Schema schema, AvroCompressionKind compressionKind) testRecordsActual.add(genericRecordDataFileReader.next()); } } - assertThat(testRecordsExpected.build().size()).isEqualTo(testRecordsActual.build().size()); + assertThat(testRecordsExpected.build()).hasSize(testRecordsActual.build().size()); List expected = testRecordsExpected.build(); List actual = testRecordsActual.build(); for (int i = 0; i < expected.size(); i++) { diff --git a/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestHiveAvroTypeBlockHandler.java b/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestHiveAvroTypeBlockHandler.java index d31f5b2867e532..2ff26ac6c5d8bd 100644 --- 
a/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestHiveAvroTypeBlockHandler.java +++ b/lib/trino-hive-formats/src/test/java/io/trino/hive/formats/avro/TestHiveAvroTypeBlockHandler.java @@ -121,12 +121,12 @@ public void testCoercionOfUnionToStruct() //check first column //check first column first row coerced struct RowBlock readStraightUpStringsOnly = (RowBlock) p.getBlock(0).getSingleValueBlock(0); - assertThat(readStraightUpStringsOnly.getFieldBlocks().size()).isEqualTo(3); // tag, int and string block fields + assertThat(readStraightUpStringsOnly.getFieldBlocks()).hasSize(3); // tag, int and string block fields assertThat(readStraightUpStringsOnly.getFieldBlocks().get(1).isNull(0)).isTrue(); // int field null assertThat(VARCHAR.getObjectValue(null, readStraightUpStringsOnly.getFieldBlocks().get(2), 0)).isEqualTo("I am in column 0 field 1"); //string field expected value // check first column second row coerced struct RowBlock readStraightUpInts = (RowBlock) p.getBlock(0).getSingleValueBlock(1); - assertThat(readStraightUpInts.getFieldBlocks().size()).isEqualTo(3); // tag, int and string block fields + assertThat(readStraightUpInts.getFieldBlocks()).hasSize(3); // tag, int and string block fields assertThat(readStraightUpInts.getFieldBlocks().get(2).isNull(0)).isTrue(); // string field null assertThat(INTEGER.getObjectValue(null, readStraightUpInts.getFieldBlocks().get(1), 0)).isEqualTo(5); @@ -135,12 +135,12 @@ public void testCoercionOfUnionToStruct() //check second column //check second column first row coerced struct RowBlock readFromReverseStringsOnly = (RowBlock) p.getBlock(1).getSingleValueBlock(0); - assertThat(readFromReverseStringsOnly.getFieldBlocks().size()).isEqualTo(3); // tag, int and string block fields + assertThat(readFromReverseStringsOnly.getFieldBlocks()).hasSize(3); // tag, int and string block fields assertThat(readFromReverseStringsOnly.getFieldBlocks().get(1).isNull(0)).isTrue(); // int field null 
assertThat(VARCHAR.getObjectValue(null, readFromReverseStringsOnly.getFieldBlocks().get(2), 0)).isEqualTo("I am in column 1 field 1"); //check second column second row coerced struct RowBlock readFromReverseUpInts = (RowBlock) p.getBlock(1).getSingleValueBlock(1); - assertThat(readFromReverseUpInts.getFieldBlocks().size()).isEqualTo(3); // tag, int and string block fields + assertThat(readFromReverseUpInts.getFieldBlocks()).hasSize(3); // tag, int and string block fields assertThat(readFromReverseUpInts.getFieldBlocks().get(2).isNull(0)).isTrue(); // string field null assertThat(INTEGER.getObjectValue(null, readFromReverseUpInts.getFieldBlocks().get(1), 0)).isEqualTo(21); //check second column third row is null @@ -149,17 +149,17 @@ public void testCoercionOfUnionToStruct() //check third column (default of 42 always) //check third column first row coerced struct RowBlock readFromDefaultStringsOnly = (RowBlock) p.getBlock(2).getSingleValueBlock(0); - assertThat(readFromDefaultStringsOnly.getFieldBlocks().size()).isEqualTo(3); // tag, int and string block fields + assertThat(readFromDefaultStringsOnly.getFieldBlocks()).hasSize(3); // tag, int and string block fields assertThat(readFromDefaultStringsOnly.getFieldBlocks().get(2).isNull(0)).isTrue(); // string field null assertThat(INTEGER.getObjectValue(null, readFromDefaultStringsOnly.getFieldBlocks().get(1), 0)).isEqualTo(42); //check third column second row coerced struct RowBlock readFromDefaultInts = (RowBlock) p.getBlock(2).getSingleValueBlock(1); - assertThat(readFromDefaultInts.getFieldBlocks().size()).isEqualTo(3); // tag, int and string block fields + assertThat(readFromDefaultInts.getFieldBlocks()).hasSize(3); // tag, int and string block fields assertThat(readFromDefaultInts.getFieldBlocks().get(2).isNull(0)).isTrue(); // string field null assertThat(INTEGER.getObjectValue(null, readFromDefaultInts.getFieldBlocks().get(1), 0)).isEqualTo(42); //check third column third row coerced struct RowBlock 
readFromDefaultNulls = (RowBlock) p.getBlock(2).getSingleValueBlock(2); - assertThat(readFromDefaultNulls.getFieldBlocks().size()).isEqualTo(3); // int and string block fields + assertThat(readFromDefaultNulls.getFieldBlocks()).hasSize(3); // int and string block fields assertThat(readFromDefaultNulls.getFieldBlocks().get(2).isNull(0)).isTrue(); // string field null assertThat(INTEGER.getObjectValue(null, readFromDefaultNulls.getFieldBlocks().get(1), 0)).isEqualTo(42); diff --git a/lib/trino-orc/src/test/java/io/trino/orc/OrcTester.java b/lib/trino-orc/src/test/java/io/trino/orc/OrcTester.java index e11fa9e2ac117d..5f02c8c8fe7e11 100644 --- a/lib/trino-orc/src/test/java/io/trino/orc/OrcTester.java +++ b/lib/trino-orc/src/test/java/io/trino/orc/OrcTester.java @@ -542,7 +542,7 @@ private static void assertColumnValueEquals(Type type, Object actual, Object exp if (type instanceof ArrayType) { List actualArray = (List) actual; List expectedArray = (List) expected; - assertThat(actualArray.size()).isEqualTo(expectedArray.size()); + assertThat(actualArray).hasSize(expectedArray.size()); Type elementType = type.getTypeParameters().get(0); for (int i = 0; i < actualArray.size(); i++) { @@ -554,7 +554,7 @@ private static void assertColumnValueEquals(Type type, Object actual, Object exp else if (type instanceof MapType) { Map actualMap = (Map) actual; Map expectedMap = (Map) expected; - assertThat(actualMap.size()).isEqualTo(expectedMap.size()); + assertThat(actualMap).hasSize(expectedMap.size()); Type keyType = type.getTypeParameters().get(0); Type valueType = type.getTypeParameters().get(1); @@ -582,8 +582,8 @@ else if (type instanceof RowType) { List actualRow = (List) actual; List expectedRow = (List) expected; - assertThat(actualRow.size()).isEqualTo(fieldTypes.size()); - assertThat(actualRow.size()).isEqualTo(expectedRow.size()); + assertThat(actualRow).hasSize(fieldTypes.size()); + assertThat(actualRow).hasSize(expectedRow.size()); for (int fieldId = 0; fieldId < 
actualRow.size(); fieldId++) { Type fieldType = fieldTypes.get(fieldId); diff --git a/lib/trino-orc/src/test/java/io/trino/orc/TestOrcBloomFilters.java b/lib/trino-orc/src/test/java/io/trino/orc/TestOrcBloomFilters.java index 8eb72ba697b32e..2e996cf392ca9b 100644 --- a/lib/trino-orc/src/test/java/io/trino/orc/TestOrcBloomFilters.java +++ b/lib/trino-orc/src/test/java/io/trino/orc/TestOrcBloomFilters.java @@ -132,7 +132,7 @@ public void testOrcHiveBloomFilterSerde() OrcMetadataReader metadataReader = new OrcMetadataReader(new OrcReaderOptions()); List bloomFilters = metadataReader.readBloomFilterIndexes(inputStream); - assertThat(bloomFilters.size()).isEqualTo(1); + assertThat(bloomFilters).hasSize(1); assertThat(bloomFilters.get(0).test(TEST_STRING)).isTrue(); assertThat(bloomFilters.get(0).testSlice(wrappedBuffer(TEST_STRING))).isTrue(); @@ -149,7 +149,7 @@ public void testOrcHiveBloomFilterSerde() CodedInputStream input = CodedInputStream.newInstance(bloomFilterBytes.getBytes()); OrcProto.BloomFilterIndex deserializedBloomFilterIndex = OrcProto.BloomFilterIndex.parseFrom(input); List bloomFilterList = deserializedBloomFilterIndex.getBloomFilterList(); - assertThat(bloomFilterList.size()).isEqualTo(1); + assertThat(bloomFilterList).hasSize(1); OrcProto.BloomFilter bloomFilterRead = bloomFilterList.get(0); diff --git a/lib/trino-orc/src/test/java/io/trino/orc/TestStructColumnReader.java b/lib/trino-orc/src/test/java/io/trino/orc/TestStructColumnReader.java index 9a42f19d8e6625..65e3078963e618 100644 --- a/lib/trino-orc/src/test/java/io/trino/orc/TestStructColumnReader.java +++ b/lib/trino-orc/src/test/java/io/trino/orc/TestStructColumnReader.java @@ -98,7 +98,7 @@ public void testValuesAreReadInCorrectly() RowBlock readBlock = read(tempFile, readerType); List actual = (List) readerType.getObjectValue(TestingConnectorSession.SESSION, readBlock, 0); - assertThat(actual.size()).isEqualTo(readerFields.size()); + assertThat(actual).hasSize(readerFields.size()); 
assertThat(actual.get(0)).isEqualTo("field_a_value"); assertThat(actual.get(1)).isEqualTo("field_b_value"); assertThat(actual.get(2)).isEqualTo("field_c_value"); @@ -121,7 +121,7 @@ public void testReaderLowerCasesFieldNamesFromStream() RowBlock readBlock = read(tempFile, readerType); List actual = (List) readerType.getObjectValue(TestingConnectorSession.SESSION, readBlock, 0); - assertThat(actual.size()).isEqualTo(readerFields.size()); + assertThat(actual).hasSize(readerFields.size()); assertThat(actual.get(0)).isEqualTo("fieldAValue"); assertThat(actual.get(1)).isEqualTo("fieldBValue"); assertThat(actual.get(2)).isEqualTo("fieldCValue"); @@ -144,7 +144,7 @@ public void testReaderLowerCasesFieldNamesFromType() RowBlock readBlock = read(tempFile, readerType); List actual = (List) readerType.getObjectValue(TestingConnectorSession.SESSION, readBlock, 0); - assertThat(actual.size()).isEqualTo(readerFields.size()); + assertThat(actual).hasSize(readerFields.size()); assertThat(actual.get(0)).isEqualTo("fieldAValue"); assertThat(actual.get(1)).isEqualTo("fieldBValue"); assertThat(actual.get(2)).isEqualTo("fieldCValue"); @@ -186,7 +186,7 @@ public void testExtraFieldsInReader() RowBlock readBlock = read(tempFile, readerType); List actual = (List) readerType.getObjectValue(TestingConnectorSession.SESSION, readBlock, 0); - assertThat(actual.size()).isEqualTo(readerFields.size()); + assertThat(actual).hasSize(readerFields.size()); assertThat(actual.get(0)).isEqualTo("field_a_value"); assertThat(actual.get(1)).isNull(); assertThat(actual.get(2)).isEqualTo("field_c_value"); @@ -210,7 +210,7 @@ public void testExtraFieldsInWriter() RowBlock readBlock = read(tempFile, readerType); List actual = (List) readerType.getObjectValue(TestingConnectorSession.SESSION, readBlock, 0); - assertThat(actual.size()).isEqualTo(readerFields.size()); + assertThat(actual).hasSize(readerFields.size()); assertThat(actual.get(0)).isEqualTo("field_a_value"); 
assertThat(actual.get(1)).isEqualTo("field_c_value"); } diff --git a/lib/trino-orc/src/test/java/io/trino/orc/stream/AbstractTestValueStream.java b/lib/trino-orc/src/test/java/io/trino/orc/stream/AbstractTestValueStream.java index f7e0d88599b602..65ff20664ccbfc 100644 --- a/lib/trino-orc/src/test/java/io/trino/orc/stream/AbstractTestValueStream.java +++ b/lib/trino-orc/src/test/java/io/trino/orc/stream/AbstractTestValueStream.java @@ -57,7 +57,7 @@ protected void testWriteValue(List> groups) assertThat(stream.getLength()).isEqualTo(sliceOutput.size()); List checkpoints = outputStream.getCheckpoints(); - assertThat(checkpoints.size()).isEqualTo(groups.size()); + assertThat(checkpoints).hasSize(groups.size()); R valueStream = createValueStream(sliceOutput.slice()); for (List group : groups) { diff --git a/lib/trino-orc/src/test/java/io/trino/orc/stream/TestBooleanOutputStream.java b/lib/trino-orc/src/test/java/io/trino/orc/stream/TestBooleanOutputStream.java index d7d9b1d73fbea2..7b05dc135ab55d 100644 --- a/lib/trino-orc/src/test/java/io/trino/orc/stream/TestBooleanOutputStream.java +++ b/lib/trino-orc/src/test/java/io/trino/orc/stream/TestBooleanOutputStream.java @@ -69,7 +69,7 @@ public void testWriteBoolean() buffer.writeDataTo(slice); Slice singleWriteBuffer = slice.slice(); - assertThat(batchWriteCheckpoints.size()).isEqualTo(singleWriteCheckpoints.size()); + assertThat(batchWriteCheckpoints).hasSize(singleWriteCheckpoints.size()); for (int i = 0; i < batchWriteCheckpoints.size(); i++) { assertThat(checkpointsEqual(batchWriteCheckpoints.get(i), singleWriteCheckpoints.get(i))).isTrue(); } diff --git a/lib/trino-parquet/src/test/java/io/trino/parquet/reader/TestParquetReader.java b/lib/trino-parquet/src/test/java/io/trino/parquet/reader/TestParquetReader.java index 2ef475a7644fe5..00ecd8388857bc 100644 --- a/lib/trino-parquet/src/test/java/io/trino/parquet/reader/TestParquetReader.java +++ 
b/lib/trino-parquet/src/test/java/io/trino/parquet/reader/TestParquetReader.java @@ -133,7 +133,7 @@ public void testEmptyRowRangesWithColumnIndex() new File(Resources.getResource("lineitem_sorted_by_shipdate/data.parquet").toURI()), new ParquetReaderOptions()); ParquetMetadata parquetMetadata = MetadataReader.readFooter(dataSource, Optional.empty()); - assertThat(parquetMetadata.getBlocks().size()).isEqualTo(2); + assertThat(parquetMetadata.getBlocks()).hasSize(2); // The predicate and the file are prepared so that page indexes will result in non-overlapping row ranges and eliminate the entire first row group // while the second row group still has to be read TupleDomain predicate = TupleDomain.withColumnDomains( diff --git a/lib/trino-parquet/src/test/java/io/trino/parquet/writer/TestParquetWriter.java b/lib/trino-parquet/src/test/java/io/trino/parquet/writer/TestParquetWriter.java index c67217385b0885..a80cbbcd00d73c 100644 --- a/lib/trino-parquet/src/test/java/io/trino/parquet/writer/TestParquetWriter.java +++ b/lib/trino-parquet/src/test/java/io/trino/parquet/writer/TestParquetWriter.java @@ -129,7 +129,7 @@ public void testWrittenPageSize() generateInputPages(types, 100, 1000)), new ParquetReaderOptions()); ParquetMetadata parquetMetadata = MetadataReader.readFooter(dataSource, Optional.empty()); - assertThat(parquetMetadata.getBlocks().size()).isEqualTo(1); + assertThat(parquetMetadata.getBlocks()).hasSize(1); assertThat(parquetMetadata.getBlocks().get(0).rowCount()).isEqualTo(100 * 1000); ColumnChunkMetadata chunkMetaData = parquetMetadata.getBlocks().get(0).columns().get(0); @@ -178,7 +178,7 @@ public void testWrittenPageValueCount() generateInputPages(types, 100, 1000)), new ParquetReaderOptions()); ParquetMetadata parquetMetadata = MetadataReader.readFooter(dataSource, Optional.empty()); - assertThat(parquetMetadata.getBlocks().size()).isEqualTo(1); + assertThat(parquetMetadata.getBlocks()).hasSize(1); 
assertThat(parquetMetadata.getBlocks().get(0).rowCount()).isEqualTo(100 * 1000); ColumnChunkMetadata columnAMetaData = parquetMetadata.getBlocks().get(0).columns().get(0); diff --git a/lib/trino-plugin-toolkit/src/test/java/io/trino/plugin/base/security/BaseFileBasedConnectorAccessControlTest.java b/lib/trino-plugin-toolkit/src/test/java/io/trino/plugin/base/security/BaseFileBasedConnectorAccessControlTest.java index 7dc2e76eae3b4a..88d8550759b655 100644 --- a/lib/trino-plugin-toolkit/src/test/java/io/trino/plugin/base/security/BaseFileBasedConnectorAccessControlTest.java +++ b/lib/trino-plugin-toolkit/src/test/java/io/trino/plugin/base/security/BaseFileBasedConnectorAccessControlTest.java @@ -425,7 +425,7 @@ public void testTableRulesForMixedGroupUsers() .build()); List rowFilters = accessControl.getRowFilters(userGroup3, myTable); - assertThat(rowFilters.size()).isEqualTo(1); + assertThat(rowFilters).hasSize(1); assertViewExpressionEquals( rowFilters.get(0), ViewExpression.builder() diff --git a/lib/trino-plugin-toolkit/src/test/java/io/trino/plugin/base/security/BaseFileBasedSystemAccessControlTest.java b/lib/trino-plugin-toolkit/src/test/java/io/trino/plugin/base/security/BaseFileBasedSystemAccessControlTest.java index ab0e05fc69a78d..44ba8e8fb281f8 100644 --- a/lib/trino-plugin-toolkit/src/test/java/io/trino/plugin/base/security/BaseFileBasedSystemAccessControlTest.java +++ b/lib/trino-plugin-toolkit/src/test/java/io/trino/plugin/base/security/BaseFileBasedSystemAccessControlTest.java @@ -778,7 +778,7 @@ public void testTableRulesForMixedGroupUsers() List rowFilters = accessControl.getRowFilters( userGroup3, new CatalogSchemaTableName("some-catalog", "my_schema", "my_table")); - assertThat(rowFilters.size()).isEqualTo(1); + assertThat(rowFilters).hasSize(1); assertViewExpressionEquals( rowFilters.get(0), ViewExpression.builder() @@ -1406,7 +1406,7 @@ public void testGetRowFilter() assertThat(accessControl.getRowFilters(ALICE, new 
CatalogSchemaTableName("some-catalog", "bobschema", "bobcolumns"))).isEqualTo(ImmutableList.of()); List rowFilters = accessControl.getRowFilters(CHARLIE, new CatalogSchemaTableName("some-catalog", "bobschema", "bobcolumns")); - assertThat(rowFilters.size()).isEqualTo(1); + assertThat(rowFilters).hasSize(1); assertViewExpressionEquals( rowFilters.get(0), ViewExpression.builder() @@ -1416,7 +1416,7 @@ public void testGetRowFilter() .build()); rowFilters = accessControl.getRowFilters(CHARLIE, new CatalogSchemaTableName("some-catalog", "bobschema", "bobcolumns_with_grant")); - assertThat(rowFilters.size()).isEqualTo(1); + assertThat(rowFilters).hasSize(1); assertViewExpressionEquals( rowFilters.get(0), ViewExpression.builder() diff --git a/lib/trino-record-decoder/src/test/java/io/trino/decoder/avro/TestAvroDecoder.java b/lib/trino-record-decoder/src/test/java/io/trino/decoder/avro/TestAvroDecoder.java index 0ad33ba2cec047..5e9bcb691ae134 100644 --- a/lib/trino-record-decoder/src/test/java/io/trino/decoder/avro/TestAvroDecoder.java +++ b/lib/trino-record-decoder/src/test/java/io/trino/decoder/avro/TestAvroDecoder.java @@ -159,7 +159,7 @@ private Map buildAndDecodeColumn(Decode ImmutableMap.of(columnName, columnType), ImmutableMap.of(columnName, actualValue)); - assertThat(decodedRow.size()).isEqualTo(1); + assertThat(decodedRow).hasSize(1); return decodedRow; } @@ -184,7 +184,7 @@ private static byte[] buildAvroData(Schema schema, Map values) private static Map buildMapFromKeysAndValues(List keys, List values) { - assertThat(keys.size()).isEqualTo(values.size()); + assertThat(keys).hasSize(values.size()); Map map = new HashMap<>(); for (int i = 0; i < keys.size(); i++) { map.put(keys.get(i), values.get(i)); @@ -254,7 +254,7 @@ public void testSchemaEvolutionAddingColumn() ImmutableSet.of(originalColumn, newlyAddedColumn), ImmutableMap.of(DATA_SCHEMA, addedColumnSchema)); - assertThat(decodedRow.size()).isEqualTo(2); + assertThat(decodedRow).hasSize(2); 
checkValue(decodedRow, originalColumn, "string_field_value"); checkIsNull(decodedRow, newlyAddedColumn); } @@ -277,7 +277,7 @@ public void testSchemaEvolutionRenamingColumn() ImmutableSet.of(renamedColumn), ImmutableMap.of(DATA_SCHEMA, renamedColumnSchema)); - assertThat(decodedEvolvedRow.size()).isEqualTo(1); + assertThat(decodedEvolvedRow).hasSize(1); checkIsNull(decodedEvolvedRow, renamedColumn); } @@ -302,7 +302,7 @@ public void testSchemaEvolutionRemovingColumn() ImmutableSet.of(evolvedColumn), ImmutableMap.of(DATA_SCHEMA, removedColumnSchema)); - assertThat(decodedEvolvedRow.size()).isEqualTo(1); + assertThat(decodedEvolvedRow).hasSize(1); checkValue(decodedEvolvedRow, evolvedColumn, "string_field_value"); } @@ -324,7 +324,7 @@ public void testSchemaEvolutionIntToLong() ImmutableSet.of(longColumnReadingIntData), ImmutableMap.of(DATA_SCHEMA, changedTypeSchema)); - assertThat(decodedEvolvedRow.size()).isEqualTo(1); + assertThat(decodedEvolvedRow).hasSize(1); checkValue(decodedEvolvedRow, longColumnReadingIntData, 100); } @@ -346,7 +346,7 @@ public void testSchemaEvolutionIntToDouble() ImmutableSet.of(doubleColumnReadingIntData), ImmutableMap.of(DATA_SCHEMA, changedTypeSchema)); - assertThat(decodedEvolvedRow.size()).isEqualTo(1); + assertThat(decodedEvolvedRow).hasSize(1); checkValue(decodedEvolvedRow, doubleColumnReadingIntData, 100.0); } @@ -503,7 +503,7 @@ public void testNestedRecord() ImmutableSet.of(row), ImmutableMap.of(DATA_SCHEMA, schema)); - assertThat(decodedRow.size()).isEqualTo(1); + assertThat(decodedRow).hasSize(1); checkValue(decodedRow, row, 98247748); } diff --git a/lib/trino-record-decoder/src/test/java/io/trino/decoder/csv/TestCsvDecoder.java b/lib/trino-record-decoder/src/test/java/io/trino/decoder/csv/TestCsvDecoder.java index 951442c1946c78..10bbe23b6a4b63 100644 --- a/lib/trino-record-decoder/src/test/java/io/trino/decoder/csv/TestCsvDecoder.java +++ b/lib/trino-record-decoder/src/test/java/io/trino/decoder/csv/TestCsvDecoder.java @@ 
-69,7 +69,7 @@ public void testSimple() Map decodedRow = rowDecoder.decodeRow(csv.getBytes(StandardCharsets.UTF_8)) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(columns.size()); + assertThat(decodedRow).hasSize(columns.size()); checkValue(decodedRow, row1, "ro"); checkValue(decodedRow, row2, "row2"); @@ -100,7 +100,7 @@ public void testBoolean() Map decodedRow = rowDecoder.decodeRow(csv.getBytes(StandardCharsets.UTF_8)) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(columns.size()); + assertThat(decodedRow).hasSize(columns.size()); checkValue(decodedRow, row1, true); checkValue(decodedRow, row2, false); @@ -128,7 +128,7 @@ public void testNulls() Map decodedRow = rowDecoder.decodeRow(csv.getBytes(StandardCharsets.UTF_8)) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(columns.size()); + assertThat(decodedRow).hasSize(columns.size()); checkIsNull(decodedRow, row1); checkIsNull(decodedRow, row2); @@ -154,7 +154,7 @@ public void testLessTokensThanColumns() Map decodedRow = rowDecoder.decodeRow(csv.getBytes(StandardCharsets.UTF_8)) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(columns.size()); + assertThat(decodedRow).hasSize(columns.size()); checkValue(decodedRow, column1, "ala"); checkValue(decodedRow, column2, 10); diff --git a/lib/trino-record-decoder/src/test/java/io/trino/decoder/json/TestJsonDecoder.java b/lib/trino-record-decoder/src/test/java/io/trino/decoder/json/TestJsonDecoder.java index 6856870ab125ae..c47a190dc5eac0 100644 --- a/lib/trino-record-decoder/src/test/java/io/trino/decoder/json/TestJsonDecoder.java +++ b/lib/trino-record-decoder/src/test/java/io/trino/decoder/json/TestJsonDecoder.java @@ -77,7 +77,7 @@ public void testSimple() Map decodedRow = rowDecoder.decodeRow(json) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(columns.size()); + assertThat(decodedRow).hasSize(columns.size()); 
checkValue(decodedRow, column1, "twitterfeed"); checkValue(decodedRow, column2, "EKentuckyN"); @@ -102,7 +102,7 @@ public void testNonExistent() Map decodedRow = rowDecoder.decodeRow(json) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(columns.size()); + assertThat(decodedRow).hasSize(columns.size()); checkIsNull(decodedRow, column1); checkIsNull(decodedRow, column2); @@ -126,7 +126,7 @@ public void testStringNumber() Optional> decodedRow = rowDecoder.decodeRow(json); assertThat(decodedRow).isPresent(); - assertThat(decodedRow.get().size()).isEqualTo(columns.size()); + assertThat(decodedRow.get()).hasSize(columns.size()); checkValue(decodedRow.get(), column1, "481516"); checkValue(decodedRow.get(), column2, 481516); diff --git a/lib/trino-record-decoder/src/test/java/io/trino/decoder/protobuf/TestProtobufDecoder.java b/lib/trino-record-decoder/src/test/java/io/trino/decoder/protobuf/TestProtobufDecoder.java index 552a2670f7329a..46e65b555f6773 100644 --- a/lib/trino-record-decoder/src/test/java/io/trino/decoder/protobuf/TestProtobufDecoder.java +++ b/lib/trino-record-decoder/src/test/java/io/trino/decoder/protobuf/TestProtobufDecoder.java @@ -154,7 +154,7 @@ private void testAllDataTypes(String stringData, Integer integerData, Long longD .decodeRow(messageBuilder.build().toByteArray()) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(9); + assertThat(decodedRow).hasSize(9); checkValue(decodedRow, stringColumn, stringData); checkValue(decodedRow, integerColumn, integerData); @@ -323,7 +323,7 @@ private void assertOneof(DynamicMessage.Builder messageBuilder, .decodeRow(message.toByteArray()) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(2); + assertThat(decodedRow).hasSize(2); final var obj = new ObjectMapper(); final var expected = obj.writeValueAsString(setValue); @@ -508,7 +508,7 @@ private void testStructuralDataTypes(String stringData, Integer integerData, Lon 
.decodeRow(messageBuilder.build().toByteArray()) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(3); + assertThat(decodedRow).hasSize(3); Block listBlock = (Block) decodedRow.get(listColumn).getObject(); assertThat(VARCHAR.getSlice(listBlock, 0).toStringUtf8()).isEqualTo("Presto"); @@ -637,7 +637,7 @@ private void testRowFlattening(String stringData, Integer integerData, Long long .decodeRow(messageBuilder.build().toByteArray()) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(9); + assertThat(decodedRow).hasSize(9); checkValue(decodedRow, stringColumn, stringData); checkValue(decodedRow, integerColumn, integerData); diff --git a/lib/trino-record-decoder/src/test/java/io/trino/decoder/raw/TestRawDecoder.java b/lib/trino-record-decoder/src/test/java/io/trino/decoder/raw/TestRawDecoder.java index 67b3e4f814aeae..6bd302c776547a 100644 --- a/lib/trino-record-decoder/src/test/java/io/trino/decoder/raw/TestRawDecoder.java +++ b/lib/trino-record-decoder/src/test/java/io/trino/decoder/raw/TestRawDecoder.java @@ -92,7 +92,7 @@ public void testSimple() Map decodedRow = rowDecoder.decodeRow(row) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(columns.size()); + assertThat(decodedRow).hasSize(columns.size()); checkValue(decodedRow, row1, 4815162342L); checkValue(decodedRow, row2, 12345678); @@ -118,7 +118,7 @@ public void testFixedWithString() Map decodedRow = rowDecoder.decodeRow(row) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(columns.size()); + assertThat(decodedRow).hasSize(columns.size()); checkValue(decodedRow, row1, str); checkValue(decodedRow, row2, str); @@ -148,7 +148,7 @@ public void testFloatStuff() Map decodedRow = rowDecoder.decodeRow(row) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(columns.size()); + assertThat(decodedRow).hasSize(columns.size()); checkValue(decodedRow, row1, Math.PI); checkValue(decodedRow, 
row2, Math.E); @@ -222,7 +222,7 @@ public void testBooleanStuff() Map decodedRow = rowDecoder.decodeRow(row) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(columns.size()); + assertThat(decodedRow).hasSize(columns.size()); checkValue(decodedRow, row01, 127); checkValue(decodedRow, row02, false); @@ -431,7 +431,7 @@ public void testGetValueTwice() Map decodedRow = rowDecoder.decodeRow(row) .orElseThrow(AssertionError::new); - assertThat(decodedRow.size()).isEqualTo(columns.size()); + assertThat(decodedRow).hasSize(columns.size()); for (DecoderColumnHandle handle : columns) { checkTwice(decodedRow, handle); diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java index 14a03e3cfc74f2..883f5798ff52f8 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnectorTest.java @@ -327,7 +327,7 @@ public void testPartitionPushdownsWithNotMatchingPredicate() ImmutableList.of("'2', 0"))) { String sql = "SELECT 1 FROM " + testCassandraTable.getTableName() + " WHERE id = '1' AND trino_filter_col = 0"; - assertThat(computeActual(sql).getMaterializedRows().size()).isEqualTo(0); + assertThat(computeActual(sql).getMaterializedRows()).hasSize(0); } } diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeConnectorSmokeTest.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeConnectorSmokeTest.java index 5d8e17210c7af9..f800eb0b9f123f 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeConnectorSmokeTest.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeConnectorSmokeTest.java @@ -761,7 +761,7 @@ private void validatePath(String 
schemaLocation, String schemaName, String table List materializedRows = getQueryRunner() .execute("SELECT DISTINCT regexp_replace(\"$path\", '(.*[/][^/]*)[/][^/]*$', '$1') FROM " + schemaName + "." + tableName) .getMaterializedRows(); - assertThat(materializedRows.size()).isEqualTo(1); + assertThat(materializedRows).hasSize(1); assertThat((String) materializedRows.get(0).getField(0)).matches(format("%s/%s.*", schemaLocation, tableName)); } @@ -886,7 +886,7 @@ public void testExternalTableFilesRetainedOnDrop() "SELECT count(*) FROM nation"); int fileCount = getTableFiles(tableName).size(); assertUpdate(format("DROP TABLE %s.%s", schemaName, tableName)); - assertThat(getTableFiles(tableName).size()).isEqualTo(fileCount); + assertThat(getTableFiles(tableName)).hasSize(fileCount); } @Test diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeTableWithCustomLocation.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeTableWithCustomLocation.java index 240afbe1cc8bc0..ddbdfbac1148fa 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeTableWithCustomLocation.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/BaseDeltaLakeTableWithCustomLocation.java @@ -65,7 +65,7 @@ public void testCreateAndDrop() .describedAs("The directory corresponding to the table storage location should exist") .isTrue(); List materializedRows = computeActual("SELECT \"$path\" FROM " + tableName).getMaterializedRows(); - assertThat(materializedRows.size()).isEqualTo(1); + assertThat(materializedRows).hasSize(1); Location filePath = Location.of((String) materializedRows.get(0).getField(0)); assertThat(fileSystem.listFiles(filePath).hasNext()) .describedAs("The data file should exist") diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeCreateTableStatistics.java 
b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeCreateTableStatistics.java index 074b0340681ce5..1263d32ce54b34 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeCreateTableStatistics.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeCreateTableStatistics.java @@ -404,7 +404,7 @@ public void testPartitionedTable() ImmutableList.of(partitionColumn), "VALUES ('a', 1), ('b', 1), ('c', 1), ('c', 2), ('d', 2), ('e', 2), (null, 1)")) { List addFileEntries = getAddFileEntries(table.getName()); - assertThat(addFileEntries.size()).isEqualTo(2); + assertThat(addFileEntries).hasSize(2); for (AddFileEntry addFileEntry : addFileEntries) { assertThat(addFileEntry.getStats()).isPresent(); diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakePageSink.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakePageSink.java index 992360f66d9a7f..ec8a53db706c39 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakePageSink.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakePageSink.java @@ -121,11 +121,11 @@ public void testPageSinkStats() .map(dataFileInfoCodec::fromJson) .collect(toImmutableList()); - assertThat(dataFileInfos.size()).isEqualTo(1); + assertThat(dataFileInfos).hasSize(1); DataFileInfo dataFileInfo = dataFileInfos.get(0); List files = ImmutableList.copyOf(new File(tablePath).listFiles((dir, name) -> !name.endsWith(".crc"))); - assertThat(files.size()).isEqualTo(1); + assertThat(files).hasSize(1); File outputFile = files.get(0); assertThat(round(stats.getInputPageSizeInBytes().getAllTime().getMax())).isEqualTo(page.getRetainedSizeInBytes()); diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestTransactionLogAccess.java 
b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestTransactionLogAccess.java index 34c68a4513642d..23e1d26fcd2b7e 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestTransactionLogAccess.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestTransactionLogAccess.java @@ -299,7 +299,7 @@ public void testAddEntryOverrides() List activeEntries = addFileEntries.stream() .filter(addFileEntry -> addFileEntry.getPath().equals(path)) .toList(); - assertThat(activeEntries.size()).isEqualTo(1); + assertThat(activeEntries).hasSize(1); assertThat(activeEntries.get(0).getModificationTime()).isEqualTo(9999999L); } } @@ -317,7 +317,7 @@ public void testAddRemoveAdd() .filter(addFileEntry -> addFileEntry.getPath().equals("age=30/part-00002-5800be2e-2373-47d8-8b86-776a8ea9d69f.c000.snappy.parquet")) .toList(); - assertThat(activeEntries.size()).isEqualTo(1); + assertThat(activeEntries).hasSize(1); assertThat(activeEntries.get(0).getModificationTime()).isEqualTo(9999999L); } } @@ -424,7 +424,7 @@ private void testAllGetProtocolEntries(String tableName, String resourcePath) try (Stream protocolEntryStream = transactionLogAccess.getProtocolEntries(SESSION, tableSnapshot)) { List protocolEntries = protocolEntryStream.toList(); - assertThat(protocolEntries.size()).isEqualTo(1); + assertThat(protocolEntries).hasSize(1); assertThat(protocolEntries.get(0).minReaderVersion()).isEqualTo(1); assertThat(protocolEntries.get(0).minWriterVersion()).isEqualTo(2); } @@ -659,7 +659,7 @@ public void testSnapshotsAreConsistent() assertThat(dataFilesWithFixedVersion.stream().noneMatch(entry -> entry.getPath().equals(newFilePath))).isTrue(); } - assertThat(expectedDataFiles.size()).isEqualTo(dataFilesWithFixedVersion.size()); + assertThat(expectedDataFiles).hasSize(dataFilesWithFixedVersion.size()); List columns = extractColumnMetadata(transactionLogAccess.getMetadataEntry(SESSION, tableSnapshot), 
transactionLogAccess.getProtocolEntry(SESSION, tableSnapshot), TESTING_TYPE_MANAGER); for (int i = 0; i < expectedDataFiles.size(); i++) { AddFileEntry expected = expectedDataFiles.get(i); diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/statistics/TestDeltaLakeFileBasedTableStatisticsProvider.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/statistics/TestDeltaLakeFileBasedTableStatisticsProvider.java index 8230a7a2785c87..cf35046b63ec4c 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/statistics/TestDeltaLakeFileBasedTableStatisticsProvider.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/statistics/TestDeltaLakeFileBasedTableStatisticsProvider.java @@ -136,7 +136,7 @@ public void testStatisticsNaN() DeltaLakeTableHandle tableHandle = registerTable("nan"); TableStatistics stats = getTableStatistics(SESSION, tableHandle); assertThat(stats.getRowCount()).isEqualTo(Estimate.of(1)); - assertThat(stats.getColumnStatistics().size()).isEqualTo(1); + assertThat(stats.getColumnStatistics()).hasSize(1); ColumnStatistics columnStatistics = stats.getColumnStatistics().get(COLUMN_HANDLE); assertThat(columnStatistics.getRange()).isEqualTo(Optional.empty()); diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/TestDeltaLakeSchemaSupport.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/TestDeltaLakeSchemaSupport.java index 699343a7d61c0b..f50a8e0e239409 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/TestDeltaLakeSchemaSupport.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/TestDeltaLakeSchemaSupport.java @@ -114,7 +114,7 @@ private void testSinglePrimitiveFieldSchema(String json, ColumnMetadata metadata List schema = DeltaLakeSchemaSupport.getColumnMetadata(json, typeManager, ColumnMappingMode.NONE, 
List.of()).stream() .map(DeltaLakeColumnMetadata::columnMetadata) .collect(toImmutableList()); - assertThat(schema.size()).isEqualTo(1); + assertThat(schema).hasSize(1); assertThat(schema.get(0)).isEqualTo(metadata); } @@ -144,7 +144,7 @@ public void testComplexSchema() List schema = DeltaLakeSchemaSupport.getColumnMetadata(json, typeManager, ColumnMappingMode.NONE, List.of()).stream() .map(DeltaLakeColumnMetadata::columnMetadata) .collect(toImmutableList()); - assertThat(schema.size()).isEqualTo(5); + assertThat(schema).hasSize(5); // asserting on the string representations, since they're more readable assertThat(schema.get(0).toString()).isEqualTo("ColumnMetadata{name='a', type=integer, nullable}"); assertThat(schema.get(1).toString()).isEqualTo("ColumnMetadata{name='b', type=row(b1 integer, b2 row(b21 varchar, b22 boolean)), nullable}"); diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/checkpoint/TestTransactionLogTail.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/checkpoint/TestTransactionLogTail.java index 4cf1b67b0c955c..b42a58cb04a703 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/checkpoint/TestTransactionLogTail.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/transactionlog/checkpoint/TestTransactionLogTail.java @@ -41,8 +41,8 @@ private void testTail(String dataSource) throws Exception { String tableLocation = getClass().getClassLoader().getResource(format("%s/person", dataSource)).toURI().toString(); - assertThat(readJsonTransactionLogTails(tableLocation).size()).isEqualTo(7); - assertThat(updateJsonTransactionLogTails(tableLocation).size()).isEqualTo(7); + assertThat(readJsonTransactionLogTails(tableLocation)).hasSize(7); + assertThat(updateJsonTransactionLogTails(tableLocation)).hasSize(7); } private List updateJsonTransactionLogTails(String tableLocation) diff --git 
a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java index 98df650027465b..3d79b29999454a 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseHiveConnectorTest.java @@ -1976,7 +1976,7 @@ private void testCreatePartitionedTableAs(Session session, HiveStorageFormat sto assertThat(tableMetadata.metadata().getProperties()).containsEntry(PARTITIONED_BY_PROPERTY, ImmutableList.of("ship_priority", "order_status")); List partitions = getPartitions("test_create_partitioned_table_as"); - assertThat(partitions.size()).isEqualTo(3); + assertThat(partitions).hasSize(3); assertQuery(session, "SELECT * FROM test_create_partitioned_table_as", "SELECT orderkey, shippriority, orderstatus FROM orders"); @@ -2736,7 +2736,7 @@ private void verifyPartitionedBucketedTable(HiveStorageFormat storageFormat, Str assertThat(tableMetadata.metadata().getProperties()).containsEntry(BUCKET_COUNT_PROPERTY, 11); List partitions = getPartitions(tableName); - assertThat(partitions.size()).isEqualTo(3); + assertThat(partitions).hasSize(3); // verify that we create bucket_count files in each partition assertEqualsIgnoreOrder( @@ -2904,7 +2904,7 @@ private void verifyPartitionedBucketedTableAsFewRows(HiveStorageFormat storageFo assertThat(tableMetadata.metadata().getProperties()).containsEntry(BUCKET_COUNT_PROPERTY, 11); List partitions = getPartitions(tableName); - assertThat(partitions.size()).isEqualTo(3); + assertThat(partitions).hasSize(3); MaterializedResult actual = computeActual("SELECT * FROM " + tableName); MaterializedResult expected = resultBuilder(getSession(), canonicalizeType(createUnboundedVarcharType()), canonicalizeType(createUnboundedVarcharType()), canonicalizeType(createUnboundedVarcharType())) @@ -2976,7 +2976,7 @@ public void testUnregisterRegisterPartition() 
assertUpdate(format("INSERT INTO %s (dummy_col, part) VALUES (1, 'first'), (2, 'second'), (3, 'third')", tableName), 3); List paths = getQueryRunner().execute(getSession(), "SELECT \"$path\" FROM " + tableName + " ORDER BY \"$path\" ASC").toTestTypes().getMaterializedRows(); - assertThat(paths.size()).isEqualTo(3); + assertThat(paths).hasSize(3); String firstPartition = Location.of((String) paths.get(0).getField(0)).parentDirectory().toString(); @@ -3309,7 +3309,7 @@ private void testInsertPartitionedTable(Session session, HiveStorageFormat stora // verify the partitions List partitions = getPartitions("test_insert_partitioned_table"); - assertThat(partitions.size()).isEqualTo(3); + assertThat(partitions).hasSize(3); assertQuery(session, "SELECT * FROM test_insert_partitioned_table", "SELECT orderkey, shippriority, orderstatus FROM orders"); @@ -3378,7 +3378,7 @@ private void testInsertPartitionedTableExistingPartition(Session session, HiveSt // verify the partitions List partitions = getPartitions(tableName); - assertThat(partitions.size()).isEqualTo(3); + assertThat(partitions).hasSize(3); assertQuery( session, @@ -3435,7 +3435,7 @@ private void testInsertPartitionedTableOverwriteExistingPartition(Session sessio // verify the partitions List partitions = getPartitions(tableName); - assertThat(partitions.size()).isEqualTo(3); + assertThat(partitions).hasSize(3); assertQuery( session, @@ -4750,7 +4750,7 @@ private void testPathHiddenColumn(Session session, HiveStorageFormat storageForm List columnNames = ImmutableList.of("col0", "col1", PATH_COLUMN_NAME, FILE_SIZE_COLUMN_NAME, FILE_MODIFIED_TIME_COLUMN_NAME, PARTITION_COLUMN_NAME); List columnMetadatas = tableMetadata.columns(); - assertThat(columnMetadatas.size()).isEqualTo(columnNames.size()); + assertThat(columnMetadatas).hasSize(columnNames.size()); for (int i = 0; i < columnMetadatas.size(); i++) { ColumnMetadata columnMetadata = columnMetadatas.get(i); 
assertThat(columnMetadata.getName()).isEqualTo(columnNames.get(i)); @@ -4759,7 +4759,7 @@ private void testPathHiddenColumn(Session session, HiveStorageFormat storageForm assertThat(columnMetadata.isHidden()).isTrue(); } } - assertThat(getPartitions("test_path").size()).isEqualTo(3); + assertThat(getPartitions("test_path")).hasSize(3); MaterializedResult results = computeActual(session, format("SELECT *, \"%s\" FROM test_path", PATH_COLUMN_NAME)); Map partitionPathMap = new HashMap<>(); @@ -4780,7 +4780,7 @@ private void testPathHiddenColumn(Session session, HiveStorageFormat storageForm partitionPathMap.put(col1, parentDirectory); } } - assertThat(partitionPathMap.size()).isEqualTo(3); + assertThat(partitionPathMap).hasSize(3); assertUpdate(session, "DROP TABLE test_path"); assertThat(getQueryRunner().tableExists(session, "test_path")).isFalse(); @@ -4808,7 +4808,7 @@ public void testBucketHiddenColumn() List columnNames = ImmutableList.of("col0", "col1", PATH_COLUMN_NAME, BUCKET_COLUMN_NAME, FILE_SIZE_COLUMN_NAME, FILE_MODIFIED_TIME_COLUMN_NAME); List columnMetadatas = tableMetadata.columns(); - assertThat(columnMetadatas.size()).isEqualTo(columnNames.size()); + assertThat(columnMetadatas).hasSize(columnNames.size()); for (int i = 0; i < columnMetadatas.size(); i++) { ColumnMetadata columnMetadata = columnMetadatas.get(i); assertThat(columnMetadata.getName()).isEqualTo(columnNames.get(i)); @@ -4858,7 +4858,7 @@ public void testFileSizeHiddenColumn() List columnNames = ImmutableList.of("col0", "col1", PATH_COLUMN_NAME, FILE_SIZE_COLUMN_NAME, FILE_MODIFIED_TIME_COLUMN_NAME, PARTITION_COLUMN_NAME); List columnMetadatas = tableMetadata.columns(); - assertThat(columnMetadatas.size()).isEqualTo(columnNames.size()); + assertThat(columnMetadatas).hasSize(columnNames.size()); for (int i = 0; i < columnMetadatas.size(); i++) { ColumnMetadata columnMetadata = columnMetadatas.get(i); assertThat(columnMetadata.getName()).isEqualTo(columnNames.get(i)); @@ -4866,7 +4866,7 @@ 
public void testFileSizeHiddenColumn() assertThat(columnMetadata.isHidden()).isTrue(); } } - assertThat(getPartitions("test_file_size").size()).isEqualTo(3); + assertThat(getPartitions("test_file_size")).hasSize(3); MaterializedResult results = computeActual(format("SELECT *, \"%s\" FROM test_file_size", FILE_SIZE_COLUMN_NAME)); Map fileSizeMap = new HashMap<>(); @@ -4885,7 +4885,7 @@ public void testFileSizeHiddenColumn() fileSizeMap.put(col1, fileSize); } } - assertThat(fileSizeMap.size()).isEqualTo(3); + assertThat(fileSizeMap).hasSize(3); assertUpdate("DROP TABLE test_file_size"); } @@ -4918,7 +4918,7 @@ private void testFileModifiedTimeHiddenColumn(HiveTimestampPrecision precision) List columnNames = ImmutableList.of("col0", "col1", PATH_COLUMN_NAME, FILE_SIZE_COLUMN_NAME, FILE_MODIFIED_TIME_COLUMN_NAME, PARTITION_COLUMN_NAME); List columnMetadatas = tableMetadata.columns(); - assertThat(columnMetadatas.size()).isEqualTo(columnNames.size()); + assertThat(columnMetadatas).hasSize(columnNames.size()); for (int i = 0; i < columnMetadatas.size(); i++) { ColumnMetadata columnMetadata = columnMetadatas.get(i); assertThat(columnMetadata.getName()).isEqualTo(columnNames.get(i)); @@ -4926,7 +4926,7 @@ private void testFileModifiedTimeHiddenColumn(HiveTimestampPrecision precision) assertThat(columnMetadata.isHidden()).isTrue(); } } - assertThat(getPartitions("test_file_modified_time").size()).isEqualTo(3); + assertThat(getPartitions("test_file_modified_time")).hasSize(3); Session sessionWithTimestampPrecision = withTimestampPrecision(getSession(), precision); MaterializedResult results = computeActual( @@ -4948,7 +4948,7 @@ private void testFileModifiedTimeHiddenColumn(HiveTimestampPrecision precision) fileModifiedTimeMap.put(col1, fileModifiedTime); } } - assertThat(fileModifiedTimeMap.size()).isEqualTo(3); + assertThat(fileModifiedTimeMap).hasSize(3); assertUpdate("DROP TABLE test_file_modified_time"); } @@ -4973,7 +4973,7 @@ public void testPartitionHiddenColumn() 
List columnNames = ImmutableList.of("col0", "col1", "col2", PATH_COLUMN_NAME, FILE_SIZE_COLUMN_NAME, FILE_MODIFIED_TIME_COLUMN_NAME, PARTITION_COLUMN_NAME); List columnMetadatas = tableMetadata.columns(); - assertThat(columnMetadatas.size()).isEqualTo(columnNames.size()); + assertThat(columnMetadatas).hasSize(columnNames.size()); for (int i = 0; i < columnMetadatas.size(); i++) { ColumnMetadata columnMetadata = columnMetadatas.get(i); assertThat(columnMetadata.getName()).isEqualTo(columnNames.get(i)); @@ -4981,7 +4981,7 @@ public void testPartitionHiddenColumn() assertThat(columnMetadata.isHidden()).isTrue(); } } - assertThat(getPartitions("test_partition_hidden_column").size()).isEqualTo(9); + assertThat(getPartitions("test_partition_hidden_column")).hasSize(9); MaterializedResult results = computeActual(format("SELECT *, \"%s\" FROM test_partition_hidden_column", PARTITION_COLUMN_NAME)); for (MaterializedRow row : results.getMaterializedRows()) { diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestBackgroundHiveSplitLoader.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestBackgroundHiveSplitLoader.java index 27f6563b24f79c..e1b2cb322d1edf 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestBackgroundHiveSplitLoader.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestBackgroundHiveSplitLoader.java @@ -159,7 +159,7 @@ public void testNoPathFilter() HiveSplitSource hiveSplitSource = hiveSplitSource(backgroundHiveSplitLoader); backgroundHiveSplitLoader.start(hiveSplitSource); - assertThat(drain(hiveSplitSource).size()).isEqualTo(2); + assertThat(drain(hiveSplitSource)).hasSize(2); } @Test @@ -196,7 +196,7 @@ private void assertCsvSplitCount(FileEntry file, Map tableProper HiveSplitSource hiveSplitSource = hiveSplitSource(backgroundHiveSplitLoader); backgroundHiveSplitLoader.start(hiveSplitSource); - assertThat(drainSplits(hiveSplitSource).size()).isEqualTo(expectedSplitCount); + 
assertThat(drainSplits(hiveSplitSource)).hasSize(expectedSplitCount); } @Test @@ -210,7 +210,7 @@ public void testPathFilter() HiveSplitSource hiveSplitSource = hiveSplitSource(backgroundHiveSplitLoader); backgroundHiveSplitLoader.start(hiveSplitSource); List paths = drain(hiveSplitSource); - assertThat(paths.size()).isEqualTo(1); + assertThat(paths).hasSize(1); assertThat(paths.get(0)).isEqualTo(LOCATION.toString()); } @@ -228,7 +228,7 @@ public void testPathFilterOneBucketMatchPartitionedTable() HiveSplitSource hiveSplitSource = hiveSplitSource(backgroundHiveSplitLoader); backgroundHiveSplitLoader.start(hiveSplitSource); List paths = drain(hiveSplitSource); - assertThat(paths.size()).isEqualTo(1); + assertThat(paths).hasSize(1); assertThat(paths.get(0)).isEqualTo(LOCATION.toString()); } @@ -252,7 +252,7 @@ public void testPathFilterBucketedPartitionedTable() HiveSplitSource hiveSplitSource = hiveSplitSource(backgroundHiveSplitLoader); backgroundHiveSplitLoader.start(hiveSplitSource); List paths = drain(hiveSplitSource); - assertThat(paths.size()).isEqualTo(1); + assertThat(paths).hasSize(1); assertThat(paths.get(0)).isEqualTo(LOCATION.toString()); } @@ -276,7 +276,7 @@ public void testEmptyFileWithNoBlocks() backgroundHiveSplitLoader.start(hiveSplitSource); List splits = drainSplits(hiveSplitSource); - assertThat(splits.size()).isEqualTo(0); + assertThat(splits).hasSize(0); } @Test @@ -339,7 +339,7 @@ public TupleDomain getCurrentPredicate() HiveSplitSource hiveSplitSource = hiveSplitSource(backgroundHiveSplitLoader); backgroundHiveSplitLoader.start(hiveSplitSource); - assertThat(drain(hiveSplitSource).size()).isEqualTo(2); + assertThat(drain(hiveSplitSource)).hasSize(2); assertThat(hiveSplitSource.isFinished()).isTrue(); } finally { @@ -381,7 +381,7 @@ public void testCachedDirectoryLister() } for (Future> future : futures) { - assertThat(future.get().size()).isEqualTo(TEST_LOCATIONS.size()); + assertThat(future.get()).hasSize(TEST_LOCATIONS.size()); } 
assertThat(cachingDirectoryLister.getRequestCount()).isEqualTo(totalCount); assertThat(cachingDirectoryLister.getHitCount()).isEqualTo(totalCount - 1); @@ -517,7 +517,7 @@ public void testMultipleSplitsPerBucket() HiveSplitSource hiveSplitSource = hiveSplitSource(backgroundHiveSplitLoader); backgroundHiveSplitLoader.start(hiveSplitSource); - assertThat(drainSplits(hiveSplitSource).size()).isEqualTo(17); + assertThat(drainSplits(hiveSplitSource)).hasSize(17); } @Test @@ -810,7 +810,7 @@ public void testBuildManifestFileIterator() locations, true); List splits = ImmutableList.copyOf(splitIterator); - assertThat(splits.size()).isEqualTo(2); + assertThat(splits).hasSize(2); assertThat(splits.get(0).getPath()).isEqualTo(firstFilePath.toString()); assertThat(splits.get(1).getPath()).isEqualTo(secondFilePath.toString()); } @@ -852,7 +852,7 @@ public void testBuildManifestFileIteratorNestedDirectory() locations, false); List splits = ImmutableList.copyOf(splitIterator); - assertThat(splits.size()).isEqualTo(2); + assertThat(splits).hasSize(2); assertThat(splits.get(0).getPath()).isEqualTo(filePath.toString()); assertThat(splits.get(1).getPath()).isEqualTo(directoryPath.toString()); } @@ -892,7 +892,7 @@ public void testBuildManifestFileIteratorWithCacheInvalidation() locations1, true); List splits1 = ImmutableList.copyOf(splitIterator1); - assertThat(splits1.size()).isEqualTo(1); + assertThat(splits1).hasSize(1); assertThat(splits1.get(0).getPath()).isEqualTo(firstFilePath.toString()); Location secondFilePath = Location.of("memory:///db_name/table_name/file2"); @@ -906,7 +906,7 @@ public void testBuildManifestFileIteratorWithCacheInvalidation() locations2, true); List splits2 = ImmutableList.copyOf(splitIterator2); - assertThat(splits2.size()).isEqualTo(2); + assertThat(splits2).hasSize(2); assertThat(splits2.get(0).getPath()).isEqualTo(firstFilePath.toString()); assertThat(splits2.get(1).getPath()).isEqualTo(secondFilePath.toString()); } diff --git 
a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveFileFormats.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveFileFormats.java index cbcd5572872879..d9f1fa85737efb 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveFileFormats.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveFileFormats.java @@ -1044,7 +1044,7 @@ private static void checkPageSource(ConnectorPageSource pageSource, List batch = getSplits(hiveSplitSource, 1); - assertThat(batch.size()).isEqualTo(1); + assertThat(batch).hasSize(1); splits.set(batch.get(0)); } catch (Throwable e) { @@ -269,7 +269,7 @@ public void testOutstandingSplitSize() assertThat(hiveSplitSource.getBufferedInternalSplitCount()).isEqualTo(i + 1); } - assertThat(getSplits(hiveSplitSource, maxSplitCount).size()).isEqualTo(maxSplitCount); + assertThat(getSplits(hiveSplitSource, maxSplitCount)).hasSize(maxSplitCount); for (int i = 0; i < maxSplitCount; i++) { hiveSplitSource.addToQueue(new TestSplit(i)); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java index ff88c2b5cf3fde..148ec287cd1f9b 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java @@ -438,24 +438,24 @@ public void testGetPartitionsByNames() assertThat(mockClient.getAccessCount()).isEqualTo(1); // Select half of the available partitions and load them into the cache - assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1)).size()).isEqualTo(1); + assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1))).hasSize(1); assertThat(mockClient.getAccessCount()).isEqualTo(2); // Now select all the partitions - 
assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size()).isEqualTo(2); + assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2))).hasSize(2); // There should be one more access to fetch the remaining partition assertThat(mockClient.getAccessCount()).isEqualTo(3); // Now if we fetch any or both of them, they should not hit the client - assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1)).size()).isEqualTo(1); - assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION2)).size()).isEqualTo(1); - assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size()).isEqualTo(2); + assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1))).hasSize(1); + assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION2))).hasSize(1); + assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2))).hasSize(2); assertThat(mockClient.getAccessCount()).isEqualTo(3); metastore.flushCache(); // Fetching both should only result in one batched access - assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size()).isEqualTo(2); + assertThat(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2))).hasSize(2); assertThat(mockClient.getAccessCount()).isEqualTo(4); } @@ -910,7 +910,7 @@ public void testInvalidGetPartitionsByNames() { Table table = metastore.getTable(TEST_DATABASE, TEST_TABLE).orElseThrow(); Map> partitionsByNames = metastore.getPartitionsByNames(table, ImmutableList.of(BAD_PARTITION)); - assertThat(partitionsByNames.size()).isEqualTo(1); + assertThat(partitionsByNames).hasSize(1); Optional onlyElement = Iterables.getOnlyElement(partitionsByNames.values()); assertThat(onlyElement).isEmpty(); } diff --git 
a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueInputConverter.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueInputConverter.java index 89b4f4a8af662c..e3d713056449f3 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueInputConverter.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueInputConverter.java @@ -100,7 +100,7 @@ public void testConvertFunction() .withResourceUris(input.getResourceUris()); LanguageFunction actual = GlueToTrinoConverter.convertFunction(function); - assertThat(input.getResourceUris().size()).isEqualTo(3); + assertThat(input.getResourceUris()).hasSize(3); assertThat(actual).isEqualTo(expected); // verify that the owner comes from the metastore @@ -114,7 +114,7 @@ private static void assertColumnList(List result = readFile(fileSystemFactory, Map.of(), OptionalLong.empty(), acidInfo, fileLocation, 625); - assertThat(result.size()).isEqualTo(1); + assertThat(result).hasSize(1); } @Test @@ -178,7 +178,7 @@ public void testFullFileReadOriginalFilesTable() List expected = expectedResult(OptionalLong.empty(), nationKey -> nationKey == 24, 1); List result = readFile(fileSystemFactory, ALL_COLUMNS, OptionalLong.empty(), Optional.of(acidInfo), fileLocation, 1780); - assertThat(result.size()).isEqualTo(expected.size()); + assertThat(result).hasSize(expected.size()); int deletedRowKey = 24; String deletedRowNameColumn = "UNITED STATES"; assertThat(result.stream().anyMatch(acidNationRow -> acidNationRow.name().equals(deletedRowNameColumn) && acidNationRow.nationKey() == deletedRowKey)) diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/parquet/predicate/TestParquetPredicateUtils.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/parquet/predicate/TestParquetPredicateUtils.java index 2d483b5fde5a55..f8b20620da80f9 100644 --- 
a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/parquet/predicate/TestParquetPredicateUtils.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/parquet/predicate/TestParquetPredicateUtils.java @@ -108,7 +108,7 @@ private void testParquetTupleDomainPrimitive(boolean useColumnNames) Map, ColumnDescriptor> descriptorsByPath = getDescriptors(fileSchema, fileSchema); TupleDomain tupleDomain = getParquetTupleDomain(descriptorsByPath, domain, fileSchema, useColumnNames); - assertThat(tupleDomain.getDomains().get().size()).isEqualTo(1); + assertThat(tupleDomain.getDomains().get()).hasSize(1); ColumnDescriptor descriptor = tupleDomain.getDomains().get().keySet().iterator().next(); assertThat(descriptor.getPath().length).isEqualTo(1); assertThat(descriptor.getPath()[0]).isEqualTo("my_primitive"); @@ -177,7 +177,7 @@ private void testParquetTupleDomainStructWithPrimitiveColumnPredicate(boolean us new PrimitiveType(OPTIONAL, INT32, "c"))); Map, ColumnDescriptor> descriptorsByPath = getDescriptors(fileSchema, fileSchema); TupleDomain calculatedTupleDomain = getParquetTupleDomain(descriptorsByPath, tupleDomain, fileSchema, useColumNames); - assertThat(calculatedTupleDomain.getDomains().get().size()).isEqualTo(1); + assertThat(calculatedTupleDomain.getDomains().get()).hasSize(1); ColumnDescriptor selectedColumnDescriptor = descriptorsByPath.get(ImmutableList.of("row_field", "b")); assertThat(calculatedTupleDomain.getDomains().get()).containsEntry(selectedColumnDescriptor, predicateDomain); } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/statistics/TestMetastoreHiveStatisticsProvider.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/statistics/TestMetastoreHiveStatisticsProvider.java index 17118f7f1396c9..61845ce0bac8e1 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/statistics/TestMetastoreHiveStatisticsProvider.java +++ 
b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/statistics/TestMetastoreHiveStatisticsProvider.java @@ -713,7 +713,7 @@ public void testGetTableStatisticsSampling() protected Map getPartitionsStatistics(ConnectorSession session, SchemaTableName table, List hivePartitions, Set columns) { assertThat(table).isEqualTo(TABLE); - assertThat(hivePartitions.size()).isEqualTo(1); + assertThat(hivePartitions).hasSize(1); return ImmutableMap.of(); } }; diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/util/TestAcidTables.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/util/TestAcidTables.java index c4e9a79f3d705c..6b2adbe5644bf8 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/util/TestAcidTables.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/util/TestAcidTables.java @@ -123,7 +123,7 @@ public void testOriginal() assertThat(state.deltas()).isEmpty(); List files = state.originalFiles(); - assertThat(files.size()).isEqualTo(7); + assertThat(files).hasSize(7); assertThat(files.get(0).location()).isEqualTo(Location.of("memory:///tbl/part1/000000_0")); assertThat(files.get(1).location()).isEqualTo(Location.of("memory:///tbl/part1/000000_0_copy_1")); assertThat(files.get(2).location()).isEqualTo(Location.of("memory:///tbl/part1/000000_0_copy_2")); @@ -160,7 +160,7 @@ public void testOriginalDeltas() assertThat(state.baseDirectory()).isEmpty(); List files = state.originalFiles(); - assertThat(files.size()).isEqualTo(5); + assertThat(files).hasSize(5); assertThat(files.get(0).location()).isEqualTo(Location.of("memory:///tbl/part1/000000_0")); assertThat(files.get(1).location()).isEqualTo(Location.of("memory:///tbl/part1/000001_1")); assertThat(files.get(2).location()).isEqualTo(Location.of("memory:///tbl/part1/000002_0")); @@ -168,7 +168,7 @@ public void testOriginalDeltas() assertThat(files.get(4).location()).isEqualTo(Location.of("memory:///tbl/part1/subdir/000000_0")); List deltas = state.deltas(); - 
assertThat(deltas.size()).isEqualTo(2); + assertThat(deltas).hasSize(2); ParsedDelta delta = deltas.get(0); assertThat(delta.path()).isEqualTo("memory:///tbl/part1/delta_025_030"); assertThat(delta.min()).isEqualTo(25); @@ -201,10 +201,10 @@ public void testBaseDeltas() new ValidWriteIdList("tbl:100:%d:".formatted(Long.MAX_VALUE))); assertThat(dir.baseDirectory()).contains(Location.of("memory:///tbl/part1/base_49")); - assertThat(dir.originalFiles().size()).isEqualTo(0); + assertThat(dir.originalFiles()).hasSize(0); List deltas = dir.deltas(); - assertThat(deltas.size()).isEqualTo(1); + assertThat(deltas).hasSize(1); ParsedDelta delta = deltas.get(0); assertThat(delta.path()).isEqualTo("memory:///tbl/part1/delta_050_105"); assertThat(delta.min()).isEqualTo(50); @@ -250,7 +250,7 @@ public void testOverlapingDelta() assertThat(state.baseDirectory()).contains(Location.of("memory:///tbl/part1/base_50")); List deltas = state.deltas(); - assertThat(deltas.size()).isEqualTo(4); + assertThat(deltas).hasSize(4); assertThat(deltas.get(0).path()).isEqualTo("memory:///tbl/part1/delta_40_60"); assertThat(deltas.get(1).path()).isEqualTo("memory:///tbl/part1/delta_00061_61"); assertThat(deltas.get(2).path()).isEqualTo("memory:///tbl/part1/delta_000062_62"); @@ -282,7 +282,7 @@ public void testOverlapingDelta2() assertThat(state.baseDirectory()).contains(Location.of("memory:///tbl/part1/base_50")); List deltas = state.deltas(); - assertThat(deltas.size()).isEqualTo(5); + assertThat(deltas).hasSize(5); assertThat(deltas.get(0).path()).isEqualTo("memory:///tbl/part1/delta_40_60"); assertThat(deltas.get(1).path()).isEqualTo("memory:///tbl/part1/delta_00061_61_0"); assertThat(deltas.get(2).path()).isEqualTo("memory:///tbl/part1/delta_000062_62_0"); @@ -304,7 +304,7 @@ public void deltasWithOpenTxnInRead() new ValidWriteIdList("tbl:100:4:4")); List deltas = state.deltas(); - assertThat(deltas.size()).isEqualTo(2); + assertThat(deltas).hasSize(2); 
assertThat(deltas.get(0).path()).isEqualTo("memory:///tbl/part1/delta_1_1"); assertThat(deltas.get(1).path()).isEqualTo("memory:///tbl/part1/delta_2_5"); } @@ -326,7 +326,7 @@ public void deltasWithOpenTxnInRead2() new ValidWriteIdList("tbl:100:4:4")); List deltas = state.deltas(); - assertThat(deltas.size()).isEqualTo(2); + assertThat(deltas).hasSize(2); assertThat(deltas.get(0).path()).isEqualTo("memory:///tbl/part1/delta_1_1"); assertThat(deltas.get(1).path()).isEqualTo("memory:///tbl/part1/delta_2_5"); } @@ -357,7 +357,7 @@ public void testBaseWithDeleteDeltas() assertThat(state.originalFiles()).isEmpty(); List deltas = state.deltas(); - assertThat(deltas.size()).isEqualTo(2); + assertThat(deltas).hasSize(2); assertThat(deltas.get(0).path()).isEqualTo("memory:///tbl/part1/delete_delta_050_105"); assertThat(deltas.get(1).path()).isEqualTo("memory:///tbl/part1/delta_050_105"); // The delete_delta_110_110 should not be read because it is greater than the high watermark. @@ -387,7 +387,7 @@ public void testOverlapingDeltaAndDeleteDelta() assertThat(state.baseDirectory()).contains(Location.of("memory:///tbl/part1/base_50")); List deltas = state.deltas(); - assertThat(deltas.size()).isEqualTo(6); + assertThat(deltas).hasSize(6); assertThat(deltas.get(0).path()).isEqualTo("memory:///tbl/part1/delete_delta_40_60"); assertThat(deltas.get(1).path()).isEqualTo("memory:///tbl/part1/delta_40_60"); assertThat(deltas.get(2).path()).isEqualTo("memory:///tbl/part1/delta_00061_61"); @@ -412,7 +412,7 @@ public void testMinorCompactedDeltaMakesInBetweenDelteDeltaObsolete() new ValidWriteIdList("tbl:100:%d:".formatted(Long.MAX_VALUE))); List deltas = state.deltas(); - assertThat(deltas.size()).isEqualTo(1); + assertThat(deltas).hasSize(1); assertThat(deltas.get(0).path()).isEqualTo("memory:///tbl/part1/delta_40_60"); } @@ -435,7 +435,7 @@ public void deleteDeltasWithOpenTxnInRead() new ValidWriteIdList("tbl:100:4:4")); List deltas = state.deltas(); - 
assertThat(deltas.size()).isEqualTo(3); + assertThat(deltas).hasSize(3); assertThat(deltas.get(0).path()).isEqualTo("memory:///tbl/part1/delta_1_1"); assertThat(deltas.get(1).path()).isEqualTo("memory:///tbl/part1/delete_delta_2_5"); assertThat(deltas.get(2).path()).isEqualTo("memory:///tbl/part1/delta_2_5"); @@ -473,7 +473,7 @@ public void testSkippingSubDirectories() // Subdirectories in delta directories should be skipped similar to Hive implementation List deltas = state.deltas(); - assertThat(deltas.size()).isEqualTo(2); + assertThat(deltas).hasSize(2); assertThat(deltas.get(0).path()).isEqualTo("memory:///tbl/part1/delta_025_025"); assertThat(deltas.get(1).path()).isEqualTo("memory:///tbl/part1/delete_delta_029_029"); } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/util/TestHiveAcidUtils.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/util/TestHiveAcidUtils.java index e0831b5da8e17f..939291bc28a838 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/util/TestHiveAcidUtils.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/util/TestHiveAcidUtils.java @@ -63,10 +63,10 @@ public void testOriginal() conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertThat(dir.getBaseDirectory()).isNull(); - assertThat(dir.getCurrentDirectories().size()).isEqualTo(0); - assertThat(dir.getObsolete().size()).isEqualTo(0); + assertThat(dir.getCurrentDirectories()).hasSize(0); + assertThat(dir.getObsolete()).hasSize(0); List result = dir.getOriginalFiles(); - assertThat(result.size()).isEqualTo(7); + assertThat(result).hasSize(7); assertThat(result.get(0).getFileStatus().getPath().toString()).isEqualTo("mock:/tbl/part1/000000_0"); assertThat(result.get(1).getFileStatus().getPath().toString()).isEqualTo("mock:/tbl/part1/000000_0" + Utilities.COPY_KEYWORD + "1"); assertThat(result.get(2).getFileStatus().getPath().toString()).isEqualTo("mock:/tbl/part1/000000_0" + Utilities.COPY_KEYWORD + "2"); @@ -101,18 
+101,18 @@ public void testOriginalDeltas() new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertThat(dir.getBaseDirectory()).isNull(); List obsolete = dir.getObsolete(); - assertThat(obsolete.size()).isEqualTo(2); + assertThat(obsolete).hasSize(2); assertThat(obsolete.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_025_025"); assertThat(obsolete.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_029_029"); List result = dir.getOriginalFiles(); - assertThat(result.size()).isEqualTo(5); + assertThat(result).hasSize(5); assertThat(result.get(0).getFileStatus().getPath().toString()).isEqualTo("mock:/tbl/part1/000000_0"); assertThat(result.get(1).getFileStatus().getPath().toString()).isEqualTo("mock:/tbl/part1/000001_1"); assertThat(result.get(2).getFileStatus().getPath().toString()).isEqualTo("mock:/tbl/part1/000002_0"); assertThat(result.get(3).getFileStatus().getPath().toString()).isEqualTo("mock:/tbl/part1/random"); assertThat(result.get(4).getFileStatus().getPath().toString()).isEqualTo("mock:/tbl/part1/subdir/000000_0"); List deltas = dir.getCurrentDirectories(); - assertThat(deltas.size()).isEqualTo(2); + assertThat(deltas).hasSize(2); AcidUtils.ParsedDelta delt = deltas.get(0); assertThat(delt.getPath().toString()).isEqualTo("mock:/tbl/part1/delta_025_030"); assertThat(delt.getMinWriteId()).isEqualTo(25); @@ -143,15 +143,15 @@ public void testBaseDeltas() AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertThat(dir.getBaseDirectory().toString()).isEqualTo("mock:/tbl/part1/base_49"); List obsolete = dir.getObsolete(); - assertThat(obsolete.size()).isEqualTo(5); + assertThat(obsolete).hasSize(5); assertThat(obsolete.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/base_10"); assertThat(obsolete.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/base_5"); 
assertThat(obsolete.get(2).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_025_030"); assertThat(obsolete.get(3).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_025_025"); assertThat(obsolete.get(4).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_029_029"); - assertThat(dir.getOriginalFiles().size()).isEqualTo(0); + assertThat(dir.getOriginalFiles()).hasSize(0); List deltas = dir.getCurrentDirectories(); - assertThat(deltas.size()).isEqualTo(1); + assertThat(deltas).hasSize(1); AcidUtils.ParsedDelta delt = deltas.get(0); assertThat(delt.getPath().toString()).isEqualTo("mock:/tbl/part1/delta_050_105"); assertThat(delt.getMinWriteId()).isEqualTo(50); @@ -172,7 +172,7 @@ public void testObsoleteOriginals() AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:150:" + Long.MAX_VALUE + ":")); // Obsolete list should include the two original bucket files, and the old base dir List obsolete = dir.getObsolete(); - assertThat(obsolete.size()).isEqualTo(3); + assertThat(obsolete).hasSize(3); assertThat(obsolete.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/base_5"); assertThat(dir.getBaseDirectory().toString()).isEqualTo("mock:/tbl/part1/base_10"); } @@ -194,11 +194,11 @@ public void testOverlapingDelta() AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertThat(dir.getBaseDirectory().toString()).isEqualTo("mock:/tbl/part1/base_50"); List obsolete = dir.getObsolete(); - assertThat(obsolete.size()).isEqualTo(2); + assertThat(obsolete).hasSize(2); assertThat(obsolete.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_052_55"); assertThat(obsolete.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_0060_60"); List delts = dir.getCurrentDirectories(); - assertThat(delts.size()).isEqualTo(4); + assertThat(delts).hasSize(4); assertThat(delts.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_40_60"); 
assertThat(delts.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_00061_61"); assertThat(delts.get(2).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_000062_62"); @@ -226,14 +226,14 @@ public void testOverlapingDelta2() AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertThat(dir.getBaseDirectory().toString()).isEqualTo("mock:/tbl/part1/base_50"); List obsolete = dir.getObsolete(); - assertThat(obsolete.size()).isEqualTo(5); + assertThat(obsolete).hasSize(5); assertThat(obsolete.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_052_55"); assertThat(obsolete.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_058_58"); assertThat(obsolete.get(2).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_0060_60_1"); assertThat(obsolete.get(3).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_0060_60_4"); assertThat(obsolete.get(4).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_0060_60_7"); List delts = dir.getCurrentDirectories(); - assertThat(delts.size()).isEqualTo(5); + assertThat(delts).hasSize(5); assertThat(delts.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_40_60"); assertThat(delts.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_00061_61_0"); assertThat(delts.get(2).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_000062_62_0"); @@ -252,7 +252,7 @@ public void deltasWithOpenTxnInRead() Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:4:4")); List delts = dir.getCurrentDirectories(); - assertThat(delts.size()).isEqualTo(2); + assertThat(delts).hasSize(2); assertThat(delts.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_1_1"); assertThat(delts.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_2_5"); } @@ -271,7 +271,7 @@ public void deltasWithOpenTxnInRead2() Path part = new 
MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:4:4")); List delts = dir.getCurrentDirectories(); - assertThat(delts.size()).isEqualTo(2); + assertThat(delts).hasSize(2); assertThat(delts.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_1_1"); assertThat(delts.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_2_5"); } @@ -287,7 +287,7 @@ public void deltasWithOpenTxnsNotInCompact() Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidCompactorWriteIdList("tbl:4:" + Long.MAX_VALUE)); List delts = dir.getCurrentDirectories(); - assertThat(delts.size()).isEqualTo(1); + assertThat(delts).hasSize(1); assertThat(delts.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_1_1"); } @@ -304,7 +304,7 @@ public void deltasWithOpenTxnsNotInCompact2() Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidCompactorWriteIdList("tbl:3:" + Long.MAX_VALUE)); List delts = dir.getCurrentDirectories(); - assertThat(delts.size()).isEqualTo(1); + assertThat(delts).hasSize(1); assertThat(delts.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_1_1"); } @@ -329,7 +329,7 @@ public void testBaseWithDeleteDeltas() AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertThat(dir.getBaseDirectory().toString()).isEqualTo("mock:/tbl/part1/base_49"); List obsolete = dir.getObsolete(); - assertThat(obsolete.size()).isEqualTo(7); + assertThat(obsolete).hasSize(7); assertThat(obsolete.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/base_10"); assertThat(obsolete.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/base_5"); assertThat(obsolete.get(2).getPath().toString()).isEqualTo("mock:/tbl/part1/delete_delta_025_030"); @@ -337,9 +337,9 @@ public void 
testBaseWithDeleteDeltas() assertThat(obsolete.get(4).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_025_025"); assertThat(obsolete.get(5).getPath().toString()).isEqualTo("mock:/tbl/part1/delete_delta_029_029"); assertThat(obsolete.get(6).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_029_029"); - assertThat(dir.getOriginalFiles().size()).isEqualTo(0); + assertThat(dir.getOriginalFiles()).hasSize(0); List deltas = dir.getCurrentDirectories(); - assertThat(deltas.size()).isEqualTo(2); + assertThat(deltas).hasSize(2); assertThat(deltas.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delete_delta_050_105"); assertThat(deltas.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_050_105"); // The delete_delta_110_110 should not be read because it is greater than the high watermark. @@ -365,12 +365,12 @@ public void testOverlapingDeltaAndDeleteDelta() AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertThat(dir.getBaseDirectory().toString()).isEqualTo("mock:/tbl/part1/base_50"); List obsolete = dir.getObsolete(); - assertThat(obsolete.size()).isEqualTo(3); + assertThat(obsolete).hasSize(3); assertThat(obsolete.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delete_delta_052_55"); assertThat(obsolete.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_052_55"); assertThat(obsolete.get(2).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_0060_60"); List delts = dir.getCurrentDirectories(); - assertThat(delts.size()).isEqualTo(6); + assertThat(delts).hasSize(6); assertThat(delts.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delete_delta_40_60"); assertThat(delts.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_40_60"); assertThat(delts.get(2).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_00061_61"); @@ -392,10 +392,10 @@ public void testMinorCompactedDeltaMakesInBetweenDelteDeltaObsolete() Path part = new 
MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); List obsolete = dir.getObsolete(); - assertThat(obsolete.size()).isEqualTo(1); + assertThat(obsolete).hasSize(1); assertThat(obsolete.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delete_delta_50_50"); List delts = dir.getCurrentDirectories(); - assertThat(delts.size()).isEqualTo(1); + assertThat(delts).hasSize(1); assertThat(delts.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_40_60"); } @@ -417,7 +417,7 @@ public void deltasAndDeleteDeltasWithOpenTxnsNotInCompact() Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidCompactorWriteIdList("tbl:4:" + Long.MAX_VALUE + ":")); List delts = dir.getCurrentDirectories(); - assertThat(delts.size()).isEqualTo(2); + assertThat(delts).hasSize(2); assertThat(delts.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_1_1"); assertThat(delts.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delete_delta_2_2"); } @@ -438,7 +438,7 @@ public void deleteDeltasWithOpenTxnInRead() Path part = new MockPath(fs, "mock:/tbl/part1"); AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:4:4")); List delts = dir.getCurrentDirectories(); - assertThat(delts.size()).isEqualTo(3); + assertThat(delts).hasSize(3); assertThat(delts.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_1_1"); assertThat(delts.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delete_delta_2_5"); assertThat(delts.get(2).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_2_5"); @@ -472,9 +472,9 @@ public void testSkippingSubDirectories() AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReaderWriteIdList("tbl:100:" + Long.MAX_VALUE + ":")); assertThat(dir.getBaseDirectory().toString()).isEqualTo("mock:/tbl/part1/base_1"); List 
obsolete = dir.getObsolete(); - assertThat(obsolete.size()).isEqualTo(0); + assertThat(obsolete).hasSize(0); List delts = dir.getCurrentDirectories(); - assertThat(delts.size()).isEqualTo(2); + assertThat(delts).hasSize(2); assertThat(delts.get(0).getPath().toString()).isEqualTo("mock:/tbl/part1/delta_025_025"); assertThat(delts.get(1).getPath().toString()).isEqualTo("mock:/tbl/part1/delete_delta_029_029"); } diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java index 140399c2c27526..b4faa0268f2324 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergConnectorTest.java @@ -6202,9 +6202,9 @@ public void testExpireSnapshots() .matches("VALUES (BIGINT '3', VARCHAR 'one two')"); List updatedFiles = getAllMetadataFilesFromTableDirectory(tableLocation); List updatedSnapshots = getSnapshotIds(tableName); - assertThat(updatedFiles.size()).isEqualTo(initialFiles.size() - 2); + assertThat(updatedFiles).hasSize(initialFiles.size() - 2); assertThat(updatedSnapshots.size()).isLessThan(initialSnapshots.size()); - assertThat(updatedSnapshots.size()).isEqualTo(1); + assertThat(updatedSnapshots).hasSize(1); assertThat(initialSnapshots).containsAll(updatedSnapshots); } @@ -6891,7 +6891,7 @@ public void testReadFromVersionedTableWithExpiredHistory() assertQuerySucceeds(sessionWithShortRetentionUnlocked, "ALTER TABLE " + tableName + " EXECUTE EXPIRE_SNAPSHOTS (retention_threshold => '0s')"); List updatedSnapshots = getSnapshotIds(tableName); assertThat(updatedSnapshots.size()).isLessThan(initialSnapshots.size()); - assertThat(updatedSnapshots.size()).isEqualTo(1); + assertThat(updatedSnapshots).hasSize(1); assertThat(query("SELECT sum(value), listagg(key, ' ') WITHIN GROUP (ORDER BY key) FROM " + tableName + " FOR VERSION AS 
OF " + v3SnapshotId)) .matches("VALUES (BIGINT '3', VARCHAR 'one two')"); diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergMaterializedViewTest.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergMaterializedViewTest.java index f3b0de98c17a18..d68ee5af1b3528 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergMaterializedViewTest.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/BaseIcebergMaterializedViewTest.java @@ -889,14 +889,14 @@ public void testMaterializedViewCreatedFromTableFunctionAndTable() assertUpdate("CREATE MATERIALIZED VIEW " + viewName + " AS SELECT * FROM TABLE(mock.system.sequence_function()) CROSS JOIN " + sourceTableName); List materializedRows = computeActual("SELECT * FROM " + viewName).getMaterializedRows(); - assertThat(materializedRows.size()).isEqualTo(1); + assertThat(materializedRows).hasSize(1); assertThat(materializedRows.get(0).getField(1)).isEqualTo(2); int valueFromPtf1 = (int) materializedRows.get(0).getField(0); assertFreshness(viewName, "STALE"); assertThat(computeActual("SELECT last_fresh_time FROM system.metadata.materialized_views WHERE catalog_name = CURRENT_CATALOG AND schema_name = CURRENT_SCHEMA AND name = '" + viewName + "'").getOnlyValue()).isNull(); materializedRows = computeActual("SELECT * FROM " + viewName).getMaterializedRows(); - assertThat(materializedRows.size()).isEqualTo(1); + assertThat(materializedRows).hasSize(1); assertThat(materializedRows.get(0).getField(1)).isEqualTo(2); int valueFromPtf2 = (int) materializedRows.get(0).getField(0); assertThat(valueFromPtf2).isNotEqualTo(valueFromPtf1); // differs because PTF sequence_function is called directly as mv is considered stale @@ -908,14 +908,14 @@ public void testMaterializedViewCreatedFromTableFunctionAndTable() ZonedDateTime lastFreshTime = (ZonedDateTime) computeActual("SELECT last_fresh_time FROM system.metadata.materialized_views WHERE 
catalog_name = CURRENT_CATALOG AND schema_name = CURRENT_SCHEMA AND name = '" + viewName + "'").getOnlyValue(); assertThat(lastFreshTime).isNotNull(); materializedRows = computeActual("SELECT * FROM " + viewName).getMaterializedRows(); - assertThat(materializedRows.size()).isEqualTo(1); + assertThat(materializedRows).hasSize(1); assertThat(materializedRows.get(0).getField(1)).isEqualTo(2); int valueFromPtf3 = (int) materializedRows.get(0).getField(0); assertThat(valueFromPtf3).isNotEqualTo(valueFromPtf1); assertThat(valueFromPtf3).isNotEqualTo(valueFromPtf2); materializedRows = computeActual("SELECT * FROM " + viewName).getMaterializedRows(); - assertThat(materializedRows.size()).isEqualTo(1); + assertThat(materializedRows).hasSize(1); assertThat(materializedRows.get(0).getField(1)).isEqualTo(2); int valueFromPtf4 = (int) materializedRows.get(0).getField(0); assertThat(valueFromPtf4).isNotEqualTo(valueFromPtf1); diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergOrcMetricsCollection.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergOrcMetricsCollection.java index 22b5ce33331189..c25e96ba00f5be 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergOrcMetricsCollection.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergOrcMetricsCollection.java @@ -144,10 +144,10 @@ public void testMetrics() materializedRows = computeActual("select * from \"c1_metrics$files\"").getMaterializedRows(); datafile = toDataFileRecord(materializedRows.get(0)); assertThat(datafile.getRecordCount()).isEqualTo(1); - assertThat(datafile.getValueCounts().size()).isEqualTo(1); - assertThat(datafile.getNullValueCounts().size()).isEqualTo(1); - assertThat(datafile.getUpperBounds().size()).isEqualTo(1); - assertThat(datafile.getLowerBounds().size()).isEqualTo(1); + assertThat(datafile.getValueCounts()).hasSize(1); + assertThat(datafile.getNullValueCounts()).hasSize(1); + 
assertThat(datafile.getUpperBounds()).hasSize(1); + assertThat(datafile.getLowerBounds()).hasSize(1); // set c1 metrics mode to count assertUpdate("create table c1_metrics_count (c1 varchar, c2 varchar)"); @@ -162,8 +162,8 @@ public void testMetrics() materializedRows = computeActual("select * from \"c1_metrics_count$files\"").getMaterializedRows(); datafile = toDataFileRecord(materializedRows.get(0)); assertThat(datafile.getRecordCount()).isEqualTo(1); - assertThat(datafile.getValueCounts().size()).isEqualTo(1); - assertThat(datafile.getNullValueCounts().size()).isEqualTo(1); + assertThat(datafile.getValueCounts()).hasSize(1); + assertThat(datafile.getNullValueCounts()).hasSize(1); assertThat(datafile.getUpperBounds()).isNull(); assertThat(datafile.getLowerBounds()).isNull(); @@ -180,8 +180,8 @@ public void testMetrics() materializedRows = computeActual("select * from \"c1_metrics_truncate$files\"").getMaterializedRows(); datafile = toDataFileRecord(materializedRows.get(0)); assertThat(datafile.getRecordCount()).isEqualTo(1); - assertThat(datafile.getValueCounts().size()).isEqualTo(1); - assertThat(datafile.getNullValueCounts().size()).isEqualTo(1); + assertThat(datafile.getValueCounts()).hasSize(1); + assertThat(datafile.getNullValueCounts()).hasSize(1); datafile.getUpperBounds().forEach((k, v) -> assertThat(v.length()).isEqualTo(10)); datafile.getLowerBounds().forEach((k, v) -> assertThat(v.length()).isEqualTo(10)); @@ -197,10 +197,10 @@ public void testMetrics() materializedRows = computeActual("select * from \"c_metrics$files\"").getMaterializedRows(); datafile = toDataFileRecord(materializedRows.get(0)); assertThat(datafile.getRecordCount()).isEqualTo(1); - assertThat(datafile.getValueCounts().size()).isEqualTo(2); - assertThat(datafile.getNullValueCounts().size()).isEqualTo(2); - assertThat(datafile.getUpperBounds().size()).isEqualTo(2); - assertThat(datafile.getLowerBounds().size()).isEqualTo(2); + assertThat(datafile.getValueCounts()).hasSize(2); + 
assertThat(datafile.getNullValueCounts()).hasSize(2); + assertThat(datafile.getUpperBounds()).hasSize(2); + assertThat(datafile.getLowerBounds()).hasSize(2); // keep all metrics assertUpdate("create table metrics (c1 varchar, c2 varchar)"); @@ -213,10 +213,10 @@ public void testMetrics() materializedRows = computeActual("select * from \"metrics$files\"").getMaterializedRows(); datafile = toDataFileRecord(materializedRows.get(0)); assertThat(datafile.getRecordCount()).isEqualTo(1); - assertThat(datafile.getValueCounts().size()).isEqualTo(2); - assertThat(datafile.getNullValueCounts().size()).isEqualTo(2); - assertThat(datafile.getUpperBounds().size()).isEqualTo(2); - assertThat(datafile.getLowerBounds().size()).isEqualTo(2); + assertThat(datafile.getValueCounts()).hasSize(2); + assertThat(datafile.getNullValueCounts()).hasSize(2); + assertThat(datafile.getUpperBounds()).hasSize(2); + assertThat(datafile.getLowerBounds()).hasSize(2); } @Test @@ -335,7 +335,7 @@ public void testWithNaNs() datafile.getValueCounts().values().forEach(valueCount -> assertThat(valueCount).isEqualTo((Long) 3L)); // Check per-column nan value count - assertThat(datafile.getNanValueCounts().size()).isEqualTo(2); + assertThat(datafile.getNanValueCounts()).hasSize(2); assertThat(datafile.getNanValueCounts()).containsEntry(2, (Long) 1L); assertThat(datafile.getNanValueCounts()).containsEntry(3, (Long) 1L); @@ -367,8 +367,8 @@ public void testNestedTypes() // 1. top-level primitive columns // 2. 
and nested primitive fields that are not descendants of LISTs or MAPs // should appear in lowerBounds or UpperBounds - assertThat(lowerBounds.size()).isEqualTo(3); - assertThat(upperBounds.size()).isEqualTo(3); + assertThat(lowerBounds).hasSize(3); + assertThat(upperBounds).hasSize(3); // col1 assertThat(lowerBounds).containsEntry(1, "-9"); diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergSplitSource.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergSplitSource.java index db9a145fba8fd7..06db3e92853dda 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergSplitSource.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergSplitSource.java @@ -462,7 +462,7 @@ private IcebergSplit generateSplit(Table nationTable, IcebergTableHandle tableHa .forEach(builder::add); } List splits = builder.build(); - assertThat(splits.size()).isEqualTo(1); + assertThat(splits).hasSize(1); assertThat(splitSource.isFinished()).isTrue(); return splits.getFirst(); diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java index a660869fde8c5b..e0473b8c027e56 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergV2.java @@ -770,7 +770,7 @@ public void testUpdatingAllTableProperties() assertThat(partitionFields.get(0).transform().isIdentity()).isTrue(); assertThat(table.sortOrder().isSorted()).isTrue(); List sortFields = table.sortOrder().fields(); - assertThat(sortFields.size()).isEqualTo(1); + assertThat(sortFields).hasSize(1); assertThat(getOnlyElement(sortFields).sourceId()).isEqualTo(table.schema().findField("comment").fieldId()); assertQuery("SELECT * FROM " + tableName, "SELECT * FROM nation"); } diff --git 
a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestParquetPredicates.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestParquetPredicates.java index 7487641a922d67..481e5b324d26c0 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestParquetPredicates.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestParquetPredicates.java @@ -80,7 +80,7 @@ public void testParquetTupleDomainStructWithPrimitiveColumnPredicate() Map, ColumnDescriptor> descriptorsByPath = getDescriptors(fileSchema, fileSchema); TupleDomain calculatedTupleDomain = getParquetTupleDomain(descriptorsByPath, tupleDomain); - assertThat(calculatedTupleDomain.getDomains().orElseThrow().size()).isEqualTo(1); + assertThat(calculatedTupleDomain.getDomains().orElseThrow()).hasSize(1); ColumnDescriptor selectedColumnDescriptor = descriptorsByPath.get(ImmutableList.of("row_field", "b")); assertThat(calculatedTupleDomain.getDomains().orElseThrow().get(selectedColumnDescriptor)).isEqualTo(predicateDomain); } diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestPartitionFields.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestPartitionFields.java index dd2290deffcef6..aacbda15dbaa78 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestPartitionFields.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestPartitionFields.java @@ -141,7 +141,7 @@ private static void assertParseName(List columnNames, Type type, List views = catalog.getViews(SESSION, Optional.of(schemaTableName.getSchemaName())); - assertThat(views.size()).isEqualTo(1); + assertThat(views).hasSize(1); assertViewDefinition(views.get(schemaTableName), viewDefinition); assertViewDefinition(catalog.getView(SESSION, schemaTableName).orElseThrow(), viewDefinition); catalog.renameView(SESSION, schemaTableName, renamedSchemaTableName); assertThat(catalog.listTables(SESSION, 
Optional.of(namespace)).stream().map(TableInfo::tableName).toList()).doesNotContain(schemaTableName); views = catalog.getViews(SESSION, Optional.of(schemaTableName.getSchemaName())); - assertThat(views.size()).isEqualTo(1); + assertThat(views).hasSize(1); assertViewDefinition(views.get(renamedSchemaTableName), viewDefinition); assertViewDefinition(catalog.getView(SESSION, renamedSchemaTableName).orElseThrow(), viewDefinition); assertThat(catalog.getView(SESSION, schemaTableName)).isEmpty(); @@ -442,7 +442,7 @@ protected void assertViewDefinition(ConnectorViewDefinition actualView, Connecto assertThat(actualView.getOriginalSql()).isEqualTo(expectedView.getOriginalSql()); assertThat(actualView.getCatalog()).isEqualTo(expectedView.getCatalog()); assertThat(actualView.getSchema()).isEqualTo(expectedView.getSchema()); - assertThat(actualView.getColumns().size()).isEqualTo(expectedView.getColumns().size()); + assertThat(actualView.getColumns()).hasSize(expectedView.getColumns().size()); for (int i = 0; i < actualView.getColumns().size(); i++) { assertViewColumnDefinition(actualView.getColumns().get(i), expectedView.getColumns().get(i)); } diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestTrinoGlueCatalog.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestTrinoGlueCatalog.java index 8d380edd3cc118..bc12f4155d3473 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestTrinoGlueCatalog.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestTrinoGlueCatalog.java @@ -182,7 +182,7 @@ public void testCreateMaterializedViewWithSystemSecurity() .filter(info -> info.extendedRelationType() == TableInfo.ExtendedRelationType.TRINO_MATERIALIZED_VIEW) .map(TableInfo::tableName) .toList(); - assertThat(materializedViews.size()).isEqualTo(1); + assertThat(materializedViews).hasSize(1); 
assertThat(materializedViews.get(0).getTableName()).isEqualTo(table); Optional returned = glueTrinoCatalog.getMaterializedView(SESSION, materializedViews.get(0)); assertThat(returned).isPresent(); diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestTrinoRestCatalog.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestTrinoRestCatalog.java index 6ad4a2792efea8..b868fb0fe4f37e 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestTrinoRestCatalog.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/rest/TestTrinoRestCatalog.java @@ -169,14 +169,14 @@ public void testView() assertThat(catalog.listTables(SESSION, Optional.of(namespace)).stream()).contains(new TableInfo(schemaTableName, OTHER_VIEW)); Map views = catalog.getViews(SESSION, Optional.of(schemaTableName.getSchemaName())); - assertThat(views.size()).isEqualTo(1); + assertThat(views).hasSize(1); assertViewDefinition(views.get(schemaTableName), viewDefinition); assertViewDefinition(catalog.getView(SESSION, schemaTableName).orElseThrow(), viewDefinition); catalog.renameView(SESSION, schemaTableName, renamedSchemaTableName); assertThat(catalog.listTables(SESSION, Optional.of(namespace)).stream().map(TableInfo::tableName).toList()).doesNotContain(schemaTableName); views = catalog.getViews(SESSION, Optional.of(schemaTableName.getSchemaName())); - assertThat(views.size()).isEqualTo(1); + assertThat(views).hasSize(1); assertViewDefinition(views.get(renamedSchemaTableName), viewDefinition); assertViewDefinition(catalog.getView(SESSION, renamedSchemaTableName).orElseThrow(), viewDefinition); assertThat(catalog.getView(SESSION, schemaTableName)).isEmpty(); diff --git a/plugin/trino-kafka-event-listener/src/test/java/io/trino/plugin/eventlistener/kafka/TestKafkaEventListenerConfig.java 
b/plugin/trino-kafka-event-listener/src/test/java/io/trino/plugin/eventlistener/kafka/TestKafkaEventListenerConfig.java index 62213686b633db..ce64d11acbf883 100644 --- a/plugin/trino-kafka-event-listener/src/test/java/io/trino/plugin/eventlistener/kafka/TestKafkaEventListenerConfig.java +++ b/plugin/trino-kafka-event-listener/src/test/java/io/trino/plugin/eventlistener/kafka/TestKafkaEventListenerConfig.java @@ -96,7 +96,7 @@ void testExcludedFields() KafkaEventListenerConfig conf = new KafkaEventListenerConfig(); // check default Set excludedFields = conf.getExcludedFields(); - assertThat(excludedFields.size()).isEqualTo(0); + assertThat(excludedFields).isEmpty(); // check setting multiple conf.setExcludedFields(Set.of("payload", "plan", "user", "groups")); diff --git a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisTableDescriptionSupplier.java b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisTableDescriptionSupplier.java index a001f6fabdc718..67dad656776705 100644 --- a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisTableDescriptionSupplier.java +++ b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisTableDescriptionSupplier.java @@ -77,7 +77,7 @@ public void testTableDefinition() assertThat(tableHandle.streamName()).isEqualTo("test_kinesis_stream"); assertThat(tableHandle.messageDataFormat()).isEqualTo("json"); Map columnHandles = metadata.getColumnHandles(SESSION, tableHandle); - assertThat(columnHandles.size()).isEqualTo(14); + assertThat(columnHandles).hasSize(14); assertThat(columnHandles.values().stream().filter(x -> ((KinesisColumnHandle) x).isInternal()).count()).isEqualTo(10); } @@ -89,7 +89,7 @@ public void testRelatedObjects() SchemaTableName tblName = new SchemaTableName("prod", "test_table"); List schemas = metadata.listSchemaNames(null); - assertThat(schemas.size()).isEqualTo(1); + assertThat(schemas).hasSize(1); assertThat(schemas.get(0)).isEqualTo("prod"); 
KinesisTableHandle tblHandle = metadata.getTableHandle(null, tblName, Optional.empty(), Optional.empty()); diff --git a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestRecordAccess.java b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestRecordAccess.java index 223f0c3c096663..442df8a6f576f9 100644 --- a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestRecordAccess.java +++ b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestRecordAccess.java @@ -210,13 +210,13 @@ private void testJsonStream(int uncompressedMessages, int compressedMessages, St assertThat(result.getRowCount()).isEqualTo(uncompressedMessages + compressedMessages); List types = result.getTypes(); - assertThat(types.size()).isEqualTo(5); + assertThat(types).hasSize(5); assertThat(types.get(0).toString()).isEqualTo("bigint"); assertThat(types.get(1).toString()).isEqualTo("varchar"); log.info("Types : %s", types); List rows = result.getMaterializedRows(); - assertThat(rows.size()).isEqualTo(uncompressedMessages + compressedMessages); + assertThat(rows).hasSize(uncompressedMessages + compressedMessages); for (MaterializedRow row : rows) { assertThat(row.getFieldCount()).isEqualTo(5); assertThat((long) row.getFields().get(0) >= 100).isTrue(); diff --git a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/s3config/TestS3TableConfigClient.java b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/s3config/TestS3TableConfigClient.java index 7d17de5f6d146d..b56cf500883df4 100644 --- a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/s3config/TestS3TableConfigClient.java +++ b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/s3config/TestS3TableConfigClient.java @@ -128,6 +128,6 @@ public void testTableReading() assertThat(tableHandle.streamName()).isEqualTo("test123"); assertThat(tableHandle.messageDataFormat()).isEqualTo("json"); Map columnHandles = metadata.getColumnHandles(SESSION, tableHandle); - 
assertThat(columnHandles.size()).isEqualTo(12); + assertThat(columnHandles).hasSize(12); } } diff --git a/plugin/trino-memory/src/test/java/io/trino/plugin/memory/TestMemoryPagesStore.java b/plugin/trino-memory/src/test/java/io/trino/plugin/memory/TestMemoryPagesStore.java index 6d94ea0316486b..f5336e720dc147 100644 --- a/plugin/trino-memory/src/test/java/io/trino/plugin/memory/TestMemoryPagesStore.java +++ b/plugin/trino-memory/src/test/java/io/trino/plugin/memory/TestMemoryPagesStore.java @@ -70,14 +70,14 @@ public void testInsertPage() { createTable(0L, 0L); insertToTable(0L, 0L); - assertThat(pagesStore.getPages(0L, 0, 1, new int[] {0}, List.of(INTEGER), POSITIONS_PER_PAGE, OptionalLong.empty(), OptionalDouble.empty()).size()).isEqualTo(1); + assertThat(pagesStore.getPages(0L, 0, 1, new int[] {0}, List.of(INTEGER), POSITIONS_PER_PAGE, OptionalLong.empty(), OptionalDouble.empty())).hasSize(1); } @Test public void testInsertPageWithoutCreate() { insertToTable(0L, 0L); - assertThat(pagesStore.getPages(0L, 0, 1, new int[] {0}, List.of(INTEGER), POSITIONS_PER_PAGE, OptionalLong.empty(), OptionalDouble.empty()).size()).isEqualTo(1); + assertThat(pagesStore.getPages(0L, 0, 1, new int[] {0}, List.of(INTEGER), POSITIONS_PER_PAGE, OptionalLong.empty(), OptionalDouble.empty())).hasSize(1); } @Test diff --git a/plugin/trino-pinot/src/test/java/io/trino/plugin/pinot/TestPinotSplitManager.java b/plugin/trino-pinot/src/test/java/io/trino/plugin/pinot/TestPinotSplitManager.java index 7fbb1260b209ff..97ddbcd49d95dd 100755 --- a/plugin/trino-pinot/src/test/java/io/trino/plugin/pinot/TestPinotSplitManager.java +++ b/plugin/trino-pinot/src/test/java/io/trino/plugin/pinot/TestPinotSplitManager.java @@ -100,7 +100,7 @@ public void testHybridSegmentSplitsOneSegmentPerServer() private void assertSplits(List splits, int numSplitsExpected, PinotSplit.SplitType splitType) { - assertThat(splits.size()).isEqualTo(numSplitsExpected); + assertThat(splits).hasSize(numSplitsExpected); 
splits.forEach(s -> assertThat(s.getSplitType()).isEqualTo(splitType)); } diff --git a/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/BaseTestDbResourceGroupsFlywayMigration.java b/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/BaseTestDbResourceGroupsFlywayMigration.java index 55450c8ac40073..d83512ba67a591 100644 --- a/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/BaseTestDbResourceGroupsFlywayMigration.java +++ b/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/BaseTestDbResourceGroupsFlywayMigration.java @@ -80,7 +80,7 @@ public void testMigrationWithNonEmptyDatabase() dropAllTables(); } - protected void verifyResourceGroupsSchema(long expectedPropertiesCount) + protected void verifyResourceGroupsSchema(int expectedPropertiesCount) { verifyResultSetCount("SELECT name FROM resource_groups_global_properties", expectedPropertiesCount); verifyResultSetCount("SELECT name FROM resource_groups", 0); @@ -88,11 +88,11 @@ protected void verifyResourceGroupsSchema(long expectedPropertiesCount) verifyResultSetCount("SELECT environment FROM exact_match_source_selectors", 0); } - private void verifyResultSetCount(String sql, long expectedCount) + private void verifyResultSetCount(String sql, int expectedCount) { List results = jdbi.withHandle(handle -> handle.createQuery(sql).mapTo(String.class).list()); - assertThat(results.size()).isEqualTo(expectedCount); + assertThat(results).hasSize(expectedCount); } protected void dropAllTables() diff --git a/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/TestDbResourceGroupConfigurationManager.java b/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/TestDbResourceGroupConfigurationManager.java index e79940dcc16482..6833841ef1152e 100644 --- 
a/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/TestDbResourceGroupConfigurationManager.java +++ b/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/TestDbResourceGroupConfigurationManager.java @@ -83,22 +83,22 @@ public void testEnvironments() // check the prod configuration DbResourceGroupConfigurationManager manager = new DbResourceGroupConfigurationManager(listener -> {}, new DbResourceGroupConfig(), daoProvider.get(), prodEnvironment); List groups = manager.getRootGroups(); - assertThat(groups.size()).isEqualTo(1); + assertThat(groups).hasSize(1); InternalResourceGroup prodGlobal = new InternalResourceGroup("prod_global", (group, export) -> {}, directExecutor()); manager.configure(prodGlobal, new SelectionContext<>(prodGlobal.getId(), new ResourceGroupIdTemplate("prod_global"))); assertEqualsResourceGroup(prodGlobal, "10MB", 1000, 100, 100, WEIGHTED, DEFAULT_WEIGHT, true, Duration.ofHours(1), Duration.ofDays(1)); - assertThat(manager.getSelectors().size()).isEqualTo(1); + assertThat(manager.getSelectors()).hasSize(1); ResourceGroupSelector prodSelector = manager.getSelectors().get(0); ResourceGroupId prodResourceGroupId = prodSelector.match(new SelectionCriteria(true, "prod_user", ImmutableSet.of(), Optional.empty(), ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES, Optional.empty())).get().getResourceGroupId(); assertThat(prodResourceGroupId.toString()).isEqualTo("prod_global"); // check the dev configuration manager = new DbResourceGroupConfigurationManager(listener -> {}, new DbResourceGroupConfig(), daoProvider.get(), devEnvironment); - assertThat(groups.size()).isEqualTo(1); + assertThat(groups).hasSize(1); InternalResourceGroup devGlobal = new InternalResourceGroup("dev_global", (group, export) -> {}, directExecutor()); manager.configure(devGlobal, new SelectionContext<>(prodGlobal.getId(), new ResourceGroupIdTemplate("dev_global"))); assertEqualsResourceGroup(devGlobal, "1MB", 1000, 
100, 100, WEIGHTED, DEFAULT_WEIGHT, true, Duration.ofHours(1), Duration.ofDays(1)); - assertThat(manager.getSelectors().size()).isEqualTo(1); + assertThat(manager.getSelectors()).hasSize(1); ResourceGroupSelector devSelector = manager.getSelectors().get(0); ResourceGroupId devResourceGroupId = devSelector.match(new SelectionCriteria(true, "dev_user", ImmutableSet.of(), Optional.empty(), ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES, Optional.empty())).get().getResourceGroupId(); assertThat(devResourceGroupId.toString()).isEqualTo("dev_global"); @@ -235,13 +235,13 @@ public void testExactMatchSelector() config.setExactMatchSelectorEnabled(true); DbResourceGroupConfigurationManager manager = new DbResourceGroupConfigurationManager(listener -> {}, config, daoProvider.get(), ENVIRONMENT); manager.load(); - assertThat(manager.getSelectors().size()).isEqualTo(2); + assertThat(manager.getSelectors()).hasSize(2); assertThat(manager.getSelectors().get(0)).isInstanceOf(DbSourceExactMatchSelector.class); config.setExactMatchSelectorEnabled(false); manager = new DbResourceGroupConfigurationManager(listener -> {}, config, daoProvider.get(), ENVIRONMENT); manager.load(); - assertThat(manager.getSelectors().size()).isEqualTo(1); + assertThat(manager.getSelectors()).hasSize(1); assertThat(manager.getSelectors().get(0) instanceof DbSourceExactMatchSelector).isFalse(); } @@ -276,7 +276,7 @@ public void testSelectorPriority() manager.load(); List selectors = manager.getSelectors(); - assertThat(selectors.size()).isEqualTo(expectedUsers.size()); + assertThat(selectors).hasSize(expectedUsers.size()); // when we load the selectors we expect the selector list to be ordered by priority expectedUsers.sort(Comparator.comparingInt(Integer::parseInt).reversed()); diff --git a/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/TestResourceGroupsDao.java 
b/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/TestResourceGroupsDao.java index d2e92dc2964d94..5177030452ade1 100644 --- a/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/TestResourceGroupsDao.java +++ b/plugin/trino-resource-group-managers/src/test/java/io/trino/plugin/resourcegroups/db/TestResourceGroupsDao.java @@ -82,7 +82,7 @@ private static void testResourceGroupInsert(H2ResourceGroupsDao dao, Map records = dao.getResourceGroups(ENVIRONMENT); - assertThat(records.size()).isEqualTo(2); + assertThat(records).hasSize(2); map.put(1L, new ResourceGroupSpecBuilder(1, new ResourceGroupNameTemplate("global"), "100%", 100, Optional.of(100), 100, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty())); map.put(2L, new ResourceGroupSpecBuilder(2, new ResourceGroupNameTemplate("bi"), "50%", 50, Optional.of(50), 50, Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.of(1L))); compareResourceGroups(map, records); @@ -287,7 +287,7 @@ public void testExactMatchSelector() private static void compareResourceGroups(Map map, List records) { - assertThat(map.size()).isEqualTo(records.size()); + assertThat(map).hasSize(records.size()); for (ResourceGroupSpecBuilder record : records) { ResourceGroupSpecBuilder expected = map.get(record.getId()); assertThat(record.build()).isEqualTo(expected.build()); @@ -296,7 +296,7 @@ private static void compareResourceGroups(Map ma private static void compareSelectors(Map map, List records) { - assertThat(map.size()).isEqualTo(records.size()); + assertThat(map).hasSize(records.size()); for (SelectorRecord record : records) { SelectorRecord expected = map.get(record.getResourceGroupId()); assertThat(record.getResourceGroupId()).isEqualTo(expected.getResourceGroupId()); diff --git 
a/plugin/trino-session-property-managers/src/test/java/io/trino/plugin/session/db/TestDbSessionPropertyManager.java b/plugin/trino-session-property-managers/src/test/java/io/trino/plugin/session/db/TestDbSessionPropertyManager.java index 47be530e294e54..28798d17fa7960 100644 --- a/plugin/trino-session-property-managers/src/test/java/io/trino/plugin/session/db/TestDbSessionPropertyManager.java +++ b/plugin/trino-session-property-managers/src/test/java/io/trino/plugin/session/db/TestDbSessionPropertyManager.java @@ -222,7 +222,7 @@ public void testOrderingOfSpecs() assertThat(sessionProperties).containsEntry("prop_1", "val_1_3"); assertThat(sessionProperties).containsEntry("prop_2", "val_2_2"); assertThat(sessionProperties).containsEntry("prop_3", "val_3_1"); - assertThat(sessionProperties.size()).isEqualTo(3); + assertThat(sessionProperties).hasSize(3); } @Test diff --git a/plugin/trino-teradata-functions/src/test/java/io/trino/plugin/teradata/functions/dateformat/TestDateFormatParser.java b/plugin/trino-teradata-functions/src/test/java/io/trino/plugin/teradata/functions/dateformat/TestDateFormatParser.java index 13bb8632896d47..04278d16529028 100644 --- a/plugin/trino-teradata-functions/src/test/java/io/trino/plugin/teradata/functions/dateformat/TestDateFormatParser.java +++ b/plugin/trino-teradata-functions/src/test/java/io/trino/plugin/teradata/functions/dateformat/TestDateFormatParser.java @@ -37,9 +37,9 @@ public void testTokenize() @Test public void testGreedinessLongFirst() { - assertThat(DateFormatParser.tokenize("yy").size()).isEqualTo(1); - assertThat(DateFormatParser.tokenize("yyyy").size()).isEqualTo(1); - assertThat(DateFormatParser.tokenize("yyyyyy").size()).isEqualTo(2); + assertThat(DateFormatParser.tokenize("yy")).hasSize(1); + assertThat(DateFormatParser.tokenize("yyyy")).hasSize(1); + assertThat(DateFormatParser.tokenize("yyyyyy")).hasSize(2); } @Test diff --git a/service/trino-verifier/src/test/java/io/trino/verifier/TestShadowing.java 
b/service/trino-verifier/src/test/java/io/trino/verifier/TestShadowing.java index dd93675b3eaa1d..e0ee2bcdc58bb8 100644 --- a/service/trino-verifier/src/test/java/io/trino/verifier/TestShadowing.java +++ b/service/trino-verifier/src/test/java/io/trino/verifier/TestShadowing.java @@ -77,11 +77,11 @@ public void testCreateTableAsSelect() Query query = new Query(CATALOG, SCHEMA, ImmutableList.of(), "CREATE TABLE my_test_table AS SELECT 1 column1, CAST('2.0' AS DOUBLE) column2 LIMIT 1", ImmutableList.of(), null, null, ImmutableMap.of()); QueryRewriter rewriter = new QueryRewriter(parser, URL, QualifiedName.of("tmp_"), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 1, new Duration(10, SECONDS)); Query rewrittenQuery = rewriter.shadowQuery(query); - assertThat(rewrittenQuery.getPreQueries().size()).isEqualTo(1); - assertThat(rewrittenQuery.getPostQueries().size()).isEqualTo(1); + assertThat(rewrittenQuery.getPreQueries()).hasSize(1); + assertThat(rewrittenQuery.getPostQueries()).hasSize(1); CreateTableAsSelect createTableAs = (CreateTableAsSelect) parser.createStatement(rewrittenQuery.getPreQueries().get(0)); - assertThat(createTableAs.getName().getParts().size()).isEqualTo(1); + assertThat(createTableAs.getName().getParts()).hasSize(1); assertThat(createTableAs.getName().getSuffix()).startsWith("tmp_"); assertThat(createTableAs.getName().getSuffix()).doesNotContain("my_test_table"); @@ -104,9 +104,9 @@ public void testCreateTableAsSelectDifferentCatalog() Query query = new Query(CATALOG, SCHEMA, ImmutableList.of(), "CREATE TABLE public.my_test_table2 AS SELECT 1 column1, 2E0 column2", ImmutableList.of(), null, null, ImmutableMap.of()); QueryRewriter rewriter = new QueryRewriter(parser, URL, QualifiedName.of("other_catalog", "other_schema", "tmp_"), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 1, new Duration(10, SECONDS)); Query rewrittenQuery = rewriter.shadowQuery(query); - 
assertThat(rewrittenQuery.getPreQueries().size()).isEqualTo(1); + assertThat(rewrittenQuery.getPreQueries()).hasSize(1); CreateTableAsSelect createTableAs = (CreateTableAsSelect) parser.createStatement(rewrittenQuery.getPreQueries().get(0)); - assertThat(createTableAs.getName().getParts().size()).isEqualTo(3); + assertThat(createTableAs.getName().getParts()).hasSize(3); assertThat(createTableAs.getName().getPrefix().get()).isEqualTo(QualifiedName.of("other_catalog", "other_schema")); assertThat(createTableAs.getName().getSuffix()).startsWith("tmp_"); assertThat(createTableAs.getName().getSuffix()).doesNotContain("my_test_table"); @@ -122,9 +122,9 @@ public void testInsert() QueryRewriter rewriter = new QueryRewriter(parser, URL, QualifiedName.of("other_catalog", "other_schema", "tmp_"), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), 1, new Duration(10, SECONDS)); Query rewrittenQuery = rewriter.shadowQuery(query); - assertThat(rewrittenQuery.getPreQueries().size()).isEqualTo(2); + assertThat(rewrittenQuery.getPreQueries()).hasSize(2); CreateTable createTable = (CreateTable) parser.createStatement(rewrittenQuery.getPreQueries().get(0)); - assertThat(createTable.getName().getParts().size()).isEqualTo(3); + assertThat(createTable.getName().getParts()).hasSize(3); assertThat(createTable.getName().getPrefix().get()).isEqualTo(QualifiedName.of("other_catalog", "other_schema")); assertThat(createTable.getName().getSuffix()).startsWith("tmp_"); assertThat(createTable.getName().getSuffix()).doesNotContain("test_insert_table"); @@ -139,7 +139,7 @@ public void testInsert() SingleColumn columnC = new SingleColumn(new FunctionCall(QualifiedName.of("checksum"), ImmutableList.of(new Identifier("C")))); assertThat(parser.createStatement(rewrittenQuery.getQuery())).isEqualTo(simpleQuery(selectList(columnA, columnB, columnC), table)); - assertThat(rewrittenQuery.getPostQueries().size()).isEqualTo(1); + assertThat(rewrittenQuery.getPostQueries()).hasSize(1); 
assertThat(parser.createStatement(rewrittenQuery.getPostQueries().get(0))).isEqualTo(new DropTable(new NodeLocation(1, 1), createTable.getName(), true)); } } diff --git a/service/trino-verifier/src/test/java/io/trino/verifier/TestVerifierRewriteQueries.java b/service/trino-verifier/src/test/java/io/trino/verifier/TestVerifierRewriteQueries.java index e96bd3274b1cea..c009fbe51ff8b8 100644 --- a/service/trino-verifier/src/test/java/io/trino/verifier/TestVerifierRewriteQueries.java +++ b/service/trino-verifier/src/test/java/io/trino/verifier/TestVerifierRewriteQueries.java @@ -100,7 +100,7 @@ public void testSingleThread() config.setControlGateway(URL); config.setThreadCount(1); List rewrittenQueries = rewriteQueries(parser, config, queryPairs); - assertThat(rewrittenQueries.size()).isEqualTo(queryPairs.size()); + assertThat(rewrittenQueries).hasSize(queryPairs.size()); } @Test @@ -109,7 +109,7 @@ public void testMultipleThreads() config.setControlGateway(URL); config.setThreadCount(5); List rewrittenQueries = rewriteQueries(parser, config, queryPairs); - assertThat(rewrittenQueries.size()).isEqualTo(queryPairs.size()); + assertThat(rewrittenQueries).hasSize(queryPairs.size()); } @Test @@ -129,7 +129,7 @@ public void testQueryRewriteException() .addAll(queryPairs) .add(new QueryPair(QUERY_SUITE, QUERY_NAME, invalidQuery, invalidQuery)) .build()); - assertThat(rewrittenQueries.size()).isEqualTo(queryPairs.size()); + assertThat(rewrittenQueries).hasSize(queryPairs.size()); } @Test @@ -137,6 +137,6 @@ public void testSQLException() { config.setControlGateway("invalid:url"); List rewrittenQueries = rewriteQueries(parser, config, queryPairs); - assertThat(rewrittenQueries.size()).isEqualTo(0); + assertThat(rewrittenQueries).isEmpty(); } } diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/TpcTestUtils.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/TpcTestUtils.java index 7450507bfb7e7e..9e62acd30f0497 100644 --- 
a/testing/trino-product-tests/src/main/java/io/trino/tests/product/TpcTestUtils.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/TpcTestUtils.java @@ -132,7 +132,7 @@ public static Object[][] tpcdsQueries() public static void assertResults(List expected, String query) { List> result = onTrino().executeQuery(query).rows(); - assertThat(result.size()).isEqualTo(expected.size()); + assertThat(result).hasSize(expected.size()); for (int i = 0; i < expected.size(); i++) { String expectedRow = expected.get(i); diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergFormatVersionCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergFormatVersionCompatibility.java index 7cffd60a91e829..e81250269a1149 100644 --- a/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergFormatVersionCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergFormatVersionCompatibility.java @@ -51,7 +51,7 @@ public void testTrinoTimeTravelReadTableCreatedByEarlyVersionTrino() List expected = onCompatibilityTestServer().executeQuery(format("SELECT * FROM %s", tableName)).rows().stream() .map(row -> row(row.toArray())) .collect(toImmutableList()); - assertThat(expected.size()).isEqualTo(3); + assertThat(expected).hasSize(3); assertThat(onTrino().executeQuery(format("SELECT * FROM %s FOR VERSION AS OF %d", tableName, latestSnapshotId))).containsOnly(expected); onCompatibilityTestServer().executeQuery(format("DROP TABLE %s", tableName)); diff --git a/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergSparkCompatibility.java b/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergSparkCompatibility.java index 4084b889768f6b..afa3660059c632 100644 --- 
a/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergSparkCompatibility.java +++ b/testing/trino-product-tests/src/main/java/io/trino/tests/product/iceberg/TestIcebergSparkCompatibility.java @@ -2630,7 +2630,7 @@ private void validatePartitioning(String baseTableName, String sparkTableName, L List partitioning = onTrino().executeQuery(format("SELECT partition, record_count FROM iceberg.default.\"%s$partitions\"", baseTableName)) .column(1); Set partitions = partitioning.stream().map(String::valueOf).collect(toUnmodifiableSet()); - assertThat(partitions.size()).isEqualTo(expectedValues.size()); + assertThat(partitions).hasSize(expectedValues.size()); assertThat(partitions).containsAll(trinoResult); List sparkResult = expectedValues.stream().map(m -> m.entrySet().stream() @@ -2639,7 +2639,7 @@ private void validatePartitioning(String baseTableName, String sparkTableName, L .collect(toImmutableList()); partitioning = onSpark().executeQuery(format("SELECT partition from %s.files", sparkTableName)).column(1); partitions = partitioning.stream().map(String::valueOf).collect(toUnmodifiableSet()); - assertThat(partitions.size()).isEqualTo(expectedValues.size()); + assertThat(partitions).hasSize(expectedValues.size()); assertThat(partitions).containsAll(sparkResult); } diff --git a/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestAggregations.java b/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestAggregations.java index 5339ac4ef13bc8..c165e64ee77939 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestAggregations.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestAggregations.java @@ -1013,7 +1013,7 @@ public void testGroupByNanRow() { MaterializedResult actual = computeActual("SELECT a, b, c FROM (VALUES ROW(nan(), 1, 2), ROW(nan(), 1, 2)) t(a, b, c) GROUP BY 1, 2, 3"); List actualRows = actual.getMaterializedRows(); - 
assertThat(actualRows.size()).isEqualTo(1); + assertThat(actualRows).hasSize(1); assertThat(Double.isNaN((Double) actualRows.get(0).getField(0))).isTrue(); assertThat(actualRows.get(0).getField(1)).isEqualTo(1); assertThat(actualRows.get(0).getField(2)).isEqualTo(2); @@ -1024,7 +1024,7 @@ public void testGroupByNanArray() { MaterializedResult actual = computeActual("SELECT a FROM (VALUES (ARRAY[nan(), 2e0, 3e0]), (ARRAY[nan(), 2e0, 3e0])) t(a) GROUP BY a"); List actualRows = actual.getMaterializedRows(); - assertThat(actualRows.size()).isEqualTo(1); + assertThat(actualRows).hasSize(1); @SuppressWarnings("unchecked") List value = (List) actualRows.get(0).getField(0); assertThat(Double.isNaN(value.get(0))).isTrue(); diff --git a/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestEngineOnlyQueries.java b/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestEngineOnlyQueries.java index 8160d920bc4189..bef0bbfdefd2ab 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestEngineOnlyQueries.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestEngineOnlyQueries.java @@ -213,7 +213,7 @@ public void testLocallyUnrepresentableTimeLiterals() public void testNodeRoster() { List result = computeActual("SELECT * FROM system.runtime.nodes").getMaterializedRows(); - assertThat(result.size()).isEqualTo(getNodeCount()); + assertThat(result).hasSize(getNodeCount()); } @Test @@ -474,7 +474,7 @@ public void testIntersect() assertQuery("SELECT NULL, NULL INTERSECT SELECT NULL, NULL FROM nation"); MaterializedResult emptyResult = computeActual("SELECT 100 INTERSECT (SELECT regionkey FROM nation WHERE nationkey <10)"); - assertThat(emptyResult.getMaterializedRows().size()).isEqualTo(0); + assertThat(emptyResult.getMaterializedRows()).isEmpty(); } @Test @@ -531,7 +531,7 @@ public void testExcept() "EXCEPT (SELECT * FROM (VALUES 1) EXCEPT SELECT * FROM (VALUES 1))"); MaterializedResult emptyResult = 
computeActual("SELECT 0 EXCEPT (SELECT regionkey FROM nation WHERE nationkey <10)"); - assertThat(emptyResult.getMaterializedRows().size()).isEqualTo(0); + assertThat(emptyResult.getMaterializedRows()).hasSize(0); } @Test @@ -1680,7 +1680,7 @@ public void testArrayShuffle() for (int i = 0; i < 3; i++) { MaterializedResult results = computeActual(format("SELECT shuffle(ARRAY %s) FROM orders LIMIT 10", expected)); List rows = results.getMaterializedRows(); - assertThat(rows.size()).isEqualTo(10); + assertThat(rows).hasSize(10); for (MaterializedRow row : rows) { @SuppressWarnings("unchecked") @@ -2180,7 +2180,7 @@ public void testValuesWithNonTrivialType() MaterializedResult actual = computeActual("VALUES (0E0/0E0, 1E0/0E0, -1E0/0E0)"); List rows = actual.getMaterializedRows(); - assertThat(rows.size()).isEqualTo(1); + assertThat(rows).hasSize(1); MaterializedRow row = rows.get(0); assertThat(((Double) row.getField(0)).isNaN()).isTrue(); @@ -2194,7 +2194,7 @@ public void testValuesWithTimestamp() MaterializedResult actual = computeActual("VALUES (current_timestamp, now())"); List rows = actual.getMaterializedRows(); - assertThat(rows.size()).isEqualTo(1); + assertThat(rows).hasSize(1); MaterializedRow row = rows.get(0); assertThat(row.getField(0)).isEqualTo(row.getField(1)); @@ -2454,7 +2454,7 @@ public void testRowNumberNoOptimization() " FROM orders\n" + ") WHERE NOT rn <= 10"); MaterializedResult all = computeExpected("SELECT orderkey, orderstatus FROM orders", actual.getTypes()); - assertThat(actual.getMaterializedRows().size()).isEqualTo(all.getMaterializedRows().size() - 10); + assertThat(actual.getMaterializedRows()).hasSize(all.getMaterializedRows().size() - 10); assertContains(all, actual); actual = computeActual("" + @@ -2463,7 +2463,7 @@ public void testRowNumberNoOptimization() " FROM orders\n" + ") WHERE rn - 5 <= 10"); all = computeExpected("SELECT orderkey, orderstatus FROM orders", actual.getTypes()); - 
assertThat(actual.getMaterializedRows().size()).isEqualTo(15); + assertThat(actual.getMaterializedRows()).hasSize(15); assertContains(all, actual); } @@ -2474,25 +2474,25 @@ public void testRowNumberLimit() "SELECT row_number() OVER (PARTITION BY orderstatus) rn, orderstatus\n" + "FROM orders\n" + "LIMIT 10"); - assertThat(actual.getMaterializedRows().size()).isEqualTo(10); + assertThat(actual.getMaterializedRows()).hasSize(10); actual = computeActual("" + "SELECT row_number() OVER (PARTITION BY orderstatus ORDER BY orderkey) rn\n" + "FROM orders\n" + "LIMIT 10"); - assertThat(actual.getMaterializedRows().size()).isEqualTo(10); + assertThat(actual.getMaterializedRows()).hasSize(10); actual = computeActual("" + "SELECT row_number() OVER () rn, orderstatus\n" + "FROM orders\n" + "LIMIT 10"); - assertThat(actual.getMaterializedRows().size()).isEqualTo(10); + assertThat(actual.getMaterializedRows()).hasSize(10); actual = computeActual("" + "SELECT row_number() OVER (ORDER BY orderkey) rn\n" + "FROM orders\n" + "LIMIT 10"); - assertThat(actual.getMaterializedRows().size()).isEqualTo(10); + assertThat(actual.getMaterializedRows()).hasSize(10); } @Test @@ -2580,7 +2580,7 @@ public void testRowNumberFilterAndLimit() .row(2, 1L) .row(2, 2L) .build(); - assertThat(actual.getMaterializedRows().size()).isEqualTo(2); + assertThat(actual.getMaterializedRows()).hasSize(2); assertContains(expected, actual); } @@ -2593,7 +2593,7 @@ public void testRowNumberUnpartitionedFilter() " FROM orders\n" + ") WHERE rn <= 5 AND orderstatus != 'Z'"); MaterializedResult all = computeExpected("SELECT orderkey, orderstatus FROM orders", actual.getTypes()); - assertThat(actual.getMaterializedRows().size()).isEqualTo(5); + assertThat(actual.getMaterializedRows()).hasSize(5); assertContains(all, actual); actual = computeActual("" + @@ -2603,7 +2603,7 @@ public void testRowNumberUnpartitionedFilter() ") WHERE rn < 5"); all = computeExpected("SELECT orderkey, orderstatus FROM orders", 
actual.getTypes()); - assertThat(actual.getMaterializedRows().size()).isEqualTo(4); + assertThat(actual.getMaterializedRows()).hasSize(4); assertContains(all, actual); actual = computeActual("" + @@ -2613,7 +2613,7 @@ public void testRowNumberUnpartitionedFilter() ") LIMIT 5"); all = computeExpected("SELECT orderkey, orderstatus FROM orders", actual.getTypes()); - assertThat(actual.getMaterializedRows().size()).isEqualTo(5); + assertThat(actual.getMaterializedRows()).hasSize(5); assertContains(all, actual); } @@ -2628,7 +2628,7 @@ public void testRowNumberPartitionedFilter() MaterializedResult all = computeExpected("SELECT orderkey, orderstatus FROM orders", actual.getTypes()); // there are 3 DISTINCT orderstatus, so expect 15 rows. - assertThat(actual.getMaterializedRows().size()).isEqualTo(15); + assertThat(actual.getMaterializedRows()).hasSize(15); assertContains(all, actual); // Test for unreferenced outputs @@ -2640,7 +2640,7 @@ public void testRowNumberPartitionedFilter() all = computeExpected("SELECT orderkey FROM orders", actual.getTypes()); // there are 3 distinct orderstatus, so expect 15 rows. 
- assertThat(actual.getMaterializedRows().size()).isEqualTo(15); + assertThat(actual.getMaterializedRows()).hasSize(15); assertContains(all, actual); } @@ -3647,7 +3647,7 @@ public void testOffset() MaterializedResult actual = computeActual("SELECT x FROM " + values + " OFFSET 2 ROWS"); MaterializedResult all = computeExpected("SELECT x FROM " + values, actual.getTypes()); - assertThat(actual.getMaterializedRows().size()).isEqualTo(2); + assertThat(actual.getMaterializedRows()).hasSize(2); assertThat(actual.getMaterializedRows().get(0)) .isNotEqualTo(actual.getMaterializedRows().get(1)); assertContains(all, actual); @@ -3661,7 +3661,7 @@ public void testOffsetWithFetch() MaterializedResult actual = computeActual("SELECT x FROM " + values + " OFFSET 2 ROWS FETCH NEXT ROW ONLY"); MaterializedResult all = computeExpected("SELECT x FROM " + values, actual.getTypes()); - assertThat(actual.getMaterializedRows().size()).isEqualTo(1); + assertThat(actual.getMaterializedRows()).hasSize(1); assertContains(all, actual); } @@ -6184,7 +6184,7 @@ public void testShowFunctions() assertThat(functions.containsKey("avg")) .describedAs("Expected function names " + functions + " to contain 'avg'") .isTrue(); - assertThat(functions.get("avg").asList().size()).isEqualTo(6); + assertThat(functions.get("avg").asList()).hasSize(6); assertThat(functions.get("avg").asList().get(0).getField(1)).isEqualTo("decimal(p,s)"); assertThat(functions.get("avg").asList().get(0).getField(2)).isEqualTo("decimal(p,s)"); assertThat(functions.get("avg").asList().get(0).getField(3)).isEqualTo("aggregate"); diff --git a/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestQueries.java b/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestQueries.java index af50c2c094a7d1..260d9cc2571c2a 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestQueries.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/AbstractTestQueries.java @@ -139,7 +139,7 @@ public 
void testLimit() { MaterializedResult actual = computeActual("SELECT orderkey FROM orders LIMIT 10"); MaterializedResult all = computeExpected("SELECT orderkey FROM orders", actual.getTypes()); - assertThat(actual.getMaterializedRows().size()).isEqualTo(10); + assertThat(actual.getMaterializedRows()).hasSize(10); assertContains(all, actual); actual = computeActual( @@ -149,7 +149,7 @@ public void testLimit() "(SELECT orderkey, custkey FROM orders LIMIT 5) UNION ALL " + "SELECT orderkey, custkey FROM orders LIMIT 10"); all = computeExpected("SELECT orderkey, custkey FROM orders", actual.getTypes()); - assertThat(actual.getMaterializedRows().size()).isEqualTo(10); + assertThat(actual.getMaterializedRows()).hasSize(10); assertContains(all, actual); // with ORDER BY @@ -175,7 +175,7 @@ public void testLimitWithAggregation() MaterializedResult actual = computeActual("SELECT custkey, SUM(orderkey) FROM orders GROUP BY custkey LIMIT 10"); MaterializedResult all = computeExpected("SELECT custkey, SUM(orderkey) FROM orders GROUP BY custkey", actual.getTypes()); - assertThat(actual.getMaterializedRows().size()).isEqualTo(10); + assertThat(actual.getMaterializedRows()).hasSize(10); assertContains(all, actual); } @@ -185,7 +185,7 @@ public void testLimitInInlineView() MaterializedResult actual = computeActual("SELECT orderkey FROM (SELECT orderkey FROM orders LIMIT 100) T LIMIT 10"); MaterializedResult all = computeExpected("SELECT orderkey FROM orders", actual.getTypes()); - assertThat(actual.getMaterializedRows().size()).isEqualTo(10); + assertThat(actual.getMaterializedRows()).hasSize(10); assertContains(all, actual); } @@ -450,7 +450,7 @@ public void testTableSampleBernoulliBoundaryValues() MaterializedResult all = computeExpected("SELECT orderkey FROM orders", fullSample.getTypes()); assertContains(all, fullSample); - assertThat(emptySample.getMaterializedRows().size()).isEqualTo(0); + assertThat(emptySample.getMaterializedRows()).hasSize(0); } @Test diff --git 
a/testing/trino-testing/src/main/java/io/trino/testing/BaseConnectorTest.java b/testing/trino-testing/src/main/java/io/trino/testing/BaseConnectorTest.java index a08b5d827c76ef..48cc1c0d4f1979 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/BaseConnectorTest.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/BaseConnectorTest.java @@ -1994,7 +1994,7 @@ public void testTableSampleSystem() MaterializedResult all = computeActual("SELECT orderkey FROM orders"); assertContains(all, fullSample); - assertThat(emptySample.getMaterializedRows().size()).isEqualTo(0); + assertThat(emptySample.getMaterializedRows()).hasSize(0); assertThat(all.getMaterializedRows().size() >= randomSample.getMaterializedRows().size()).isTrue(); } @@ -2005,7 +2005,7 @@ public void testTableSampleWithFiltering() MaterializedResult halfSample = computeActual("SELECT DISTINCT orderkey, orderdate FROM orders TABLESAMPLE SYSTEM (50) WHERE orderkey BETWEEN 0 AND 9999999999"); MaterializedResult all = computeActual("SELECT orderkey, orderdate FROM orders"); - assertThat(emptySample.getMaterializedRows().size()).isEqualTo(0); + assertThat(emptySample.getMaterializedRows()).hasSize(0); // Assertions need to be loose here because SYSTEM sampling random selects data on split boundaries. In this case either all the data will be selected, or // none of it. Sampling with a 100% ratio is ignored, so that also cannot be used to guarantee results. 
assertThat(all.getMaterializedRows().size() >= halfSample.getMaterializedRows().size()).isTrue(); diff --git a/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java b/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java index 07737715bdffa2..7bdeb9113422e1 100644 --- a/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java +++ b/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerBasic.java @@ -856,7 +856,7 @@ public void testPrepareAndExecute() assertThat(queryCompletedEvent.getContext().getResourceGroupId()).isPresent(); assertThat(queryCompletedEvent.getContext().getResourceGroupId().get()).isEqualTo(createResourceGroupId("global", "user-user")); assertThat(queryCompletedEvent.getIoMetadata().getOutput()).isEqualTo(Optional.empty()); - assertThat(queryCompletedEvent.getIoMetadata().getInputs().size()).isEqualTo(0); // Prepare has no inputs + assertThat(queryCompletedEvent.getIoMetadata().getInputs()).hasSize(0); // Prepare has no inputs assertThat(queryCompletedEvent.getContext().getClientInfo().get()).isEqualTo("{\"clientVersion\":\"testVersion\"}"); assertThat(queryCreatedEvent.getMetadata().getQueryId()).isEqualTo(queryCompletedEvent.getMetadata().getQueryId()); assertThat(queryCompletedEvent.getMetadata().getPreparedQuery()).isEmpty(); @@ -880,7 +880,7 @@ public void testPrepareAndExecute() assertThat(queryCompletedEvent.getContext().getResourceGroupId()).isPresent(); assertThat(queryCompletedEvent.getContext().getResourceGroupId().get()).isEqualTo(createResourceGroupId("global", "user-user")); assertThat(queryCompletedEvent.getIoMetadata().getOutput()).isEqualTo(Optional.empty()); - assertThat(queryCompletedEvent.getIoMetadata().getInputs().size()).isEqualTo(1); + assertThat(queryCompletedEvent.getIoMetadata().getInputs()).hasSize(1); assertThat(queryCompletedEvent.getContext().getClientInfo().get()).isEqualTo("{\"clientVersion\":\"testVersion\"}"); 
assertThat(getOnlyElement(queryCompletedEvent.getIoMetadata().getInputs()).getCatalogName()).isEqualTo("tpch"); assertThat(queryCreatedEvent.getMetadata().getQueryId()).isEqualTo(queryCompletedEvent.getMetadata().getQueryId()); diff --git a/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerWithSplits.java b/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerWithSplits.java index 0857b168f6882c..dee773021b3603 100644 --- a/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerWithSplits.java +++ b/testing/trino-tests/src/test/java/io/trino/execution/TestEventListenerWithSplits.java @@ -123,7 +123,7 @@ public void testSplitsForNormalQuery() assertThat(queryCompletedEvent.getContext().getResourceGroupId()).isPresent(); assertThat(queryCompletedEvent.getContext().getResourceGroupId().get()).isEqualTo(createResourceGroupId("global", "user-user")); assertThat(queryCompletedEvent.getIoMetadata().getOutput()).isEqualTo(Optional.empty()); - assertThat(queryCompletedEvent.getIoMetadata().getInputs().size()).isEqualTo(1); + assertThat(queryCompletedEvent.getIoMetadata().getInputs()).hasSize(1); assertThat(queryCompletedEvent.getContext().getClientInfo().get()).isEqualTo("{\"clientVersion\":\"testVersion\"}"); assertThat(getOnlyElement(queryCompletedEvent.getIoMetadata().getInputs()).getCatalogName()).isEqualTo("tpch"); assertThat(queryCreatedEvent.getMetadata().getQueryId()).isEqualTo(queryCompletedEvent.getMetadata().getQueryId()); @@ -131,7 +131,7 @@ public void testSplitsForNormalQuery() assertThat(queryCompletedEvent.getStatistics().getCompletedSplits()).isEqualTo(SPLITS_PER_NODE + 2); List splitCompletedEvents = queryEvents.waitForSplitCompletedEvents(SPLITS_PER_NODE + 2, new Duration(30, SECONDS)); - assertThat(splitCompletedEvents.size()).isEqualTo(SPLITS_PER_NODE + 2); // leaf splits + aggregation split + assertThat(splitCompletedEvents).hasSize(SPLITS_PER_NODE + 2); // leaf splits + aggregation split // All 
splits must have the same query ID Set actual = splitCompletedEvents.stream() @@ -159,7 +159,7 @@ public void testSplitsForNormalQuery() // Not a write query assertThat(statistics.getWrittenBytes()).isEqualTo(0); assertThat(statistics.getWrittenRows()).isEqualTo(0); - assertThat(statistics.getStageGcStatistics().size()).isEqualTo(2); + assertThat(statistics.getStageGcStatistics()).hasSize(2); // Deterministic statistics assertThat(statistics.getPhysicalInputBytes()).isEqualTo(0); diff --git a/testing/trino-tests/src/test/java/io/trino/execution/TestPendingStageState.java b/testing/trino-tests/src/test/java/io/trino/execution/TestPendingStageState.java index 017b9b403ed6d2..240e1586191630 100644 --- a/testing/trino-tests/src/test/java/io/trino/execution/TestPendingStageState.java +++ b/testing/trino-tests/src/test/java/io/trino/execution/TestPendingStageState.java @@ -73,7 +73,7 @@ public void testPendingState() QueryInfo queryInfo = queryRunner.getCoordinator().getFullQueryInfo(queryId); assertThat(queryInfo.getState()).isEqualTo(RUNNING); assertThat(queryInfo.getOutputStage().get().getState()).isEqualTo(StageState.RUNNING); - assertThat(queryInfo.getOutputStage().get().getSubStages().size()).isEqualTo(1); + assertThat(queryInfo.getOutputStage().get().getSubStages()).hasSize(1); assertThat(queryInfo.getOutputStage().get().getSubStages().get(0).getState()).isEqualTo(StageState.PENDING); } diff --git a/testing/trino-tests/src/test/java/io/trino/execution/TestStatementStats.java b/testing/trino-tests/src/test/java/io/trino/execution/TestStatementStats.java index e902760e9e3b02..c8c852f29cc94b 100644 --- a/testing/trino-tests/src/test/java/io/trino/execution/TestStatementStats.java +++ b/testing/trino-tests/src/test/java/io/trino/execution/TestStatementStats.java @@ -48,7 +48,7 @@ public void testUniqueNodeCounts() assertThat(rootStage.getNodes()).isEqualTo(1); // one child stage - assertThat(rootStage.getSubStages().size()).isEqualTo(1); + 
assertThat(rootStage.getSubStages()).hasSize(1); // child stage has two unique nodes assertThat(rootStage.getSubStages().get(0).getNodes()).isEqualTo(2); } diff --git a/testing/trino-tests/src/test/java/io/trino/execution/resourcegroups/TestResourceGroupIntegration.java b/testing/trino-tests/src/test/java/io/trino/execution/resourcegroups/TestResourceGroupIntegration.java index c4489b3ba95f80..ea70eb3d83d20c 100644 --- a/testing/trino-tests/src/test/java/io/trino/execution/resourcegroups/TestResourceGroupIntegration.java +++ b/testing/trino-tests/src/test/java/io/trino/execution/resourcegroups/TestResourceGroupIntegration.java @@ -57,7 +57,7 @@ public void testPathToRoot() queryRunner.execute(testSessionBuilder().setCatalog("tpch").setSchema("tiny").setSource("dashboard-foo").build(), "SELECT COUNT(*), clerk FROM orders GROUP BY clerk"); List path = manager.tryGetPathToRoot(new ResourceGroupId(new ResourceGroupId(new ResourceGroupId("global"), "user-user"), "dashboard-user")) .orElseThrow(() -> new IllegalStateException("Resource group not found")); - assertThat(path.size()).isEqualTo(3); + assertThat(path).hasSize(3); assertThat(path.get(1).subGroups()).isPresent(); assertThat(path.get(2).id()).isEqualTo(new ResourceGroupId("global")); assertThat(path.get(2).hardConcurrencyLimit()).isEqualTo(100); diff --git a/testing/trino-tests/src/test/java/io/trino/execution/resourcegroups/db/TestQueuesDb.java b/testing/trino-tests/src/test/java/io/trino/execution/resourcegroups/db/TestQueuesDb.java index 382208cb703c41..0b63c136f6b812 100644 --- a/testing/trino-tests/src/test/java/io/trino/execution/resourcegroups/db/TestQueuesDb.java +++ b/testing/trino-tests/src/test/java/io/trino/execution/resourcegroups/db/TestQueuesDb.java @@ -219,7 +219,7 @@ public void testRejection() int selectorCount = getSelectors(queryRunner).size(); dao.insertSelector(4, 100_000, "user.*", null, "(?i).*reject.*", null, null, null); dbConfigurationManager.load(); - 
assertThat(getSelectors(queryRunner).size()).isEqualTo(selectorCount + 1); + assertThat(getSelectors(queryRunner)).hasSize(selectorCount + 1); // Verify the query can be submitted queryId = createQuery(queryRunner, rejectingSession(), LONG_LASTING_QUERY); waitForQueryState(queryRunner, queryId, RUNNING); diff --git a/testing/trino-tests/src/test/java/io/trino/tests/TestMetadataManager.java b/testing/trino-tests/src/test/java/io/trino/tests/TestMetadataManager.java index f31258e6fc9991..fdcb1c6f6ff92a 100644 --- a/testing/trino-tests/src/test/java/io/trino/tests/TestMetadataManager.java +++ b/testing/trino-tests/src/test/java/io/trino/tests/TestMetadataManager.java @@ -110,7 +110,7 @@ public void testMetadataIsClearedAfterQueryFinished() @Language("SQL") String sql = "SELECT * FROM nation"; queryRunner.execute(sql); - assertThat(metadataManager.getActiveQueryIds().size()).isEqualTo(0); + assertThat(metadataManager.getActiveQueryIds()).hasSize(0); } @Test @@ -121,7 +121,7 @@ public void testMetadataIsClearedAfterQueryFailed() .isInstanceOf(RuntimeException.class) .hasMessage("Division by zero"); - assertThat(metadataManager.getActiveQueryIds().size()).isEqualTo(0); + assertThat(metadataManager.getActiveQueryIds()).hasSize(0); } @Test @@ -165,7 +165,7 @@ public void testMetadataIsClearedAfterQueryCanceled() // cancel query dispatchManager.cancelQuery(queryId); - assertThat(metadataManager.getActiveQueryIds().size()).isEqualTo(0); + assertThat(metadataManager.getActiveQueryIds()).hasSize(0); } @Test diff --git a/testing/trino-tests/src/test/java/io/trino/tests/TestMinWorkerRequirement.java b/testing/trino-tests/src/test/java/io/trino/tests/TestMinWorkerRequirement.java index 7bc046b61df668..095e5fab7512e8 100644 --- a/testing/trino-tests/src/test/java/io/trino/tests/TestMinWorkerRequirement.java +++ b/testing/trino-tests/src/test/java/io/trino/tests/TestMinWorkerRequirement.java @@ -118,10 +118,10 @@ public void testInsufficientWorkerNodesAfterDrop() .setWorkerCount(3) 
.build()) { queryRunner.execute("SELECT COUNT(*) from lineitem"); - assertThat(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size()).isEqualTo(4); + assertThat(queryRunner.getCoordinator().refreshNodes().getActiveNodes()).hasSize(4); queryRunner.getServers().get(0).close(); - assertThat(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size()).isEqualTo(3); + assertThat(queryRunner.getCoordinator().refreshNodes().getActiveNodes()).hasSize(3); assertThatThrownBy(() -> queryRunner.execute("SELECT COUNT(*) from lineitem")) .isInstanceOf(RuntimeException.class) .hasMessage("Insufficient active worker nodes. Waited 1.00ns for at least 4 workers, but only 3 workers are active"); @@ -182,7 +182,7 @@ public void testRequiredWorkerNodesSessionOverride() // After adding 2 nodes, query should run queryRunner.addServers(2); - assertThat(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size()).isEqualTo(6); + assertThat(queryRunner.getCoordinator().refreshNodes().getActiveNodes()).hasSize(6); queryRunner.execute(require6Workers, "SELECT COUNT(*) from lineitem"); } } @@ -215,7 +215,7 @@ public void testMultipleRequiredWorkerNodesSessionOverride() assertThat(queryFuture3.isDone()).isFalse(); queryRunner.addServers(1); - assertThat(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size()).isEqualTo(2); + assertThat(queryRunner.getCoordinator().refreshNodes().getActiveNodes()).hasSize(2); // After adding 1 node, only 1st query should run MILLISECONDS.sleep(1000); assertThat(queryFuture1.get().result().getRowCount() > 0).isTrue(); @@ -228,7 +228,7 @@ public void testMultipleRequiredWorkerNodesSessionOverride() // After adding 2 nodes, 2nd and 3rd query should also run queryRunner.addServers(2); - assertThat(queryRunner.getCoordinator().refreshNodes().getActiveNodes().size()).isEqualTo(4); + assertThat(queryRunner.getCoordinator().refreshNodes().getActiveNodes()).hasSize(4); assertThat(queryFuture2.get().result().getRowCount() > 
0).isTrue(); completedQueryInfo = queryManager.getFullQueryInfo(queryFuture2.get().queryId()); assertThat(completedQueryInfo.getQueryStats().getResourceWaitingTime().roundTo(SECONDS) >= 2).isTrue();