From aebcbfe8cd44619bdf965030e7e7a0f6927fe23d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Wa=C5=9B?= Date: Wed, 30 Oct 2024 15:22:11 +0100 Subject: [PATCH 01/31] Mark random_expression function as non-deterministic --- .../src/main/java/io/trino/plugin/faker/FakerFunctions.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerFunctions.java b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerFunctions.java index e0ecb8b316fee..bdc14538cbbd8 100644 --- a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerFunctions.java +++ b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerFunctions.java @@ -32,7 +32,7 @@ public FakerFunctions() faker = new Faker(); } - @ScalarFunction + @ScalarFunction(deterministic = false) @Description("Generate a random string based on the Faker expression") @SqlType(VARCHAR) public Slice randomString(@SqlType(VARCHAR) Slice fakerExpression) From b3ee8cb79b7a2f019ab6d033dfd8cffff370b470 Mon Sep 17 00:00:00 2001 From: "Mateusz \"Serafin\" Gajewski" Date: Wed, 30 Oct 2024 14:00:31 +0100 Subject: [PATCH 02/31] Update airlift to 279 --- .../io/trino/execution/SqlQueryExecution.java | 12 +++++----- .../io/trino/execution/SqlQueryManager.java | 2 +- .../main/java/io/trino/execution/SqlTask.java | 4 ++-- .../io/trino/execution/SqlTaskExecution.java | 10 ++++---- .../execution/SqlTaskExecutionFactory.java | 2 +- .../executor/dedicated/SplitProcessor.java | 2 +- .../timesharing/TimeSharingTaskExecutor.java | 6 ++--- .../scheduler/PipelinedQueryScheduler.java | 6 ++--- .../ContinuousTaskStatusFetcher.java | 6 ++--- .../remotetask/DynamicFiltersFetcher.java | 6 ++--- .../server/remotetask/HttpRemoteTask.java | 24 +++++++++---------- .../server/remotetask/TaskInfoFetcher.java | 6 ++--- pom.xml | 2 +- 13 files changed, 44 insertions(+), 44 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/execution/SqlQueryExecution.java b/core/trino-main/src/main/java/io/trino/execution/SqlQueryExecution.java index 7caed294ecd0a..f80bc5f63f439 100644 --- a/core/trino-main/src/main/java/io/trino/execution/SqlQueryExecution.java +++ b/core/trino-main/src/main/java/io/trino/execution/SqlQueryExecution.java @@ -187,7 +187,7 @@ private SqlQueryExecution( EventDrivenTaskSourceFactory eventDrivenTaskSourceFactory, TaskDescriptorStorage taskDescriptorStorage) { - try (SetThreadName _ = new SetThreadName("Query-%s", stateMachine.getQueryId())) { + try (SetThreadName _ = new SetThreadName("Query-" + stateMachine.getQueryId())) { this.slug = requireNonNull(slug, "slug is null"); this.tracer = requireNonNull(tracer, "tracer is null"); this.plannerContext = requireNonNull(plannerContext, "plannerContext is null"); @@ -396,7 +396,7 @@ public BasicQueryInfo getBasicQueryInfo() @Override public void start() { - try (SetThreadName _ = new SetThreadName("Query-%s", stateMachine.getQueryId())) { + try (SetThreadName _ = new SetThreadName("Query-" + stateMachine.getQueryId())) { try { if (!stateMachine.transitionToPlanning()) { // query already started or finished @@ -456,7 +456,7 @@ public void start() @Override public void addStateChangeListener(StateChangeListener stateChangeListener) { - try (SetThreadName _ = new SetThreadName("Query-%s", stateMachine.getQueryId())) { + try (SetThreadName _ = new SetThreadName("Query-" + stateMachine.getQueryId())) { stateMachine.addStateChangeListener(stateChangeListener); } } @@ -607,7 +607,7 @@ public void cancelStage(StageId stageId) { 
requireNonNull(stageId, "stageId is null"); - try (SetThreadName _ = new SetThreadName("Query-%s", stateMachine.getQueryId())) { + try (SetThreadName _ = new SetThreadName("Query-" + stateMachine.getQueryId())) { QueryScheduler scheduler = queryScheduler.get(); if (scheduler != null) { scheduler.cancelStage(stageId); @@ -620,7 +620,7 @@ public void failTask(TaskId taskId, Exception reason) { requireNonNull(taskId, "stageId is null"); - try (SetThreadName _ = new SetThreadName("Query-%s", stateMachine.getQueryId())) { + try (SetThreadName _ = new SetThreadName("Query-" + stateMachine.getQueryId())) { QueryScheduler scheduler = queryScheduler.get(); if (scheduler != null) { scheduler.failTask(taskId, reason); @@ -693,7 +693,7 @@ public QueryId getQueryId() @Override public QueryInfo getQueryInfo() { - try (SetThreadName _ = new SetThreadName("Query-%s", stateMachine.getQueryId())) { + try (SetThreadName _ = new SetThreadName("Query-" + stateMachine.getQueryId())) { // acquire reference to scheduler before checking finalQueryInfo, because // state change listener sets finalQueryInfo and then clears scheduler when // the query finishes. diff --git a/core/trino-main/src/main/java/io/trino/execution/SqlQueryManager.java b/core/trino-main/src/main/java/io/trino/execution/SqlQueryManager.java index 691cb46967c1c..55abd1e3679c8 100644 --- a/core/trino-main/src/main/java/io/trino/execution/SqlQueryManager.java +++ b/core/trino-main/src/main/java/io/trino/execution/SqlQueryManager.java @@ -265,7 +265,7 @@ public void createQuery(QueryExecution queryExecution) queryTracker.expireQuery(queryExecution.getQueryId()); }); - try (SetThreadName _ = new SetThreadName("Query-%s", queryExecution.getQueryId())) { + try (SetThreadName _ = new SetThreadName("Query-" + queryExecution.getQueryId())) { try (var ignoredStartScope = scopedSpan(tracer.spanBuilder("query-start") .setParent(Context.current().with(queryExecution.getSession().getQuerySpan())) .startSpan())) { diff --git a/core/trino-main/src/main/java/io/trino/execution/SqlTask.java b/core/trino-main/src/main/java/io/trino/execution/SqlTask.java index df664f6d64422..bfa6fe9d3cafb 100644 --- a/core/trino-main/src/main/java/io/trino/execution/SqlTask.java +++ b/core/trino-main/src/main/java/io/trino/execution/SqlTask.java @@ -269,14 +269,14 @@ public void recordHeartbeat() public TaskInfo getTaskInfo() { - try (SetThreadName _ = new SetThreadName("Task-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("Task-" + taskId)) { return createTaskInfo(taskHolderReference.get()); } } public TaskStatus getTaskStatus() { - try (SetThreadName _ = new SetThreadName("Task-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("Task-" + taskId)) { return createTaskStatus(taskHolderReference.get()); } } diff --git a/core/trino-main/src/main/java/io/trino/execution/SqlTaskExecution.java b/core/trino-main/src/main/java/io/trino/execution/SqlTaskExecution.java index 4f40831a6cd2c..d1295bcbabcbc 100644 --- a/core/trino-main/src/main/java/io/trino/execution/SqlTaskExecution.java +++ b/core/trino-main/src/main/java/io/trino/execution/SqlTaskExecution.java @@ -140,7 +140,7 @@ public SqlTaskExecution( this.splitMonitor = requireNonNull(splitMonitor, "splitMonitor is null"); this.driverAndTaskTerminationTracker = new DriverAndTaskTerminationTracker(taskStateMachine); - try (SetThreadName _ = new SetThreadName("Task-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("Task-" + taskId)) { List driverFactories = localExecutionPlan.getDriverFactories(); // index 
driver factories Set partitionedSources = ImmutableSet.copyOf(localExecutionPlan.getPartitionedSourceOrder()); @@ -195,7 +195,7 @@ public SqlTaskExecution( // this must be synchronized to prevent a concurrent call to checkTaskCompletion() from proceeding before all task lifecycle drivers are created public synchronized void start() { - try (SetThreadName _ = new SetThreadName("Task-%s", getTaskId())) { + try (SetThreadName _ = new SetThreadName("Task-" + getTaskId())) { // Signal immediate termination complete if task termination has started if (taskStateMachine.getState().isTerminating()) { taskStateMachine.terminationComplete(); @@ -263,7 +263,7 @@ public void addSplitAssignments(List splitAssignments) return; } - try (SetThreadName _ = new SetThreadName("Task-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("Task-" + taskId)) { // update our record of split assignments and schedule drivers for new partitioned splits Set updatedUnpartitionedSources = updateSplitAssignments(splitAssignments); for (PlanNodeId planNodeId : updatedUnpartitionedSources) { @@ -415,7 +415,7 @@ private synchronized void enqueueDriverSplitRunner(boolean forceRunSplit, List blocked = split.processFor(SPLIT_RUN_QUANTA); diff --git a/core/trino-main/src/main/java/io/trino/execution/executor/timesharing/TimeSharingTaskExecutor.java b/core/trino-main/src/main/java/io/trino/execution/executor/timesharing/TimeSharingTaskExecutor.java index 49ec7b5725d9a..7d7d31a50b4a3 100644 --- a/core/trino-main/src/main/java/io/trino/execution/executor/timesharing/TimeSharingTaskExecutor.java +++ b/core/trino-main/src/main/java/io/trino/execution/executor/timesharing/TimeSharingTaskExecutor.java @@ -293,7 +293,7 @@ public synchronized TimeSharingTaskHandle addTask( public void removeTask(TaskHandle taskHandle) { TimeSharingTaskHandle handle = (TimeSharingTaskHandle) taskHandle; - try (SetThreadName _ = new SetThreadName("Task-%s", handle.getTaskId())) { + try (SetThreadName _ = new SetThreadName("Task-" + handle.getTaskId())) { // Skip additional scheduling if the task was already destroyed if (!doRemoveTask(handle)) { return; @@ -542,7 +542,7 @@ private class TaskRunner @Override public void run() { - try (SetThreadName runnerName = new SetThreadName("SplitRunner-%s", runnerId)) { + try (SetThreadName runnerName = new SetThreadName("SplitRunner-" + runnerId)) { while (!closed && !Thread.currentThread().isInterrupted()) { // select next worker PrioritizedSplitRunner split; @@ -555,7 +555,7 @@ public void run() } String threadId = split.getTaskHandle().getTaskId() + "-" + split.getSplitId(); - try (SetThreadName splitName = new SetThreadName(threadId)) { + try (SetThreadName _ = new SetThreadName(threadId)) { RunningSplitInfo splitInfo = new RunningSplitInfo(ticker.read(), threadId, Thread.currentThread(), split.getTaskHandle().getTaskId(), split::getInfo); runningSplitInfos.add(splitInfo); runningSplits.add(split); diff --git a/core/trino-main/src/main/java/io/trino/execution/scheduler/PipelinedQueryScheduler.java b/core/trino-main/src/main/java/io/trino/execution/scheduler/PipelinedQueryScheduler.java index 7d23f57dd2dfd..7f119afc89ccd 100644 --- a/core/trino-main/src/main/java/io/trino/execution/scheduler/PipelinedQueryScheduler.java +++ b/core/trino-main/src/main/java/io/trino/execution/scheduler/PipelinedQueryScheduler.java @@ -432,7 +432,7 @@ private synchronized void scheduleRetry() @Override public synchronized void cancelStage(StageId stageId) { - try (SetThreadName _ = new SetThreadName("Query-%s", 
queryStateMachine.getQueryId())) { + try (SetThreadName _ = new SetThreadName("Query-" + queryStateMachine.getQueryId())) { coordinatorStagesScheduler.cancelStage(stageId); DistributedStagesScheduler distributedStagesScheduler = this.distributedStagesScheduler.get(); if (distributedStagesScheduler != null) { @@ -444,7 +444,7 @@ public synchronized void cancelStage(StageId stageId) @Override public void failTask(TaskId taskId, Throwable failureCause) { - try (SetThreadName _ = new SetThreadName("Query-%s", queryStateMachine.getQueryId())) { + try (SetThreadName _ = new SetThreadName("Query-" + queryStateMachine.getQueryId())) { stageManager.failTaskRemotely(taskId, failureCause); } } @@ -1272,7 +1272,7 @@ public void schedule() { checkState(started.compareAndSet(false, true), "already started"); - try (SetThreadName _ = new SetThreadName("Query-%s", queryStateMachine.getQueryId())) { + try (SetThreadName _ = new SetThreadName("Query-" + queryStateMachine.getQueryId())) { stageSchedulers.values().forEach(StageScheduler::start); while (!executionSchedule.isFinished()) { List> blockedStages = new ArrayList<>(); diff --git a/core/trino-main/src/main/java/io/trino/server/remotetask/ContinuousTaskStatusFetcher.java b/core/trino-main/src/main/java/io/trino/server/remotetask/ContinuousTaskStatusFetcher.java index db5b29a385580..b643151d62883 100644 --- a/core/trino-main/src/main/java/io/trino/server/remotetask/ContinuousTaskStatusFetcher.java +++ b/core/trino-main/src/main/java/io/trino/server/remotetask/ContinuousTaskStatusFetcher.java @@ -171,7 +171,7 @@ private class TaskStatusResponseCallback @Override public void success(TaskStatus value) { - try (SetThreadName _ = new SetThreadName("ContinuousTaskStatusFetcher-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("ContinuousTaskStatusFetcher-" + taskId)) { updateStats(requestStartNanos); updateTaskStatus(value); errorTracker.requestSucceeded(); @@ -185,7 +185,7 @@ public void success(TaskStatus value) @Override public void failed(Throwable cause) { - try (SetThreadName _ = new SetThreadName("ContinuousTaskStatusFetcher-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("ContinuousTaskStatusFetcher-" + taskId)) { updateStats(requestStartNanos); // if task not already done, record error TaskStatus taskStatus = getTaskStatus(); @@ -209,7 +209,7 @@ public void failed(Throwable cause) @Override public void fatal(Throwable cause) { - try (SetThreadName _ = new SetThreadName("ContinuousTaskStatusFetcher-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("ContinuousTaskStatusFetcher-" + taskId)) { updateStats(requestStartNanos); onFail.accept(cause); } diff --git a/core/trino-main/src/main/java/io/trino/server/remotetask/DynamicFiltersFetcher.java b/core/trino-main/src/main/java/io/trino/server/remotetask/DynamicFiltersFetcher.java index 4560bb1c49ff6..a4413631209e6 100644 --- a/core/trino-main/src/main/java/io/trino/server/remotetask/DynamicFiltersFetcher.java +++ b/core/trino-main/src/main/java/io/trino/server/remotetask/DynamicFiltersFetcher.java @@ -183,7 +183,7 @@ public DynamicFiltersResponseCallback(long requestedDynamicFiltersVersion) @Override public void success(VersionedDynamicFilterDomains newDynamicFilterDomains) { - try (SetThreadName _ = new SetThreadName("DynamicFiltersFetcher-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("DynamicFiltersFetcher-" + taskId)) { updateStats(requestStartNanos); if (newDynamicFilterDomains.getVersion() < requestedDynamicFiltersVersion) { // Receiving older dynamic filter 
shouldn't happen unless @@ -208,7 +208,7 @@ public void success(VersionedDynamicFilterDomains newDynamicFilterDomains) @Override public void failed(Throwable cause) { - try (SetThreadName _ = new SetThreadName("DynamicFiltersFetcher-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("DynamicFiltersFetcher-" + taskId)) { updateStats(requestStartNanos); errorTracker.requestFailed(cause); fetchDynamicFiltersIfNecessary(); @@ -230,7 +230,7 @@ public void failed(Throwable cause) @Override public void fatal(Throwable cause) { - try (SetThreadName _ = new SetThreadName("DynamicFiltersFetcher-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("DynamicFiltersFetcher-" + taskId)) { updateStats(requestStartNanos); stop(); onFail.accept(cause); diff --git a/core/trino-main/src/main/java/io/trino/server/remotetask/HttpRemoteTask.java b/core/trino-main/src/main/java/io/trino/server/remotetask/HttpRemoteTask.java index 4a1c4a2eca863..77a203ae1fc5d 100644 --- a/core/trino-main/src/main/java/io/trino/server/remotetask/HttpRemoteTask.java +++ b/core/trino-main/src/main/java/io/trino/server/remotetask/HttpRemoteTask.java @@ -255,7 +255,7 @@ public HttpRemoteTask( requireNonNull(outboundDynamicFilterIds, "outboundDynamicFilterIds is null"); requireNonNull(estimatedMemory, "estimatedMemory is null"); - try (SetThreadName _ = new SetThreadName("HttpRemoteTask-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("HttpRemoteTask-" + taskId)) { this.taskId = taskId; this.session = session; this.stageSpan = stageSpan; @@ -419,7 +419,7 @@ public TaskStatus getTaskStatus() @Override public void start() { - try (SetThreadName _ = new SetThreadName("HttpRemoteTask-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("HttpRemoteTask-" + taskId)) { // to start we just need to trigger an update started.set(true); triggerUpdate(); @@ -586,7 +586,7 @@ private long getPendingSourceSplitsWeight() @Override public void addStateChangeListener(StateChangeListener stateChangeListener) { - try (SetThreadName _ = new SetThreadName("HttpRemoteTask-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("HttpRemoteTask-" + taskId)) { taskStatusFetcher.addStateChangeListener(stateChangeListener); } } @@ -847,7 +847,7 @@ public void abort() } synchronized (this) { - try (SetThreadName _ = new SetThreadName("HttpRemoteTask-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("HttpRemoteTask-" + taskId)) { if (!getTaskStatus().getState().isTerminatingOrDone()) { scheduleAsyncCleanupRequest("abort", true); } @@ -864,7 +864,7 @@ public void cancel() } synchronized (this) { - try (SetThreadName _ = new SetThreadName("HttpRemoteTask-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("HttpRemoteTask-" + taskId)) { TaskStatus taskStatus = getTaskStatus(); if (!taskStatus.getState().isTerminatingOrDone()) { scheduleAsyncCleanupRequest("cancel", false); @@ -1030,7 +1030,7 @@ public void onFailure(Throwable t) private void fatalAsyncCleanupFailure(TrinoTransportException cause) { synchronized (HttpRemoteTask.this) { - try (SetThreadName _ = new SetThreadName("HttpRemoteTask-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("HttpRemoteTask-" + taskId)) { TaskStatus taskStatus = getTaskStatus(); if (taskStatus.getState().isDone()) { log.warn("Task %s already in terminal state %s; cannot overwrite with FAILED due to %s", @@ -1057,7 +1057,7 @@ private void fatalAsyncCleanupFailure(TrinoTransportException cause) */ private synchronized void fatalUnacknowledgedFailure(Throwable cause) { - try 
(SetThreadName _ = new SetThreadName("HttpRemoteTask-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("HttpRemoteTask-" + taskId)) { TaskStatus taskStatus = getTaskStatus(); if (!taskStatus.getState().isDone()) { // Update the taskInfo with the new taskStatus. @@ -1107,7 +1107,7 @@ public void failRemotely(Throwable cause) } synchronized (this) { - try (SetThreadName _ = new SetThreadName("HttpRemoteTask-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("HttpRemoteTask-" + taskId)) { TaskStatus taskStatus = getTaskStatus(); if (!taskStatus.getState().isTerminatingOrDone()) { log.debug(cause, "Remote task %s failed with %s", taskStatus.getSelf(), cause); @@ -1124,7 +1124,7 @@ public void failLocallyImmediately(Throwable cause) // Prevent concurrent abort commands after this point terminating.set(true); synchronized (this) { - try (SetThreadName _ = new SetThreadName("HttpRemoteTask-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("HttpRemoteTask-" + taskId)) { TaskStatus taskStatus = getTaskStatus(); if (!taskStatus.getState().isDone()) { // Record and force the task into a failed state immediately without waiting for the task to respond. A final cleanup @@ -1184,7 +1184,7 @@ private UpdateResponseHandler(List splitAssignments, long curre @Override public void success(TaskInfo value) { - try (SetThreadName _ = new SetThreadName("UpdateResponseHandler-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("UpdateResponseHandler-" + taskId)) { sentDynamicFiltersVersion.set(currentRequestDynamicFiltersVersion); // Remove dynamic filters which were successfully sent to free up memory outboundDynamicFiltersCollector.acknowledge(currentRequestDynamicFiltersVersion); @@ -1203,7 +1203,7 @@ public void success(TaskInfo value) @Override public void failed(Throwable cause) { - try (SetThreadName _ = new SetThreadName("UpdateResponseHandler-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("UpdateResponseHandler-" + taskId)) { try { currentRequest.set(null); updateStats(); @@ -1230,7 +1230,7 @@ public void failed(Throwable cause) @Override public void fatal(Throwable cause) { - try (SetThreadName _ = new SetThreadName("UpdateResponseHandler-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("UpdateResponseHandler-" + taskId)) { fatalUnacknowledgedFailure(cause); } } diff --git a/core/trino-main/src/main/java/io/trino/server/remotetask/TaskInfoFetcher.java b/core/trino-main/src/main/java/io/trino/server/remotetask/TaskInfoFetcher.java index 366571b0b6bde..af3af8e8d2022 100644 --- a/core/trino-main/src/main/java/io/trino/server/remotetask/TaskInfoFetcher.java +++ b/core/trino-main/src/main/java/io/trino/server/remotetask/TaskInfoFetcher.java @@ -317,7 +317,7 @@ private class TaskInfoResponseCallback @Override public void success(TaskInfo newValue) { - try (SetThreadName _ = new SetThreadName("TaskInfoFetcher-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("TaskInfoFetcher-" + taskId)) { lastUpdateNanos.set(System.nanoTime()); updateStats(requestStartNanos); @@ -332,7 +332,7 @@ public void success(TaskInfo newValue) @Override public void failed(Throwable cause) { - try (SetThreadName _ = new SetThreadName("TaskInfoFetcher-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("TaskInfoFetcher-" + taskId)) { lastUpdateNanos.set(System.nanoTime()); // if task not already done, record error @@ -355,7 +355,7 @@ public void failed(Throwable cause) @Override public void fatal(Throwable cause) { - try (SetThreadName _ = new 
SetThreadName("TaskInfoFetcher-%s", taskId)) { + try (SetThreadName _ = new SetThreadName("TaskInfoFetcher-" + taskId)) { onFail.accept(cause); } finally { diff --git a/pom.xml b/pom.xml index c3e9111d8ab89..99a4b4bffed77 100644 --- a/pom.xml +++ b/pom.xml @@ -182,7 +182,7 @@ ${air.test.jvm.additional-arguments.default} - 278 + 279 2.9.6 4.13.2 1.12.0 From 8c801f1330680560d4617c08eb9d3962c066d9fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Wa=C5=9B?= Date: Wed, 30 Oct 2024 17:09:19 +0100 Subject: [PATCH 03/31] Remove the random_string function from the Faker connector Plugins are not supposed to add new global functions, but namespaced ones. --- .../io/trino/plugin/faker/FakerFunctions.java | 42 ------------------- .../io/trino/plugin/faker/FakerPlugin.java | 11 ----- .../trino/plugin/faker/TestFakerQueries.java | 8 ---- 3 files changed, 61 deletions(-) delete mode 100644 plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerFunctions.java diff --git a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerFunctions.java b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerFunctions.java deleted file mode 100644 index bdc14538cbbd8..0000000000000 --- a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerFunctions.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package io.trino.plugin.faker; - -import io.airlift.slice.Slice; -import io.trino.spi.function.Description; -import io.trino.spi.function.ScalarFunction; -import io.trino.spi.function.SqlType; -import net.datafaker.Faker; - -import static io.airlift.slice.Slices.utf8Slice; -import static io.trino.spi.type.StandardTypes.VARCHAR; - -public final class FakerFunctions -{ - private final Faker faker; - - public FakerFunctions() - { - faker = new Faker(); - } - - @ScalarFunction(deterministic = false) - @Description("Generate a random string based on the Faker expression") - @SqlType(VARCHAR) - public Slice randomString(@SqlType(VARCHAR) Slice fakerExpression) - { - return utf8Slice(faker.expression(fakerExpression.toStringUtf8())); - } -} diff --git a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerPlugin.java b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerPlugin.java index c3a0fa96a884b..84f6ae2ba63f6 100644 --- a/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerPlugin.java +++ b/plugin/trino-faker/src/main/java/io/trino/plugin/faker/FakerPlugin.java @@ -15,12 +15,9 @@ package io.trino.plugin.faker; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; import io.trino.spi.Plugin; import io.trino.spi.connector.ConnectorFactory; -import java.util.Set; - public class FakerPlugin implements Plugin { @@ -29,12 +26,4 @@ public Iterable getConnectorFactories() { return ImmutableList.of(new FakerConnectorFactory("faker")); } - - @Override - public Set> getFunctions() - { - return ImmutableSet.>builder() - .add(FakerFunctions.class) - .build(); - } } diff --git a/plugin/trino-faker/src/test/java/io/trino/plugin/faker/TestFakerQueries.java b/plugin/trino-faker/src/test/java/io/trino/plugin/faker/TestFakerQueries.java index 3b1a7ea235956..064d9df764b2d 100644 --- a/plugin/trino-faker/src/test/java/io/trino/plugin/faker/TestFakerQueries.java +++ b/plugin/trino-faker/src/test/java/io/trino/plugin/faker/TestFakerQueries.java @@ -245,14 +245,6 @@ void testSelectGenerator() assertUpdate("DROP TABLE faker.default.generators"); } - @Test - void testSelectFunctions() - { - @Language("SQL") - String testQuery = "SELECT random_string('#{options.option ''a'', ''b''}') IN ('a', 'b')"; - assertQuery(testQuery, "VALUES (true)"); - } - @Test void testSelectRange() { From 22640c5d25959eeab71fcdd2f9559baebe0273c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Wa=C5=9B?= Date: Thu, 24 Oct 2024 19:45:04 +0200 Subject: [PATCH 04/31] Document the Faker connector --- .../.vale/config/vocabularies/Base/accept.txt | 3 + docs/src/main/sphinx/connector.md | 1 + docs/src/main/sphinx/connector/faker.md | 283 ++++++++++++++++++ 3 files changed, 287 insertions(+) create mode 100644 docs/src/main/sphinx/connector/faker.md diff --git a/docs/.vale/config/vocabularies/Base/accept.txt b/docs/.vale/config/vocabularies/Base/accept.txt index f53de903994ea..1cdbf3cae4188 100644 --- a/docs/.vale/config/vocabularies/Base/accept.txt +++ b/docs/.vale/config/vocabularies/Base/accept.txt @@ -12,11 +12,13 @@ ConnectorPageSourceProvider ConnectorRecordSetProvider ConnectorSplitManager CPU +Datafaker DNS ETL Guice gzip HDFS +ipsum JDBC JDK JKS @@ -24,6 +26,7 @@ JVM Kerberos keystore KeyStore +Lorem Metastore open-source ORC diff --git a/docs/src/main/sphinx/connector.md b/docs/src/main/sphinx/connector.md index 0ccb69b40fa7c..d9101c1b5fc75 100644 --- a/docs/src/main/sphinx/connector.md +++ b/docs/src/main/sphinx/connector.md @@ -16,6 +16,7 @@ Delta Lake Druid 
 Elasticsearch <connector/elasticsearch>
 Exasol <connector/exasol>
+Faker <connector/faker>
 Google Sheets <connector/googlesheets>
 Hive <connector/hive>
 Hudi <connector/hudi>
diff --git a/docs/src/main/sphinx/connector/faker.md b/docs/src/main/sphinx/connector/faker.md
new file mode 100644
index 0000000000000..9f19d23edbf7b
--- /dev/null
+++ b/docs/src/main/sphinx/connector/faker.md
@@ -0,0 +1,283 @@
+# Faker connector
+
+The Faker connector generates random data matching a defined structure. It uses
+the [Datafaker](https://www.datafaker.net/) library to make the generated data
+more realistic. Use the connector to populate another data source with large
+and realistic test data. This allows testing the performance of applications
+that process data, including Trino itself, and of application user interfaces.
+
+## Configuration
+
+Create a catalog properties file that specifies the Faker connector by setting
+the `connector.name` to `faker`.
+
+For example, to generate data in the `generator` catalog, create the file
+`etc/catalog/generator.properties`.
+
+```text
+connector.name=faker
+faker.null-probability=0.1
+faker.default-limit=1000
+```
+
+Create tables in the `default` schema, or create different schemas first.
+Reading from tables in this catalog returns random data. See [](faker-usage)
+for more examples.
+
+Schema objects created in this connector are not persisted; they are stored in
+memory only, and must be recreated after every restart of the coordinator.
+
+The following table details all general configuration properties:
+
+:::{list-table} Faker configuration properties
+:widths: 35, 55, 10
+:header-rows: 1
+
+* - Property name
+  - Description
+  - Default
+* - `faker.null-probability`
+  - Default probability of null values in any column, in any table, that
+    allows them.
+  - `0.5`
+* - `faker.default-limit`
+  - Default number of rows for each table, when the `LIMIT` clause is not
+    specified in the query.
+  - `1000`
+:::
+
+The following table details all supported schema properties. If not set, the
+values from the corresponding configuration properties are used.
+
+:::{list-table} Faker schema properties
+:widths: 35, 65
+:header-rows: 1
+
+* - Property name
+  - Description
+* - `null_probability`
+  - Default probability of null values in any column that allows them, in any
+    table of this schema.
+* - `default_limit`
+  - Default limit of rows returned from any table in this schema, if not
+    specified in the query.
+:::
+
+The following table details all supported table properties. If not set, the
+values from the corresponding schema properties are used.
+
+:::{list-table} Faker table properties
+:widths: 35, 65
+:header-rows: 1
+
+* - Property name
+  - Description
+* - `null_probability`
+  - Default probability of null values in any column in this table that allows
+    them.
+* - `default_limit`
+  - Default limit of rows returned from this table, if not specified in the
+    query.
+:::
+
+The following table details all supported column properties.
+
+:::{list-table} Faker column properties
+:widths: 20, 40, 40
+:header-rows: 1
+
+* - Property name
+  - Description
+  - Default
+* - `null_probability`
+  - Probability of null values in this column, if it allows them.
+  - Defaults to the `null_probability` table or schema property, if set, or the
+    `faker.null-probability` configuration property.
+* - `generator`
+  - Faker library generator expression used to generate data for this column.
+    Only valid for columns of a character-based type.
+  - Defaults to a 3 to 40 word sentence from the
+    [Lorem](https://javadoc.io/doc/net.datafaker/datafaker/latest/net/datafaker/providers/base/Lorem.html)
+    provider.
+:::
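+
+The following hypothetical sketch shows the table and column properties in
+use; the table name, generator expression, and property values are
+illustrative only:
+
+```sql
+CREATE TABLE faker.default.users (
+    id UUID NOT NULL,
+    name VARCHAR NOT NULL WITH (generator = '#{Name.first_name} #{Name.last_name}'),
+    comment VARCHAR WITH (null_probability = 0.9)
+)
+WITH (default_limit = 50);
+```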
+
+### Character types
+
+Faker supports the following character types:
+
+- `CHAR`
+- `VARCHAR`
+- `VARBINARY`
+
+Columns of those types use a generator producing the [Lorem
+ipsum](https://en.wikipedia.org/wiki/Lorem_ipsum) placeholder text. Unbounded
+columns return a random sentence with 3 to 40 words.
+
+To have more control over the format of the generated data, use the `generator`
+column property. Some examples of valid generator expressions:
+
+- `#{regexify '(a|b){2,3}'}`
+- `#{regexify '\\.\\*\\?\\+'}`
+- `#{bothify '????','false'}`
+- `#{Name.first_name} #{Name.first_name} #{Name.last_name}`
+- `#{number.number_between '1','10'}`
+
+See the Datafaker documentation for more information about the
+[expression](https://www.datafaker.net/documentation/expressions/) syntax and
+the [available providers](https://www.datafaker.net/documentation/providers/).
+
+### Non-character types
+
+Faker supports the following non-character types:
+
+- `BIGINT`
+- `INTEGER` or `INT`
+- `SMALLINT`
+- `TINYINT`
+- `BOOLEAN`
+- `DATE`
+- `DECIMAL`
+- `REAL`
+- `DOUBLE`
+- `INTERVAL DAY TO SECOND`
+- `INTERVAL YEAR TO MONTH`
+- `TIMESTAMP` and `TIMESTAMP(P)`
+- `TIMESTAMP WITH TIME ZONE` and `TIMESTAMP(P) WITH TIME ZONE`
+- `TIME` and `TIME(P)`
+- `TIME WITH TIME ZONE` and `TIME(P) WITH TIME ZONE`
+- `IPADDRESS`
+- `UUID`
+
+You cannot use generator expressions for non-character-based columns. To limit
+their data range, specify constraints in the `WHERE` clause.
+
+### Unsupported types
+
+Faker does not support the following data types:
+
+- structural types: `ARRAY`, `MAP`, `ROW`
+- `JSON`
+- Geometry
+- HyperLogLog and all digest types
+
+To generate data of these complex types, combine data from columns of
+primitive types, as in the following example:
+
+```sql
+CREATE TABLE faker.default.prices (
+    currency VARCHAR NOT NULL WITH (generator = '#{Currency.code}'),
+    price DECIMAL(8,2) NOT NULL
+);
+
+SELECT JSON_OBJECT(KEY currency VALUE price) AS complex
+FROM faker.default.prices
+WHERE price > 0
+LIMIT 3;
+```
+
+Executing these queries should return data structured like this:
+
+```text
+      complex
+-------------------
+ {"TTD":924657.82}
+ {"MRO":968292.49}
+ {"LTL":357773.63}
+(3 rows)
+```
+
+### Number of generated rows
+
+By default, the connector generates 1000 rows for every table. To control how
+many rows are generated for a table, use the `LIMIT` clause in the query. A
+default limit can be set using the `default_limit` table or schema property,
+or in the connector configuration file using the `faker.default-limit`
+property.
+
+### Null values
+
+For columns without a `NOT NULL` constraint, null values are generated using
+the default probability of 50%. It can be modified using the `null_probability`
+property set for a column, table, or schema. The default value of 0.5 can also
+be modified in the connector configuration file, using the
+`faker.null-probability` property.
+
+(faker-type-mapping)=
+## Type mapping
+
+The Faker connector generates data itself, so no mapping is required.
+
+(faker-sql-support)=
+## SQL support
+
+The connector provides {ref}`globally available <sql-globally-available>` and
+{ref}`read operation <sql-read-operations>` statements to generate data.
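+
+For example, assuming the `prices` table defined earlier on this page, any
+read statement generates fresh random rows on the fly:
+
+```sql
+SELECT currency, price FROM faker.default.prices LIMIT 5;
+```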
+
+To define the schema for generating data, it supports the following features:
+
+- [](/sql/create-table)
+- [](/sql/create-table-as)
+- [](/sql/drop-table)
+- [](/sql/create-schema)
+- [](/sql/drop-schema)
+
+(faker-usage)=
+## Usage
+
+Faker generates data when reading from a table created in a catalog using this
+connector. This makes it easy to fill an existing schema with random data:
+copy only the schema into a Faker catalog, and insert the generated data back
+into the original tables.
+
+Using the catalog definition from the Configuration section, you can proceed
+with the following steps.
+
+Create a table with the same columns as in the table to populate with random
+data. Exclude all properties, because the Faker connector doesn't support the
+same table properties as other connectors.
+
+```sql
+CREATE TABLE generator.default.customer (LIKE production.public.customer EXCLUDING PROPERTIES);
+```
+
+Insert random data into the original table, by selecting it from the
+`generator` catalog. Data generated by the Faker connector for columns of
+non-character types covers the whole range of the data type. Add constraints
+to adjust the data as desired. The following example ensures that the date of
+birth and the age in years are related, realistic values.
+
+```sql
+INSERT INTO production.public.customer
+SELECT *
+FROM generator.default.customer
+WHERE
+    born_at BETWEEN CURRENT_DATE - INTERVAL '150' YEAR AND CURRENT_DATE
+    AND age_years BETWEEN 0 AND 150
+LIMIT 100;
+```
+
+To generate even more realistic data, choose specific generators by setting the
+`generator` property on columns. Start with getting the complete definition of
+a table:
+
+```sql
+SHOW CREATE TABLE production.public.customer;
+```
+
+Modify the output of the previous query and add some column properties.
+
+```sql
+CREATE TABLE generator.default.customer (
+    id UUID NOT NULL,
+    name VARCHAR NOT NULL WITH (generator = '#{Name.first_name} #{Name.last_name}'),
+    address VARCHAR NOT NULL WITH (generator = '#{Address.fullAddress}'),
+    born_at DATE,
+    age_years INTEGER
+);
+```
+
+## Limitations
+
+- Generated data is not deterministic. There is no way to specify a seed for
+  the random generator. The same query reading from catalogs using this
+  connector, executed multiple times, returns different results each time, as
+  the example after this list shows.
+- It is not possible to choose the locale used by Datafaker's generators.
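+
+For example, the following query is expected to return a different set of
+names on every execution, using the `customer` table defined in the preceding
+section:
+
+```sql
+-- Run this twice: the two result sets differ
+SELECT name FROM generator.default.customer LIMIT 3;
+```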
From 0a72a7482d93aae62f1bbdd9dc5499859b214821 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Wa=C5=9B?= Date: Wed, 30 Oct 2024 18:44:31 +0100 Subject: [PATCH 05/31] Fix permissions of the upload results workflow --- .github/workflows/upload-test-results.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/upload-test-results.yml b/.github/workflows/upload-test-results.yml index 114e05574048a..48bd7f6fbe11d 100644 --- a/.github/workflows/upload-test-results.yml +++ b/.github/workflows/upload-test-results.yml @@ -6,6 +6,9 @@ on: types: - completed +permissions: + actions: read + defaults: run: shell: bash --noprofile --norc -euo pipefail {0} @@ -52,6 +55,7 @@ jobs: AWS_ACCESS_KEY_ID: ${{ vars.TEST_RESULTS_AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.TEST_RESULTS_AWS_SECRET_ACCESS_KEY }} AWS_DEFAULT_REGION: us-east-2 + BRANCH_NAME: ${{ github.event.workflow_run.head_branch }} if: env.S3_BUCKET != '' && env.AWS_ACCESS_KEY_ID != '' && env.AWS_SECRET_ACCESS_KEY != '' shell: bash --noprofile --norc -euo pipefail {0} run: | @@ -92,9 +96,10 @@ jobs: continue; fi jq -c \ - --argjson addObj '{"branch":"${{ github.event.workflow_run.head_branch }}","git_sha":"${{ github.event.workflow_run.head_sha }}","workflow_name":"${{ github.event.workflow.name }}","workflow_run":"${{ github.event.workflow_run.id }}","workflow_conclusion":"${{ github.event.workflow_run.conclusion }}","workflow_job":"","workflow_run_attempt":"${{ github.event.workflow_run.run_attempt }}","timestamp":""}' \ + --argjson addObj '{"branch":"","git_sha":"${{ github.event.workflow_run.head_sha }}","workflow_name":"${{ github.event.workflow.name }}","workflow_run":"${{ github.event.workflow_run.id }}","workflow_conclusion":"${{ github.event.workflow_run.conclusion }}","workflow_job":"","workflow_run_attempt":"${{ github.event.workflow_run.run_attempt }}","timestamp":""}' \ --arg timestamp "$(date -u '+%F %T.%3NZ')" \ - '. + $addObj | .timestamp=$timestamp' "$filename" | gzip -c > "$artifact_id" + --arg branch "$BRANCH_NAME" \ + '. 
+ $addObj | .branch=$branch | .timestamp=$timestamp' "$filename" | gzip -c > "$artifact_id" aws s3 cp --no-progress "$artifact_id" "s3://$S3_BUCKET/tests/results/type=$(basename "$filename" .ndjson)/repo=$(basename "${{ github.repository }}")/date_created=$(date -u '+%Y-%m-%d')/$artifact_id" done From 5594558e90faa144b59ccfa84995aa6c2285bbda Mon Sep 17 00:00:00 2001 From: Manfred Moser Date: Tue, 29 Oct 2024 21:58:18 -0700 Subject: [PATCH 06/31] Add docs for Vertica connector --- docs/src/main/sphinx/connector.md | 1 + docs/src/main/sphinx/connector/vertica.md | 234 ++++++++++++++++++++ docs/src/main/sphinx/static/img/vertica.png | Bin 0 -> 5653 bytes 3 files changed, 235 insertions(+) create mode 100644 docs/src/main/sphinx/connector/vertica.md create mode 100644 docs/src/main/sphinx/static/img/vertica.png diff --git a/docs/src/main/sphinx/connector.md b/docs/src/main/sphinx/connector.md index d9101c1b5fc75..30a4929fd9c55 100644 --- a/docs/src/main/sphinx/connector.md +++ b/docs/src/main/sphinx/connector.md @@ -45,6 +45,7 @@ System Thrift TPCDS TPCH +Vertica ``` ```{toctree} diff --git a/docs/src/main/sphinx/connector/vertica.md b/docs/src/main/sphinx/connector/vertica.md new file mode 100644 index 0000000000000..cdf6dc915686a --- /dev/null +++ b/docs/src/main/sphinx/connector/vertica.md @@ -0,0 +1,234 @@ +--- +myst: + substitutions: + default_domain_compaction_threshold: '`256`' +--- + +# Vertica connector + +```{raw} html + +``` + +The Vertica connector allows querying a [Vertica](https://www.vertica.com/) +database as an external data source. + +## Requirements + +To connect to Vertica, you need: + +- Vertica 9.1.x or higher. +- Network access from the coordinator and workers to the Vertica server. + Port 5433 is the default port. + +## Configuration + +Create a catalog properties file in `etc/catalog` named `example.properties` to +access the configured Vertica database in the `example` catalog. Replace example +with your database name or some other descriptive name of the catalog. Configure +the usage of the connector by specifying the name `vertica` and replace the +connection properties as appropriate for your setup. + +```properties +connector.name=vertica +connection-url=jdbc:vertica://example.net:5433/test_db +connection-user=root +connection-password=secret +``` + +The `connection-user` and `connection-password` are typically required and +determine the user credentials for the connection, often a service user. You can +use [secrets](/security/secrets) to avoid actual values in the catalog +properties files. + +```{include} jdbc-common-configurations.fragment +``` + +```{include} query-comment-format.fragment +``` + +```{include} jdbc-domain-compaction-threshold.fragment +``` + +```{include} jdbc-case-insensitive-matching.fragment +``` + +## Type mapping + +Because Trino and Vertica each support types that the other does not, this +connector [modifies some types](type-mapping-overview) when reading or writing +data. Data types may not map the same way in both directions between Trino and +the data source. Refer to the following sections for type mapping in each +direction. 
+ +### Vertica to Trino type mapping + +The connector maps Vertica types to the corresponding Trino types according to +the following table: + +:::{list-table} Vertica to Trino type mapping +:widths: 35, 25, 40 +:header-rows: 1 + +* - Vertica type + - Trino type + - Notes +* - `BOOLEAN` + - `BOOLEAN` + - +* - `BIGINT` + - `BIGINT` + - Vertica treats TINYINT, SMALLINT, INTEGER, and BIGINT as synonyms for the + same 64-bit BIGINT data type +* - `DOUBLE PRECISION (FLOAT)` + - `DOUBLE` + - Vertica treats FLOAT and REAL as the same 64-bit IEEE FLOAT +* - `DECIMAL(p, s)` + - `DECIMAL(p, s)` + - +* - `CHAR, CHAR(n)` + - `CHAR, CHAR(n)` + - +* - `VARCHAR`, `LONG VARCHAR`, `VARCHAR(n)`, `LONG VARCHAR(n)` + - `VARCHAR(n)` + - +* - `VARBINARY`, `LONG VARBINARY`, `VARBINARY(n)`, `LONG VARBINARY(n)` + - `VARBINARY(n)` + - +* - `DATE` + - `DATE` + - +::: + +No other types are supported. + +Unsupported Vertica types can be converted to `VARCHAR` with the +`vertica.unsupported_type_handling` session property. The default value for +this property is `IGNORE`. + +```sql +SET SESSION vertica.unsupported_type_handling='CONVERT_TO_VARCHAR'; +``` + +### Trino to Vertica type mapping + +The connector maps Trino types to the corresponding Vertica types according to +the following table: + +:::{list-table} Trino to Vertica type mapping +:widths: 50, 50 +:header-rows: 1 + +* - Trino type + - Vertica type +* - `BOOLEAN` + - `BOOLEAN` +* - `TINYINT` + - `BIGINT` +* - `SMALLINT` + - `BIGINT` +* - `INTEGER` + - `BIGINT` +* - `BIGINT` + - `BIGINT` +* - `REAL` + - `DOUBLE PRECISION` +* - `DOUBLE` + - `DOUBLE PRECISION` +* - `DECIMAL(p, s)` + - `DECIMAL(p, s)` +* - `CHAR` + - `CHAR` +* - `VARCHAR` + - `VARCHAR` +* - `VARBINARY` + - `VARBINARY` +* - `DATE` + - `DATE` +::: + +No other types are supported. + +```{include} jdbc-type-mapping.fragment +``` + +(vertica-sql-support)= +## SQL support + +The connector provides read and write access to data and metadata in Vertica. In +addition to the [globally available](sql-globally-available) and [read +operation](sql-read-operations) statements, the connector supports the following +features: + +- [](sql-data-management) +- [](/sql/create-table) +- [](/sql/create-table-as) +- [](/sql/drop-table) +- [](/sql/alter-table) excluding `DROP COLUMN` +- [](/sql/create-schema) +- [](/sql/drop-schema) + +```{include} alter-table-limitation.fragment +``` + +## Table functions + +The connector provides specific [table functions](/functions/table) to +access Vertica. + +(vertica-query-function)= +### `query(VARCHAR) -> table` + +The `query` function allows you to query the underlying database directly. It +requires syntax native to the data source, because the full query is pushed down +and processed in the data source. This can be useful for accessing native +features or for improving query performance in situations where running a query +natively may be faster. + +The `query` table function is available in the `system` schema of any +catalog that uses the Vertica connector, such as `example`. The +following example passes `myQuery` to the data source. `myQuery` has to be a +valid query for the data source, and is required to return a table as a result: + +```sql +SELECT + * +FROM + TABLE( + example.system.query( + query => 'myQuery' + ) + ); +``` + +```{include} query-table-function-ordering.fragment +``` + +## Performance + +The connector includes a number of performance features, detailed in the +following sections. 
+ +### Pushdown + +The connector supports pushdown for a number of operations: + +- [](join-pushdown) +- [](limit-pushdown) + +```{include} join-pushdown-enabled-false.fragment +``` + +### Table statistics + +You can use [](/sql/analyze) statements in Trino to populate the table +statistics in Vertica. The [cost-based +optimizer](/optimizer/cost-based-optimizations) then uses these statistics to +improve query performance. + +Support for table statistics is disabled by default. You can enable it with the +catalog property `statistics.enabled` set to `true`. In addition, the +`connection-user` configured in the catalog must have superuser permissions in +Vertica to gather and populate statistics. + +You can view statistics using [](/sql/show-stats). diff --git a/docs/src/main/sphinx/static/img/vertica.png b/docs/src/main/sphinx/static/img/vertica.png new file mode 100644 index 0000000000000000000000000000000000000000..22edf537c16f7d6f123850044a93aff2fda3d508 GIT binary patch literal 5653 zcmdUThzLkY_bv^xOUKek=hEFQ zUGm%SfAKsoX6DWHVm@l+ ze{#dYA@BU3;i8<2t#NQ@P#P*q`d;7zQ@{^>)hS!xE21Y*yzi8{v^4rZNdHQ5=N$GLkS;yx_Lj+)8^5<*YQh@u2!aZQvzA;4|(<#CVwz*aegLo=D>@x{coi3 zT#uLA@31+KM0tKo0sGi*4oNKCP2OFfIt$~sSfS#o4Zz+T5Ut^mi;=nP*_zLuTx@K< z$GF##4v4qe@(*hsmWuRmVp(J$#~#zB4K3#F!BP;v)yr#at%7izd@+4IJTOy4ec^O{ z`NnjGvB8Uge|J)Uuqk^VHD5{t3NFp`TD?yYP3AT`fx&EEk!j{D^)0m zjetv(_3rD*_B4D+1zxjR(XhDN?w#w!9)?dun!21E2gk%RirtWt*`5&M&HAp~clRgr zJ&e;SQ6$^ZasStms?Sf{o90E@NF6I;)1UG0eAA2X2XppRP50=0%MFd6ft3N+hxgKi`0iICwGFFsJMQ$2x>bd9 zWFw2s-%i_y4KsoA+74rJWK81v?!U}tT*u0Nc^M_p?&EYi=gt3EH@cO%UL)LhFo&<% z{3cAjCDnf%6=|hm@D^J(>L`ma2+tz{px{7+u8f+i`kt4$Ag?wwGNmp1gGFS^u3cf82&2XI!&{my=8T z99wEnjw06LWEREtZl{`#d+(*)HV2(haV&1`GBu9-zn69)oW;}WZ?k06cs^za&WrFQ z{~(g_iqt?+IKA_@xja&MJg5Q$wm;MT-IjLL`q1KQ!iY=xxAf7kcWP`wfy+oLIAni2 zgdjvG0$4|dS1o2Y?{czO?k4^Ch0z(_1Z|HuGaC@-xzC#{!GMR^ErT!SQIp4MFCx9& z!pN9xz3`gl?d|RTR%nOyXA}!Y!j9czRJX8fbqLeDub4?_2*L-%UG^uzF)5~yS#lP9 zmny%*=--9!)5RUt`5f@rXD(1!zIfAB#sa|AXu#kkTWiYB^+a7$WDX;-eWK>HjRGYo zAWEEHtU&-l{d$jeLOYwPp6M6s(pplO-qpf)+ol}f=c+_nbrJPE@Ko8|pP<;hh3PJ3 zW)&SgaRZ8S;t1pF5{gn=);L~;#k|~0D_8V$o~^bysFbcJ#{=zk4BO+QUvd(cexcYm zw?EEcA(;ysxRJg`$2zX5w{=5_`MQL`;@SMKI2>_N0GVDbmVdfctIg26p+VC9>T%na$=wC)L^jU-J~o+* zBE8o zNr?Au%z4~hxo(y}IZ&eQ_?qe9(f1@J{}rL2idv;1eEJb+qFPSfPD{4c<(Yqe(pa_4 z0Bgt8XQ7=9rOb)18~wuXge+GpW1njO;=fepv-<>8Fa4m{3n1ufFxEc_nRG06`pQ-6 z&zwbT5Sgn%mGH%@ssnGV$_hm&-NK`jF{8k{2;ObI-b|OXPu+uvsGZSNj3_)H(=v<5 zN}DR?XGjEv2Q1Ue@H%-o=gMV4ezzCer*oiPt%(X^(qHXfVj|i}=QEFhAOAV}dtcpO zE}iPp1#UUgC^Oq>jSFZ-}R_a7X6J!2I=@L}{A6uT=%%4y__r7Di5xigKI{~3;$)^j&FH7E*FKo5lb9$ohzCo{Q(lmh z$$7b!a`U zut$_4!}qgC4J4VSINL>@NqK*vKpb^ptDqW6Z5MBT=|yFvO&~9HcH8Ma!TQ`>xJH-) zDmSv+4lT-YDGR|Pwk1(IijJbw&2>f`h)*YKHlA(nO^B($L_xv=13I!d+j)X(@sRV_ z=}UDnhVFW0adrLH)ycRt^4nNNKKuY!HuoWWSR%Pc!5`Ss3}sQ1t+ct4+TcN)>|db< zE(P1c)OP7`peIMN!&* z%t2BpcdH*7&|QnJ_Y>m1H)nCAj4ykuTBe7ke#C2LbVl;fOxsVEPL2_h(AFxu>?8Fu z2_9g3o?eQ=!g?MDT=Od?!Pg?aoluAbX2i&7o-=Ml_Hh@8TtJf-8vxUHvOEyvm`yc@ylh^U@K&lk@aH}lkDecBm8))w3q z|3cb4RLa#}69B3{a?V0vpCt9A21opLmwcJ_VYd{od_srcf^(EZ?xOUAu2yZMr&^u0{G`1sCNZ%KGP ziTe3Z;%y+183H7%R;5?)MJ{u>vKB_`w2Tw)k`gQ zokR<^*zNx0fBVIaZxN)sr)x-;ypKcr7)l~U(xF=C{`W1r|0tCGAOKPi#cbi8X z*H&1@c)XF=#yuGNGFZuRzP?U8=R<(B`*vkQ$wW@aW0g#yo;O3f?s^I0&12-eFmt=l zd}CApb!uP17rtl`da1|MFXIhiiNl$Z)6bIZJ+ialw@xAF3oD@(bzha~fnpTzzyOmu z`b3>cyQRb%knYL$b;X2XKg$u+&8tI4u1X+1ZdtjN zA?cbc=}*G6aK^6~AeMXZ=GQgd^S)2&-T~=x|9K|p0`AQ>^m$6O5vl3YhxJTgD?*Y*!6;xH^dscwa9wk}!NSEg9B?%2FgsEy-0xM$~3QF$*u+Q z9iag$p^=yo#EvEvp>gJy`!jz{CCAm(TpwbPBioHN2j1zT84( zpu9m>pH9Slq4V 
zW-Z4-d2EM%6?7B8=DZsgzDBPRKsre3*I6YavxLi{OUY|9Xpzdjk!s6JiDl<`YRa*E z(7`1=7cZi&5~Y3iL4NHi@($4**;JZQ@ZVE*ETCuy^xO)^+=ke_bPsv1sc%MQcciim z4WA`|gGHr4Vte6wK|^RK%hQ2x>gd?vqNXs37ny1r8}imH?2l=>(2T^+gc|<@B1d(G zaX$zG>L#V?CTbtB*m%)Trul61Lo#ll_I4>lAiTKL&?Z(g`dUf+%aeUhsBC>wx%}%o zXtBF<7JIGyg2wT@5Wxm5W|vd&K4>U$47=&hc7{`AJLvGd|BVW z16s481%mVFXhyT(HRs@)Op*pOyYbk`eyttVjAc;?6{~&rp!jQ}`M2wl%4z`>Wwa)X z3|0Grd;!A4`KWJz(ZwMWiUr=x!~xfhOQcO=$$bRFhiI{Sm-`f)iqDj7G7bfwv$!Pt zWyXh?PxQiw5Do(iPX!YK+hMVlx5STkQ(r~3xv0n>I`)1S^qH`(caXMQ_$l~_?zX*< zEyeD_0q5GwW;A+Dkb@g}ODS0c7M-0qd^C{CFpY>~+*{bd1O~n%@r}X(l=3`sO(WWd zLW(#y4NuBA zNn%hzRq=<+mliJtmCVomO@NfYnJzx$G?RAS?FDu^SO6&Jv843H;r;tnSA|^?NE!YZ z($s7P$`ay=?^qhi;%9noxYXY2WTGF=5+cE;hK{D0AbQt1(d4nG`Y}t`dl-d`C`4BQ4efHe#`GfQg3QjX{TDyRfYx;AzTAoJ6Ie2 z+)n9X3_v^riFwLK1xIw#$FO~Zt}>7$u(yT-cLj}<#yz0{R9z6_Rw{7d{*9nWS+1Bt))7v*vl9-~oe^sB*L z)tkz~a0)gmSpD%xB8scy0M=sapM(I`%PYcv6~Kf(IIgohZi+8luqL82Kt#EbKEohg z?%#}YL`y?C0u>Y;e3Q>xTzaf|oX=i|{EK8iLr#T4IXF9D6SA9Bx$$eru~U<*&ta2O z{rL)T0+(FfGRtl^Ku~*c{+m+61tpPkhlFeseuV#O)?&4xcUo|6a5z#p`$3{>bR@ex zdd=l+V9@{>*k`Apl~Wy=WVM>FmgcGR)8;olwJYcl{igAajl@&QMY-2o7d9WoS3OtC zB37HAytdngR^+5llCCwtgenYm#UqiATY9Clj$T34I@6%m@-4RXWlf@wz_&U5Yj?Lw z#j($a+XB9+-AL)S8ilM<`K#ZuB0zC!7`ts%i0nzAu`W0q7fkrDl`oaM0vA1mmT%T< z3Uey?_vFjqTg)j@(8S!w8UA>Fe9Y`L5_3c6k(CP6P4x~uE?^&uR%zgoe<)g} zJ*vJE>o@agvVdF%?Qt&SG`;-dFzjx{M$09?fVo>h6QO;3Y*yc1FA5l%$zG70{qso1 z?Cg&|Kqr+o@T`v1Ym)eMzvE9IF4!tV9VW0~FMz3BfQm!q^E3|3A1{F1J1odLX3S XwwJe}To=LKd2uvUbyQ%==E465A(+Dp literal 0 HcmV?d00001 From 3d51e5a0f83f7a1aff12fc205bdcbd7adf7523cf Mon Sep 17 00:00:00 2001 From: Mayank Vadariya <48036907+mayankvadariya@users.noreply.github.com> Date: Wed, 30 Oct 2024 15:37:03 -0400 Subject: [PATCH 07/31] Revert "Do not slice ListVector" This reverts commit 24a6da93d0acadd42a1c439bc1cb3a6185880722. 
--- .../BigQueryArrowToPageConverter.java | 25 ++++++++++++++++--- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryArrowToPageConverter.java b/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryArrowToPageConverter.java index 24697d91c9c6f..63cad2e2de37a 100644 --- a/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryArrowToPageConverter.java +++ b/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryArrowToPageConverter.java @@ -29,6 +29,7 @@ import io.trino.spi.type.Type; import io.trino.spi.type.VarbinaryType; import io.trino.spi.type.VarcharType; +import org.apache.arrow.memory.ArrowBuf; import org.apache.arrow.memory.BufferAllocator; import org.apache.arrow.vector.BigIntVector; import org.apache.arrow.vector.BitVector; @@ -48,6 +49,7 @@ import org.apache.arrow.vector.complex.StructVector; import org.apache.arrow.vector.ipc.message.ArrowRecordBatch; import org.apache.arrow.vector.types.pojo.Schema; +import org.apache.arrow.vector.util.TransferPair; import java.math.BigDecimal; import java.util.List; @@ -75,6 +77,7 @@ import static java.lang.String.format; import static java.util.Objects.requireNonNull; import static org.apache.arrow.compression.CommonsCompressionFactory.INSTANCE; +import static org.apache.arrow.vector.complex.BaseRepeatedValueVector.OFFSET_WIDTH; import static org.apache.arrow.vector.types.Types.MinorType.DECIMAL256; public class BigQueryArrowToPageConverter @@ -84,10 +87,12 @@ public class BigQueryArrowToPageConverter private final VectorSchemaRoot root; private final VectorLoader loader; private final List columns; + private final BufferAllocator allocator; public BigQueryArrowToPageConverter(BigQueryTypeManager typeManager, BufferAllocator allocator, Schema schema, List columns) { this.typeManager = requireNonNull(typeManager, "typeManager is null"); + this.allocator = requireNonNull(allocator, "allocator is null"); this.columns = ImmutableList.copyOf(requireNonNull(columns, "columns is null")); List vectors = schema.getFields().stream() .map(field -> field.createVector(allocator)) @@ -172,7 +177,7 @@ else if (javaType == LongTimestampWithTimeZone.class) { writeVectorValues(output, vector, index -> writeObjectTimestampWithTimezone(output, type, vector, index), offset, length); } else if (type instanceof ArrayType arrayType) { - writeVectorValues(output, vector, _ -> writeArrayBlock(output, arrayType, vector), offset, length); + writeVectorValues(output, vector, index -> writeArrayBlock(output, arrayType, vector, index), offset, length); } else if (type instanceof RowType rowType) { writeVectorValues(output, vector, index -> writeRowBlock(output, rowType, vector, index), offset, length); @@ -241,11 +246,23 @@ private void writeObjectTimestampWithTimezone(BlockBuilder output, Type type, Fi type.writeObject(output, fromEpochMillisAndFraction(floorDiv(epochMicros, MICROSECONDS_PER_MILLISECOND), picosOfMillis, UTC_KEY)); } - private void writeArrayBlock(BlockBuilder output, ArrayType arrayType, FieldVector vector) + private void writeArrayBlock(BlockBuilder output, ArrayType arrayType, FieldVector vector, int index) { Type elementType = arrayType.getElementType(); - FieldVector innerVector = ((ListVector) vector).getDataVector(); - ((ArrayBlockBuilder) output).buildEntry(elementBuilder -> convertType(elementBuilder, elementType, innerVector, 0, innerVector.getValueCount())); + ((ArrayBlockBuilder) output).buildEntry(elementBuilder -> { + 
ArrowBuf offsetBuffer = vector.getOffsetBuffer(); + + int start = offsetBuffer.getInt((long) index * OFFSET_WIDTH); + int end = offsetBuffer.getInt((long) (index + 1) * OFFSET_WIDTH); + + FieldVector innerVector = ((ListVector) vector).getDataVector(); + + TransferPair transferPair = innerVector.getTransferPair(allocator); + transferPair.splitAndTransfer(start, end - start); + try (FieldVector sliced = (FieldVector) transferPair.getTo()) { + convertType(elementBuilder, elementType, sliced, 0, sliced.getValueCount()); + } + }); } private void writeRowBlock(BlockBuilder output, RowType rowType, FieldVector vector, int index) From aa8a4702b8e9c15f2a301dd2031f3c82a9f40831 Mon Sep 17 00:00:00 2001 From: Mayank Vadariya <48036907+mayankvadariya@users.noreply.github.com> Date: Wed, 30 Oct 2024 16:20:49 -0400 Subject: [PATCH 08/31] Add array data type test in Bigquery connector --- .../plugin/bigquery/BaseBigQueryTypeMapping.java | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/BaseBigQueryTypeMapping.java b/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/BaseBigQueryTypeMapping.java index de5d9abc4bb74..9724da1b9b554 100644 --- a/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/BaseBigQueryTypeMapping.java +++ b/plugin/trino-bigquery/src/test/java/io/trino/plugin/bigquery/BaseBigQueryTypeMapping.java @@ -48,6 +48,7 @@ import static io.trino.type.JsonType.JSON; import static java.lang.String.format; import static java.time.ZoneOffset.UTC; +import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; /** @@ -734,6 +735,18 @@ public void testArray() .execute(getQueryRunner(), bigqueryViewCreateAndInsert("test.array")); } + @Test + public void testArrayType() + { + try (TestTable table = new TestTable(getQueryRunner()::execute, "test_array_", "(a BIGINT, b ARRAY, c ARRAY)")) { + assertUpdate("INSERT INTO " + table.getName() + " (a, b, c) VALUES (5, ARRAY[1.23E1], ARRAY[15]), (6, ARRAY[1.24E1, 1.27E1, 2.23E1], ARRAY[25, 26, 36])", 2); + assertThat(query("SELECT * FROM " + table.getName())) + .matches("VALUES " + + "(BIGINT '5', ARRAY[DOUBLE '12.3'], ARRAY[BIGINT '15']), " + + "(BIGINT '6', ARRAY[DOUBLE '12.4', DOUBLE '12.7', DOUBLE '22.3'], ARRAY[BIGINT '25', BIGINT '26', BIGINT '36'])"); + } + } + @Test public void testUnsupportedNullArray() { From 10e8ad3e51eb7b5038428d675c8f2861841bb025 Mon Sep 17 00:00:00 2001 From: Manfred Moser Date: Wed, 30 Oct 2024 15:25:28 -0700 Subject: [PATCH 09/31] Fix table stats docs for Vertica --- docs/src/main/sphinx/connector/vertica.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/src/main/sphinx/connector/vertica.md b/docs/src/main/sphinx/connector/vertica.md index cdf6dc915686a..0414ae5b2216c 100644 --- a/docs/src/main/sphinx/connector/vertica.md +++ b/docs/src/main/sphinx/connector/vertica.md @@ -221,10 +221,8 @@ The connector supports pushdown for a number of operations: ### Table statistics -You can use [](/sql/analyze) statements in Trino to populate the table -statistics in Vertica. The [cost-based -optimizer](/optimizer/cost-based-optimizations) then uses these statistics to -improve query performance. +The [cost-based optimizer](/optimizer/cost-based-optimizations) can use table +statistics from the Vertica database to improve query performance. Support for table statistics is disabled by default. 
 You can enable it with the catalog property `statistics.enabled` set to
 `true`. In addition, the

From df6652bb0dee759caf2ac707e0639057548c3fec Mon Sep 17 00:00:00 2001
From: Manfred Moser
Date: Wed, 23 Oct 2024 10:09:54 -0700
Subject: [PATCH 10/31] Update docs for internal communication

Add properties to HTTP server props page including HTTP/2 details.
---
 .../sphinx/admin/properties-http-server.md    | 31 +++++++++++++++++++
 .../sphinx/security/internal-communication.md | 19 ++++++++----
 2 files changed, 44 insertions(+), 6 deletions(-)

diff --git a/docs/src/main/sphinx/admin/properties-http-server.md b/docs/src/main/sphinx/admin/properties-http-server.md
index d18a584e9f30b..6a36c205564f2 100644
--- a/docs/src/main/sphinx/admin/properties-http-server.md
+++ b/docs/src/main/sphinx/admin/properties-http-server.md
@@ -159,3 +159,34 @@ Configuration properties for the `PASSWORD` authentication types
 ### `http-server.log.*`
 
 Configuration properties for [](/admin/properties-logging).
+
+(props-internal-communication)=
+## Internal communication
+
+The following properties are used for configuring the [internal
+communication](/security/internal-communication) between all
+[nodes](trino-concept-node) of a Trino cluster.
+
+### `internal-communication.shared-secret`
+
+- **Type:** [](prop-type-string)
+
+The string to use as secret that only the coordinators and workers in a specific
+cluster share and use to authenticate within the cluster. See
+[](internal-secret) for details.
+
+### `internal-communication.http2.enabled`
+
+- **Type:** [](prop-type-boolean)
+- **Default value:** `true`
+
+Enable use of the HTTP/2 protocol for internal communication for enhanced
+scalability compared to HTTP/1.1. Only turn this feature off, if you encounter
+issues with HTTP/2 usage within the cluster in your deployment.
+
+### `internal-communication.https.required`
+
+- **Type:** [](prop-type-boolean)
+- **Default value:** `false`
+
+Enable the use of [SSL/TLS for all internal communication](internal-tls).

diff --git a/docs/src/main/sphinx/security/internal-communication.md b/docs/src/main/sphinx/security/internal-communication.md
index e2847000befed..2bec797f5767e 100644
--- a/docs/src/main/sphinx/security/internal-communication.md
+++ b/docs/src/main/sphinx/security/internal-communication.md
@@ -4,15 +4,16 @@ The Trino cluster can be configured to use secured communication with internal
 authentication of the nodes in the cluster, and to optionally use added
 security with {ref}`TLS `.
 
+(internal-secret)=
 ## Configure shared secret
 
-Configure a shared secret to authenticate all communication between nodes of the
-cluster. Use this configuration under the following conditions:
-
-- When opting to configure [internal TLS encryption](internal-tls)
-  between nodes of the cluster
-- When using any {doc}`external authentication ` method
-  between clients and the coordinator
+You must configure a shared secret to authenticate all communication between
+nodes of the cluster in the following scenarios:
+
+- When using [any authentication](authentication-types) between clients and the
+  coordinator.
+- When using [internal TLS encryption](internal-tls) between all nodes of the
+  cluster.
 
 Set the shared secret to the same value in {ref}`config.properties
 ` on all nodes of the cluster:
@@ -122,6 +123,12 @@ window functions, which require repartitioning), the performance impact can be
 considerable. The slowdown may vary from 10% to even 100%+, depending on the
 network traffic and the CPU utilization.
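+For illustration, a minimal `config.properties` sketch combining the
+properties documented above could look like the following; the secret value is
+a placeholder, and the `http-server.https.*` entries assume the standard HTTPS
+setup for the cluster:
+
+```text
+internal-communication.shared-secret=use-a-long-random-string-here
+internal-communication.https.required=true
+http-server.https.enabled=true
+http-server.https.port=8443
+```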
+:::{note} +By default, internal communication with SSL/TLS enabled uses HTTP/2 for +increased scalability. You can turn off this feature with +`internal-communication.http2.enabled=false`. +::: + (internal-performance)= ### Advanced performance tuning From 0bd992dcb2dde801e30f3ef20028024513d206e2 Mon Sep 17 00:00:00 2001 From: Manfred Moser Date: Mon, 28 Oct 2024 13:46:55 -0700 Subject: [PATCH 11/31] Remove manual internal TLS config info Automatic internal config has been in place for a long time. Discussed with David Phillips and decided to remove this last hint that manual config is even possible. --- docs/src/main/sphinx/admin/properties-http-server.md | 2 +- .../src/main/sphinx/security/internal-communication.md | 10 ---------- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/docs/src/main/sphinx/admin/properties-http-server.md b/docs/src/main/sphinx/admin/properties-http-server.md index 6a36c205564f2..941e03f727370 100644 --- a/docs/src/main/sphinx/admin/properties-http-server.md +++ b/docs/src/main/sphinx/admin/properties-http-server.md @@ -181,7 +181,7 @@ cluster share and use to authenticate within the cluster. See - **Default value:** `true` Enable use of the HTTP/2 protocol for internal communication for enhanced -scalability compared to HTTP/1.1. Only turn this feature off, if you encounter +scalability compared to HTTP/1.1. Only turn this feature off if you encounter issues with HTTP/2 usage within the cluster in your deployment. ### `internal-communication.https.required` diff --git a/docs/src/main/sphinx/security/internal-communication.md b/docs/src/main/sphinx/security/internal-communication.md index 2bec797f5767e..a365250cac8d1 100644 --- a/docs/src/main/sphinx/security/internal-communication.md +++ b/docs/src/main/sphinx/security/internal-communication.md @@ -99,16 +99,6 @@ configuration identical on all cluster nodes. Certificates are automatically created and used to ensure all communication inside the cluster is secured with TLS. -:::{warning} -Older versions of Trino required you to manually manage all the certificates -on the nodes. If you upgrade from this setup, you must remove the following -configuration properties: - -- `internal-communication.https.keystore.path` -- `internal-communication.https.truststore.path` -- `node.internal-address-source` -::: - ### Performance with SSL/TLS enabled Enabling encryption impacts performance. The performance degradation can vary From e13187fb00ddda57b321eb8ae6402823c7e86307 Mon Sep 17 00:00:00 2001 From: Manfred Moser Date: Wed, 23 Oct 2024 12:12:33 -0700 Subject: [PATCH 12/31] Add Trino 464 release notes --- docs/src/main/sphinx/release.md | 1 + docs/src/main/sphinx/release/release-464.md | 50 +++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 docs/src/main/sphinx/release/release-464.md diff --git a/docs/src/main/sphinx/release.md b/docs/src/main/sphinx/release.md index 9279a50d1b2ec..128cf1c568fc7 100644 --- a/docs/src/main/sphinx/release.md +++ b/docs/src/main/sphinx/release.md @@ -6,6 +6,7 @@ ```{toctree} :maxdepth: 1 +release/release-464 release/release-463 release/release-462 release/release-461 diff --git a/docs/src/main/sphinx/release/release-464.md b/docs/src/main/sphinx/release/release-464.md new file mode 100644 index 0000000000000..1d73796111380 --- /dev/null +++ b/docs/src/main/sphinx/release/release-464.md @@ -0,0 +1,50 @@ +# Release 464 (30 Oct 2024) + +## General + +* {{breaking}} Require JDK 23 to run Trino. 
({issue}`21316`) +* Add the [](/connector/faker) for easy generation of data. ({issue}`23691`) +* Add the [](/connector/vertica). ({issue}`23948`) +* Rename the + `fault-tolerant-execution-eager-speculative-tasks-node_memory-overcommit` + configuration property to + `fault-tolerant-execution-eager-speculative-tasks-node-memory-overcommit`. + ({issue}`23876`) + +## Accumulo connector + +* {{breaking}} Remove the Accumulo connector. ({issue}`23792`) + +## Delta Lake connector + +* Fix failure of S3 file listing of buckets that enforce [requester + pays](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html). + ({issue}`23906`) + +## Hive connector + +* Use the `hive.metastore.partition-batch-size.max` catalog configuration + property value in the `sync_partition_metadata` procedure. Change the default + batch size from 1000 to 100. ({issue}`23895`) +* Fix failure of S3 file listing of buckets that enforce [requester + pays](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html). + ({issue}`23906`) + +## Hudi connector + +* Fix failure of S3 file listing of buckets that enforce [requester + pays](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html). + ({issue}`23906`) + +## Iceberg connector + +* Improve performance of `OPTIMIZE` on large partitioned tables. ({issue}`10785`) +* Rename the `iceberg.expire_snapshots.min-retention` configuration property to + `iceberg.expire-snapshots.min-retention`. ({issue}`23876`) +* Rename the `iceberg.remove_orphan_files.min-retention` configuration property + to `iceberg.remove-orphan-files.min-retention`. ({issue}`23876`) +* Fix failure of S3 file listing of buckets that enforce [requester + pays](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RequesterPaysBuckets.html). + ({issue}`23906`) +* Fix incorrect column constraints when using the `migrate` procedure on tables + that contain `NULL` values. ({issue}`23928`) From 9df2715539bf3b798a4b2b94a6c1c523a454cf15 Mon Sep 17 00:00:00 2001 From: Manfred Moser Date: Wed, 30 Oct 2024 16:12:48 -0700 Subject: [PATCH 13/31] Add BigQuery release notes entry for 464 --- docs/src/main/sphinx/release/release-464.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/src/main/sphinx/release/release-464.md b/docs/src/main/sphinx/release/release-464.md index 1d73796111380..d5dc56df32515 100644 --- a/docs/src/main/sphinx/release/release-464.md +++ b/docs/src/main/sphinx/release/release-464.md @@ -15,6 +15,11 @@ * {{breaking}} Remove the Accumulo connector. ({issue}`23792`) +## BigQuery connector + +* Fix incorrect results when reading array columns and + `bigquery.arrow-serialization.enabled` is set to true. 
({issue}`23982`) + ## Delta Lake connector * Fix failure of S3 file listing of buckets that enforce [requester From 299842e3cddde87b2f4b2589edbe53c41743f71a Mon Sep 17 00:00:00 2001 From: Martin Traverso Date: Wed, 30 Oct 2024 23:43:30 +0000 Subject: [PATCH 14/31] [maven-release-plugin] prepare release 464 --- client/trino-cli/pom.xml | 2 +- client/trino-client/pom.xml | 2 +- client/trino-jdbc/pom.xml | 2 +- core/trino-grammar/pom.xml | 2 +- core/trino-main/pom.xml | 2 +- core/trino-parser/pom.xml | 2 +- core/trino-server-main/pom.xml | 2 +- core/trino-server-rpm/pom.xml | 2 +- core/trino-server/pom.xml | 2 +- core/trino-spi/pom.xml | 2 +- core/trino-web-ui/pom.xml | 2 +- docs/pom.xml | 2 +- lib/trino-array/pom.xml | 2 +- lib/trino-cache/pom.xml | 2 +- lib/trino-filesystem-alluxio/pom.xml | 2 +- lib/trino-filesystem-azure/pom.xml | 2 +- lib/trino-filesystem-cache-alluxio/pom.xml | 2 +- lib/trino-filesystem-gcs/pom.xml | 2 +- lib/trino-filesystem-manager/pom.xml | 2 +- lib/trino-filesystem-s3/pom.xml | 2 +- lib/trino-filesystem/pom.xml | 2 +- lib/trino-geospatial-toolkit/pom.xml | 2 +- lib/trino-hdfs/pom.xml | 2 +- lib/trino-hive-formats/pom.xml | 2 +- lib/trino-matching/pom.xml | 2 +- lib/trino-memory-context/pom.xml | 2 +- lib/trino-metastore/pom.xml | 2 +- lib/trino-orc/pom.xml | 2 +- lib/trino-parquet/pom.xml | 2 +- lib/trino-plugin-toolkit/pom.xml | 2 +- lib/trino-record-decoder/pom.xml | 2 +- plugin/trino-base-jdbc/pom.xml | 2 +- plugin/trino-bigquery/pom.xml | 2 +- plugin/trino-blackhole/pom.xml | 2 +- plugin/trino-cassandra/pom.xml | 2 +- plugin/trino-clickhouse/pom.xml | 2 +- plugin/trino-delta-lake/pom.xml | 2 +- plugin/trino-druid/pom.xml | 2 +- plugin/trino-elasticsearch/pom.xml | 2 +- plugin/trino-example-http/pom.xml | 2 +- plugin/trino-example-jdbc/pom.xml | 2 +- plugin/trino-exasol/pom.xml | 2 +- plugin/trino-exchange-filesystem/pom.xml | 2 +- plugin/trino-exchange-hdfs/pom.xml | 2 +- plugin/trino-faker/pom.xml | 2 +- plugin/trino-geospatial/pom.xml | 2 +- plugin/trino-google-sheets/pom.xml | 2 +- plugin/trino-hive/pom.xml | 2 +- plugin/trino-http-event-listener/pom.xml | 2 +- plugin/trino-http-server-event-listener/pom.xml | 2 +- plugin/trino-hudi/pom.xml | 2 +- plugin/trino-iceberg/pom.xml | 2 +- plugin/trino-ignite/pom.xml | 2 +- plugin/trino-jmx/pom.xml | 2 +- plugin/trino-kafka-event-listener/pom.xml | 2 +- plugin/trino-kafka/pom.xml | 2 +- plugin/trino-kinesis/pom.xml | 2 +- plugin/trino-kudu/pom.xml | 2 +- plugin/trino-mariadb/pom.xml | 2 +- plugin/trino-memory/pom.xml | 2 +- plugin/trino-ml/pom.xml | 2 +- plugin/trino-mongodb/pom.xml | 2 +- plugin/trino-mysql-event-listener/pom.xml | 2 +- plugin/trino-mysql/pom.xml | 2 +- plugin/trino-opa/pom.xml | 2 +- plugin/trino-openlineage/pom.xml | 2 +- plugin/trino-opensearch/pom.xml | 2 +- plugin/trino-oracle/pom.xml | 2 +- plugin/trino-password-authenticators/pom.xml | 2 +- plugin/trino-phoenix5/pom.xml | 2 +- plugin/trino-pinot/pom.xml | 2 +- plugin/trino-postgresql/pom.xml | 2 +- plugin/trino-prometheus/pom.xml | 2 +- plugin/trino-redis/pom.xml | 2 +- plugin/trino-redshift/pom.xml | 2 +- plugin/trino-resource-group-managers/pom.xml | 2 +- plugin/trino-session-property-managers/pom.xml | 2 +- plugin/trino-singlestore/pom.xml | 2 +- plugin/trino-snowflake/pom.xml | 2 +- plugin/trino-spooling-filesystem/pom.xml | 2 +- plugin/trino-sqlserver/pom.xml | 2 +- plugin/trino-teradata-functions/pom.xml | 2 +- plugin/trino-thrift-api/pom.xml | 2 +- plugin/trino-thrift-testing-server/pom.xml | 2 +- plugin/trino-thrift/pom.xml | 2 +- 
plugin/trino-tpcds/pom.xml | 2 +- plugin/trino-tpch/pom.xml | 2 +- plugin/trino-vertica/pom.xml | 2 +- pom.xml | 6 +++--- service/trino-proxy/pom.xml | 2 +- service/trino-verifier/pom.xml | 2 +- testing/trino-benchmark-queries/pom.xml | 2 +- testing/trino-benchto-benchmarks/pom.xml | 2 +- testing/trino-faulttolerant-tests/pom.xml | 2 +- testing/trino-plugin-reader/pom.xml | 2 +- testing/trino-product-tests-groups/pom.xml | 2 +- testing/trino-product-tests-launcher/pom.xml | 2 +- testing/trino-product-tests/pom.xml | 2 +- testing/trino-server-dev/pom.xml | 2 +- testing/trino-test-jdbc-compatibility-old-driver/pom.xml | 4 ++-- testing/trino-test-jdbc-compatibility-old-server/pom.xml | 2 +- testing/trino-testing-containers/pom.xml | 2 +- testing/trino-testing-kafka/pom.xml | 2 +- testing/trino-testing-resources/pom.xml | 2 +- testing/trino-testing-services/pom.xml | 2 +- testing/trino-testing/pom.xml | 2 +- testing/trino-tests/pom.xml | 2 +- 107 files changed, 110 insertions(+), 110 deletions(-) diff --git a/client/trino-cli/pom.xml b/client/trino-cli/pom.xml index a86c05ce96317..c92afe74f4b5c 100644 --- a/client/trino-cli/pom.xml +++ b/client/trino-cli/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/client/trino-client/pom.xml b/client/trino-client/pom.xml index 780ba6667dccd..041db4bc56a30 100644 --- a/client/trino-client/pom.xml +++ b/client/trino-client/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/client/trino-jdbc/pom.xml b/client/trino-jdbc/pom.xml index 5dbd809ee204f..31b350a76e91c 100644 --- a/client/trino-jdbc/pom.xml +++ b/client/trino-jdbc/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/core/trino-grammar/pom.xml b/core/trino-grammar/pom.xml index 396d67d40cb84..110e80ab75cc9 100644 --- a/core/trino-grammar/pom.xml +++ b/core/trino-grammar/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/core/trino-main/pom.xml b/core/trino-main/pom.xml index 9c60a9c2e33c2..b85418adb3557 100644 --- a/core/trino-main/pom.xml +++ b/core/trino-main/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/core/trino-parser/pom.xml b/core/trino-parser/pom.xml index 2edce6428deff..bb07d42ea7e8b 100644 --- a/core/trino-parser/pom.xml +++ b/core/trino-parser/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/core/trino-server-main/pom.xml b/core/trino-server-main/pom.xml index 1045af003198b..51f3befe9762c 100644 --- a/core/trino-server-main/pom.xml +++ b/core/trino-server-main/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/core/trino-server-rpm/pom.xml b/core/trino-server-rpm/pom.xml index 2e34a91ca3645..c3125902f8fa9 100644 --- a/core/trino-server-rpm/pom.xml +++ b/core/trino-server-rpm/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/core/trino-server/pom.xml b/core/trino-server/pom.xml index acaa5d0b5736f..8b47855089927 100644 --- a/core/trino-server/pom.xml +++ b/core/trino-server/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/core/trino-spi/pom.xml b/core/trino-spi/pom.xml index ca780cb8b78d3..c940f99be4094 100644 --- a/core/trino-spi/pom.xml +++ b/core/trino-spi/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/core/trino-web-ui/pom.xml b/core/trino-web-ui/pom.xml index 
315a03ad02623..f2b883be0e8f1 100644 --- a/core/trino-web-ui/pom.xml +++ b/core/trino-web-ui/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/docs/pom.xml b/docs/pom.xml index 83bd691679aa9..09e32e0732da9 100644 --- a/docs/pom.xml +++ b/docs/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 trino-docs diff --git a/lib/trino-array/pom.xml b/lib/trino-array/pom.xml index daf4b09f90c61..b3e0a7fb161b9 100644 --- a/lib/trino-array/pom.xml +++ b/lib/trino-array/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-cache/pom.xml b/lib/trino-cache/pom.xml index e7c1b6ae782d7..245f7c5e13981 100644 --- a/lib/trino-cache/pom.xml +++ b/lib/trino-cache/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-filesystem-alluxio/pom.xml b/lib/trino-filesystem-alluxio/pom.xml index 5cbef51d69cb4..bb0d2e355d8aa 100644 --- a/lib/trino-filesystem-alluxio/pom.xml +++ b/lib/trino-filesystem-alluxio/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-filesystem-azure/pom.xml b/lib/trino-filesystem-azure/pom.xml index 337d720afaa31..53214b74c0cfb 100644 --- a/lib/trino-filesystem-azure/pom.xml +++ b/lib/trino-filesystem-azure/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-filesystem-cache-alluxio/pom.xml b/lib/trino-filesystem-cache-alluxio/pom.xml index 4f8407b2668ec..3251e47e30e30 100644 --- a/lib/trino-filesystem-cache-alluxio/pom.xml +++ b/lib/trino-filesystem-cache-alluxio/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-filesystem-gcs/pom.xml b/lib/trino-filesystem-gcs/pom.xml index c7617c1bef64f..d77773f9780bc 100644 --- a/lib/trino-filesystem-gcs/pom.xml +++ b/lib/trino-filesystem-gcs/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-filesystem-manager/pom.xml b/lib/trino-filesystem-manager/pom.xml index 8e48ed0948ae4..2e6bf4470f5a7 100644 --- a/lib/trino-filesystem-manager/pom.xml +++ b/lib/trino-filesystem-manager/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-filesystem-s3/pom.xml b/lib/trino-filesystem-s3/pom.xml index f098695024f9b..38e8530af3fd2 100644 --- a/lib/trino-filesystem-s3/pom.xml +++ b/lib/trino-filesystem-s3/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-filesystem/pom.xml b/lib/trino-filesystem/pom.xml index eec84a4f00b5e..f724a8d1c2760 100644 --- a/lib/trino-filesystem/pom.xml +++ b/lib/trino-filesystem/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-geospatial-toolkit/pom.xml b/lib/trino-geospatial-toolkit/pom.xml index e457f2e302611..8ecc852131148 100644 --- a/lib/trino-geospatial-toolkit/pom.xml +++ b/lib/trino-geospatial-toolkit/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-hdfs/pom.xml b/lib/trino-hdfs/pom.xml index ef58aa4d318ff..7791ff4d18501 100644 --- a/lib/trino-hdfs/pom.xml +++ b/lib/trino-hdfs/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-hive-formats/pom.xml b/lib/trino-hive-formats/pom.xml index 1e102cd5d223e..8dd857e0d8ffe 100644 --- a/lib/trino-hive-formats/pom.xml +++ b/lib/trino-hive-formats/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 
464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-matching/pom.xml b/lib/trino-matching/pom.xml index a1a9343393fbc..4d86232460fc2 100644 --- a/lib/trino-matching/pom.xml +++ b/lib/trino-matching/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-memory-context/pom.xml b/lib/trino-memory-context/pom.xml index 91639d7e20d45..4094d4a43edda 100644 --- a/lib/trino-memory-context/pom.xml +++ b/lib/trino-memory-context/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-metastore/pom.xml b/lib/trino-metastore/pom.xml index f391aa5088a5f..449623cdee6f6 100644 --- a/lib/trino-metastore/pom.xml +++ b/lib/trino-metastore/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-orc/pom.xml b/lib/trino-orc/pom.xml index e722a5f61e209..577c33f1d9753 100644 --- a/lib/trino-orc/pom.xml +++ b/lib/trino-orc/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-parquet/pom.xml b/lib/trino-parquet/pom.xml index 424b3df80b249..402cb7d01d3b3 100644 --- a/lib/trino-parquet/pom.xml +++ b/lib/trino-parquet/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-plugin-toolkit/pom.xml b/lib/trino-plugin-toolkit/pom.xml index 0abca197e292f..c00a772295162 100644 --- a/lib/trino-plugin-toolkit/pom.xml +++ b/lib/trino-plugin-toolkit/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/lib/trino-record-decoder/pom.xml b/lib/trino-record-decoder/pom.xml index 54c7e00edae1a..2d542a4e453df 100644 --- a/lib/trino-record-decoder/pom.xml +++ b/lib/trino-record-decoder/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-base-jdbc/pom.xml b/plugin/trino-base-jdbc/pom.xml index 40dc97bd942e8..e1040f8202fd3 100644 --- a/plugin/trino-base-jdbc/pom.xml +++ b/plugin/trino-base-jdbc/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-bigquery/pom.xml b/plugin/trino-bigquery/pom.xml index 07c85725806ca..aa4821c1d54b6 100644 --- a/plugin/trino-bigquery/pom.xml +++ b/plugin/trino-bigquery/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-blackhole/pom.xml b/plugin/trino-blackhole/pom.xml index c6691c3c229cd..035efb3a887ee 100644 --- a/plugin/trino-blackhole/pom.xml +++ b/plugin/trino-blackhole/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-cassandra/pom.xml b/plugin/trino-cassandra/pom.xml index 1a5c02fd233ad..016c9bbc57258 100644 --- a/plugin/trino-cassandra/pom.xml +++ b/plugin/trino-cassandra/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-clickhouse/pom.xml b/plugin/trino-clickhouse/pom.xml index 17d29647541f4..d5f3f37cd87d9 100644 --- a/plugin/trino-clickhouse/pom.xml +++ b/plugin/trino-clickhouse/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-delta-lake/pom.xml b/plugin/trino-delta-lake/pom.xml index 05bbe668ab0f3..8adc7b49dc968 100644 --- a/plugin/trino-delta-lake/pom.xml +++ b/plugin/trino-delta-lake/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-druid/pom.xml b/plugin/trino-druid/pom.xml index aafe1abd6348a..5d80779281019 100644 --- a/plugin/trino-druid/pom.xml 
+++ b/plugin/trino-druid/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-elasticsearch/pom.xml b/plugin/trino-elasticsearch/pom.xml index 9cbcc755257ab..4a68ca1826dbc 100644 --- a/plugin/trino-elasticsearch/pom.xml +++ b/plugin/trino-elasticsearch/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-example-http/pom.xml b/plugin/trino-example-http/pom.xml index ff1b0fa9fa18b..c5eda57718c92 100644 --- a/plugin/trino-example-http/pom.xml +++ b/plugin/trino-example-http/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-example-jdbc/pom.xml b/plugin/trino-example-jdbc/pom.xml index c0179ec5b3001..950780923fb82 100644 --- a/plugin/trino-example-jdbc/pom.xml +++ b/plugin/trino-example-jdbc/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-exasol/pom.xml b/plugin/trino-exasol/pom.xml index 9ebf5c4483489..83abb5ceb3eed 100644 --- a/plugin/trino-exasol/pom.xml +++ b/plugin/trino-exasol/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-exchange-filesystem/pom.xml b/plugin/trino-exchange-filesystem/pom.xml index 646b7d1371d21..0fc433803b13f 100644 --- a/plugin/trino-exchange-filesystem/pom.xml +++ b/plugin/trino-exchange-filesystem/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-exchange-hdfs/pom.xml b/plugin/trino-exchange-hdfs/pom.xml index 34772fdc4ec94..35babfcedc630 100644 --- a/plugin/trino-exchange-hdfs/pom.xml +++ b/plugin/trino-exchange-hdfs/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-faker/pom.xml b/plugin/trino-faker/pom.xml index 28452667be0e7..1d2af17c5b136 100644 --- a/plugin/trino-faker/pom.xml +++ b/plugin/trino-faker/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-geospatial/pom.xml b/plugin/trino-geospatial/pom.xml index 8179557cd6707..d386b3ea3b193 100644 --- a/plugin/trino-geospatial/pom.xml +++ b/plugin/trino-geospatial/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-google-sheets/pom.xml b/plugin/trino-google-sheets/pom.xml index 1ec7ec351635e..6b649aae7b5de 100644 --- a/plugin/trino-google-sheets/pom.xml +++ b/plugin/trino-google-sheets/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-hive/pom.xml b/plugin/trino-hive/pom.xml index 41c71a73f0eab..ad7c19551c724 100644 --- a/plugin/trino-hive/pom.xml +++ b/plugin/trino-hive/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-http-event-listener/pom.xml b/plugin/trino-http-event-listener/pom.xml index 87acbf942142e..0d13f868e1b60 100644 --- a/plugin/trino-http-event-listener/pom.xml +++ b/plugin/trino-http-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-http-server-event-listener/pom.xml b/plugin/trino-http-server-event-listener/pom.xml index 6750ea4e4296c..3cfad9b2a0f40 100644 --- a/plugin/trino-http-server-event-listener/pom.xml +++ b/plugin/trino-http-server-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-hudi/pom.xml b/plugin/trino-hudi/pom.xml index 69688d64d50e2..c93be563a5fff 
100644 --- a/plugin/trino-hudi/pom.xml +++ b/plugin/trino-hudi/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-iceberg/pom.xml b/plugin/trino-iceberg/pom.xml index 8699d669d6db6..d19b33f41ad1c 100644 --- a/plugin/trino-iceberg/pom.xml +++ b/plugin/trino-iceberg/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-ignite/pom.xml b/plugin/trino-ignite/pom.xml index e632db459a264..12667df758522 100644 --- a/plugin/trino-ignite/pom.xml +++ b/plugin/trino-ignite/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-jmx/pom.xml b/plugin/trino-jmx/pom.xml index 0caa8c593168c..b8fdc777448af 100644 --- a/plugin/trino-jmx/pom.xml +++ b/plugin/trino-jmx/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-kafka-event-listener/pom.xml b/plugin/trino-kafka-event-listener/pom.xml index e2291c5be5d57..a98ccba0fb351 100644 --- a/plugin/trino-kafka-event-listener/pom.xml +++ b/plugin/trino-kafka-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-kafka/pom.xml b/plugin/trino-kafka/pom.xml index 205b0de6da312..05574a51bce0f 100644 --- a/plugin/trino-kafka/pom.xml +++ b/plugin/trino-kafka/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-kinesis/pom.xml b/plugin/trino-kinesis/pom.xml index bfa7282d39a8e..222976bdfa19f 100644 --- a/plugin/trino-kinesis/pom.xml +++ b/plugin/trino-kinesis/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-kudu/pom.xml b/plugin/trino-kudu/pom.xml index 93ca495e59796..1eb4e2b0bdd6f 100644 --- a/plugin/trino-kudu/pom.xml +++ b/plugin/trino-kudu/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-mariadb/pom.xml b/plugin/trino-mariadb/pom.xml index 13978d1a7ae3b..b3f1e2a088b0e 100644 --- a/plugin/trino-mariadb/pom.xml +++ b/plugin/trino-mariadb/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-memory/pom.xml b/plugin/trino-memory/pom.xml index 2aa8d00520ee4..36b78ad5a29cc 100644 --- a/plugin/trino-memory/pom.xml +++ b/plugin/trino-memory/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-ml/pom.xml b/plugin/trino-ml/pom.xml index 010f830ecc51a..f7a3fb948b340 100644 --- a/plugin/trino-ml/pom.xml +++ b/plugin/trino-ml/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-mongodb/pom.xml b/plugin/trino-mongodb/pom.xml index 296acf390b444..a26b04797f8bc 100644 --- a/plugin/trino-mongodb/pom.xml +++ b/plugin/trino-mongodb/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-mysql-event-listener/pom.xml b/plugin/trino-mysql-event-listener/pom.xml index df9f1ce57f442..bd06e05858f6e 100644 --- a/plugin/trino-mysql-event-listener/pom.xml +++ b/plugin/trino-mysql-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-mysql/pom.xml b/plugin/trino-mysql/pom.xml index f65206f27fc31..54349993e01e9 100644 --- a/plugin/trino-mysql/pom.xml +++ b/plugin/trino-mysql/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-opa/pom.xml 
b/plugin/trino-opa/pom.xml index 5e1a2f90f720e..360665aeeff5e 100644 --- a/plugin/trino-opa/pom.xml +++ b/plugin/trino-opa/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-openlineage/pom.xml b/plugin/trino-openlineage/pom.xml index 14e5b3b7193fc..c75a8ceb43d61 100644 --- a/plugin/trino-openlineage/pom.xml +++ b/plugin/trino-openlineage/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-opensearch/pom.xml b/plugin/trino-opensearch/pom.xml index 3bf1476efe032..501ca2867a3fa 100644 --- a/plugin/trino-opensearch/pom.xml +++ b/plugin/trino-opensearch/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-oracle/pom.xml b/plugin/trino-oracle/pom.xml index 281a1685c9ee8..ecb2a8a407438 100644 --- a/plugin/trino-oracle/pom.xml +++ b/plugin/trino-oracle/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-password-authenticators/pom.xml b/plugin/trino-password-authenticators/pom.xml index 132c416c7d3dd..8ca6400be8669 100644 --- a/plugin/trino-password-authenticators/pom.xml +++ b/plugin/trino-password-authenticators/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-phoenix5/pom.xml b/plugin/trino-phoenix5/pom.xml index 8982c2efbf65e..bf16251ed6337 100644 --- a/plugin/trino-phoenix5/pom.xml +++ b/plugin/trino-phoenix5/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-pinot/pom.xml b/plugin/trino-pinot/pom.xml index 3cb2188b7aed1..b47b61521ca43 100755 --- a/plugin/trino-pinot/pom.xml +++ b/plugin/trino-pinot/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-postgresql/pom.xml b/plugin/trino-postgresql/pom.xml index 16815aedde1e6..4fdb5d68268bf 100644 --- a/plugin/trino-postgresql/pom.xml +++ b/plugin/trino-postgresql/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-prometheus/pom.xml b/plugin/trino-prometheus/pom.xml index f6dfd8b966ee1..b68df1d2d3731 100644 --- a/plugin/trino-prometheus/pom.xml +++ b/plugin/trino-prometheus/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-redis/pom.xml b/plugin/trino-redis/pom.xml index 153ab09f9d84d..58efa78bdc681 100644 --- a/plugin/trino-redis/pom.xml +++ b/plugin/trino-redis/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-redshift/pom.xml b/plugin/trino-redshift/pom.xml index 7feef70f7d0ce..ed0b6a08e68fe 100644 --- a/plugin/trino-redshift/pom.xml +++ b/plugin/trino-redshift/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-resource-group-managers/pom.xml b/plugin/trino-resource-group-managers/pom.xml index f44af60f1b384..4db338a2a771a 100644 --- a/plugin/trino-resource-group-managers/pom.xml +++ b/plugin/trino-resource-group-managers/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-session-property-managers/pom.xml b/plugin/trino-session-property-managers/pom.xml index 1531dad8c3b87..6813ef0137efc 100644 --- a/plugin/trino-session-property-managers/pom.xml +++ b/plugin/trino-session-property-managers/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git 
a/plugin/trino-singlestore/pom.xml b/plugin/trino-singlestore/pom.xml index 4481f4ede795d..215d7f0f2f599 100644 --- a/plugin/trino-singlestore/pom.xml +++ b/plugin/trino-singlestore/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-snowflake/pom.xml b/plugin/trino-snowflake/pom.xml index 4de091faf0650..e03164ec5f8d8 100644 --- a/plugin/trino-snowflake/pom.xml +++ b/plugin/trino-snowflake/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-spooling-filesystem/pom.xml b/plugin/trino-spooling-filesystem/pom.xml index 2dc87c8c82242..ad17f87043c75 100644 --- a/plugin/trino-spooling-filesystem/pom.xml +++ b/plugin/trino-spooling-filesystem/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-sqlserver/pom.xml b/plugin/trino-sqlserver/pom.xml index 2f5bbe11f7af8..ea1193c419c94 100644 --- a/plugin/trino-sqlserver/pom.xml +++ b/plugin/trino-sqlserver/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-teradata-functions/pom.xml b/plugin/trino-teradata-functions/pom.xml index 457ed3e555427..2e2862b0bdb67 100644 --- a/plugin/trino-teradata-functions/pom.xml +++ b/plugin/trino-teradata-functions/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-thrift-api/pom.xml b/plugin/trino-thrift-api/pom.xml index 68ac36debef46..2da5cc20d37ee 100644 --- a/plugin/trino-thrift-api/pom.xml +++ b/plugin/trino-thrift-api/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-thrift-testing-server/pom.xml b/plugin/trino-thrift-testing-server/pom.xml index 34fb5758cfe19..c22fc21409a1e 100644 --- a/plugin/trino-thrift-testing-server/pom.xml +++ b/plugin/trino-thrift-testing-server/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-thrift/pom.xml b/plugin/trino-thrift/pom.xml index e803990bfb639..55e02bc199ad3 100644 --- a/plugin/trino-thrift/pom.xml +++ b/plugin/trino-thrift/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-tpcds/pom.xml b/plugin/trino-tpcds/pom.xml index fc905cdc69acf..074a11caf15cf 100644 --- a/plugin/trino-tpcds/pom.xml +++ b/plugin/trino-tpcds/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-tpch/pom.xml b/plugin/trino-tpch/pom.xml index c8948ee89162b..64173feb555e6 100644 --- a/plugin/trino-tpch/pom.xml +++ b/plugin/trino-tpch/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/plugin/trino-vertica/pom.xml b/plugin/trino-vertica/pom.xml index aeae3c3667f64..06074dcbb0d13 100644 --- a/plugin/trino-vertica/pom.xml +++ b/plugin/trino-vertica/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/pom.xml b/pom.xml index 99a4b4bffed77..4e81065443a14 100644 --- a/pom.xml +++ b/pom.xml @@ -10,7 +10,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 pom ${project.artifactId} @@ -139,14 +139,14 @@ scm:git:git://github.com/trinodb/trino.git scm:git:git@github.com:trinodb/trino.git - HEAD + 464 https://github.com/trinodb/trino 23 - 2024-10-23T17:52:24Z + 2024-10-30T23:26:09Z ERROR diff --git a/service/trino-proxy/pom.xml b/service/trino-proxy/pom.xml index ba20e050a7e2a..0f19f30b78085 100644 --- a/service/trino-proxy/pom.xml +++ b/service/trino-proxy/pom.xml @@ -5,7 
+5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/service/trino-verifier/pom.xml b/service/trino-verifier/pom.xml index a2bfb66a12378..2c2880b164d30 100644 --- a/service/trino-verifier/pom.xml +++ b/service/trino-verifier/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-benchmark-queries/pom.xml b/testing/trino-benchmark-queries/pom.xml index 0c024ab011381..1a90127bc911a 100644 --- a/testing/trino-benchmark-queries/pom.xml +++ b/testing/trino-benchmark-queries/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-benchto-benchmarks/pom.xml b/testing/trino-benchto-benchmarks/pom.xml index 8506c689c00bc..3d2e59df9b49d 100644 --- a/testing/trino-benchto-benchmarks/pom.xml +++ b/testing/trino-benchto-benchmarks/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-faulttolerant-tests/pom.xml b/testing/trino-faulttolerant-tests/pom.xml index 9327aa3762364..117c16d96b690 100644 --- a/testing/trino-faulttolerant-tests/pom.xml +++ b/testing/trino-faulttolerant-tests/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-plugin-reader/pom.xml b/testing/trino-plugin-reader/pom.xml index 88491a52f2536..e4cfbdfe436ad 100644 --- a/testing/trino-plugin-reader/pom.xml +++ b/testing/trino-plugin-reader/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-product-tests-groups/pom.xml b/testing/trino-product-tests-groups/pom.xml index ef53d78861923..907d6b17dc3b9 100644 --- a/testing/trino-product-tests-groups/pom.xml +++ b/testing/trino-product-tests-groups/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-product-tests-launcher/pom.xml b/testing/trino-product-tests-launcher/pom.xml index 74cd9be1e9898..a6439346440e9 100644 --- a/testing/trino-product-tests-launcher/pom.xml +++ b/testing/trino-product-tests-launcher/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-product-tests/pom.xml b/testing/trino-product-tests/pom.xml index 0adb535509a76..ec1ba7145707e 100644 --- a/testing/trino-product-tests/pom.xml +++ b/testing/trino-product-tests/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-server-dev/pom.xml b/testing/trino-server-dev/pom.xml index 6343dbace9fb7..c6fb0528852bb 100644 --- a/testing/trino-server-dev/pom.xml +++ b/testing/trino-server-dev/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-test-jdbc-compatibility-old-driver/pom.xml b/testing/trino-test-jdbc-compatibility-old-driver/pom.xml index 2a951846d8573..78c2a20392e3f 100644 --- a/testing/trino-test-jdbc-compatibility-old-driver/pom.xml +++ b/testing/trino-test-jdbc-compatibility-old-driver/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml @@ -14,7 +14,7 @@ - 464-SNAPSHOT + 464 diff --git a/testing/trino-test-jdbc-compatibility-old-server/pom.xml b/testing/trino-test-jdbc-compatibility-old-server/pom.xml index 6de83cb62be34..4398c2a49f1b8 100644 --- a/testing/trino-test-jdbc-compatibility-old-server/pom.xml +++ b/testing/trino-test-jdbc-compatibility-old-server/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-testing-containers/pom.xml 
b/testing/trino-testing-containers/pom.xml index 1f985d3733032..cae2b6b84e108 100644 --- a/testing/trino-testing-containers/pom.xml +++ b/testing/trino-testing-containers/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-testing-kafka/pom.xml b/testing/trino-testing-kafka/pom.xml index 813d5a56cfd9d..1c6fed292153b 100644 --- a/testing/trino-testing-kafka/pom.xml +++ b/testing/trino-testing-kafka/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-testing-resources/pom.xml b/testing/trino-testing-resources/pom.xml index b0fb2970a3538..2e2aaab9022a3 100644 --- a/testing/trino-testing-resources/pom.xml +++ b/testing/trino-testing-resources/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-testing-services/pom.xml b/testing/trino-testing-services/pom.xml index 52911eb8c1890..025d6e4890645 100644 --- a/testing/trino-testing-services/pom.xml +++ b/testing/trino-testing-services/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-testing/pom.xml b/testing/trino-testing/pom.xml index 421123fdce9b4..d6f23922d1149 100644 --- a/testing/trino-testing/pom.xml +++ b/testing/trino-testing/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml diff --git a/testing/trino-tests/pom.xml b/testing/trino-tests/pom.xml index 25df09ffa78ba..46e55e29d87ab 100644 --- a/testing/trino-tests/pom.xml +++ b/testing/trino-tests/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464-SNAPSHOT + 464 ../../pom.xml From fc08c12dd437412af5fde497bd82b67135380fd1 Mon Sep 17 00:00:00 2001 From: Martin Traverso Date: Wed, 30 Oct 2024 23:43:31 +0000 Subject: [PATCH 15/31] [maven-release-plugin] prepare for next development iteration --- client/trino-cli/pom.xml | 2 +- client/trino-client/pom.xml | 2 +- client/trino-jdbc/pom.xml | 2 +- core/trino-grammar/pom.xml | 2 +- core/trino-main/pom.xml | 2 +- core/trino-parser/pom.xml | 2 +- core/trino-server-main/pom.xml | 2 +- core/trino-server-rpm/pom.xml | 2 +- core/trino-server/pom.xml | 2 +- core/trino-spi/pom.xml | 2 +- core/trino-web-ui/pom.xml | 2 +- docs/pom.xml | 2 +- lib/trino-array/pom.xml | 2 +- lib/trino-cache/pom.xml | 2 +- lib/trino-filesystem-alluxio/pom.xml | 2 +- lib/trino-filesystem-azure/pom.xml | 2 +- lib/trino-filesystem-cache-alluxio/pom.xml | 2 +- lib/trino-filesystem-gcs/pom.xml | 2 +- lib/trino-filesystem-manager/pom.xml | 2 +- lib/trino-filesystem-s3/pom.xml | 2 +- lib/trino-filesystem/pom.xml | 2 +- lib/trino-geospatial-toolkit/pom.xml | 2 +- lib/trino-hdfs/pom.xml | 2 +- lib/trino-hive-formats/pom.xml | 2 +- lib/trino-matching/pom.xml | 2 +- lib/trino-memory-context/pom.xml | 2 +- lib/trino-metastore/pom.xml | 2 +- lib/trino-orc/pom.xml | 2 +- lib/trino-parquet/pom.xml | 2 +- lib/trino-plugin-toolkit/pom.xml | 2 +- lib/trino-record-decoder/pom.xml | 2 +- plugin/trino-base-jdbc/pom.xml | 2 +- plugin/trino-bigquery/pom.xml | 2 +- plugin/trino-blackhole/pom.xml | 2 +- plugin/trino-cassandra/pom.xml | 2 +- plugin/trino-clickhouse/pom.xml | 2 +- plugin/trino-delta-lake/pom.xml | 2 +- plugin/trino-druid/pom.xml | 2 +- plugin/trino-elasticsearch/pom.xml | 2 +- plugin/trino-example-http/pom.xml | 2 +- plugin/trino-example-jdbc/pom.xml | 2 +- plugin/trino-exasol/pom.xml | 2 +- plugin/trino-exchange-filesystem/pom.xml | 2 +- plugin/trino-exchange-hdfs/pom.xml | 2 +- plugin/trino-faker/pom.xml | 2 +- plugin/trino-geospatial/pom.xml | 2 
+- plugin/trino-google-sheets/pom.xml | 2 +- plugin/trino-hive/pom.xml | 2 +- plugin/trino-http-event-listener/pom.xml | 2 +- plugin/trino-http-server-event-listener/pom.xml | 2 +- plugin/trino-hudi/pom.xml | 2 +- plugin/trino-iceberg/pom.xml | 2 +- plugin/trino-ignite/pom.xml | 2 +- plugin/trino-jmx/pom.xml | 2 +- plugin/trino-kafka-event-listener/pom.xml | 2 +- plugin/trino-kafka/pom.xml | 2 +- plugin/trino-kinesis/pom.xml | 2 +- plugin/trino-kudu/pom.xml | 2 +- plugin/trino-mariadb/pom.xml | 2 +- plugin/trino-memory/pom.xml | 2 +- plugin/trino-ml/pom.xml | 2 +- plugin/trino-mongodb/pom.xml | 2 +- plugin/trino-mysql-event-listener/pom.xml | 2 +- plugin/trino-mysql/pom.xml | 2 +- plugin/trino-opa/pom.xml | 2 +- plugin/trino-openlineage/pom.xml | 2 +- plugin/trino-opensearch/pom.xml | 2 +- plugin/trino-oracle/pom.xml | 2 +- plugin/trino-password-authenticators/pom.xml | 2 +- plugin/trino-phoenix5/pom.xml | 2 +- plugin/trino-pinot/pom.xml | 2 +- plugin/trino-postgresql/pom.xml | 2 +- plugin/trino-prometheus/pom.xml | 2 +- plugin/trino-redis/pom.xml | 2 +- plugin/trino-redshift/pom.xml | 2 +- plugin/trino-resource-group-managers/pom.xml | 2 +- plugin/trino-session-property-managers/pom.xml | 2 +- plugin/trino-singlestore/pom.xml | 2 +- plugin/trino-snowflake/pom.xml | 2 +- plugin/trino-spooling-filesystem/pom.xml | 2 +- plugin/trino-sqlserver/pom.xml | 2 +- plugin/trino-teradata-functions/pom.xml | 2 +- plugin/trino-thrift-api/pom.xml | 2 +- plugin/trino-thrift-testing-server/pom.xml | 2 +- plugin/trino-thrift/pom.xml | 2 +- plugin/trino-tpcds/pom.xml | 2 +- plugin/trino-tpch/pom.xml | 2 +- plugin/trino-vertica/pom.xml | 2 +- pom.xml | 6 +++--- service/trino-proxy/pom.xml | 2 +- service/trino-verifier/pom.xml | 2 +- testing/trino-benchmark-queries/pom.xml | 2 +- testing/trino-benchto-benchmarks/pom.xml | 2 +- testing/trino-faulttolerant-tests/pom.xml | 2 +- testing/trino-plugin-reader/pom.xml | 2 +- testing/trino-product-tests-groups/pom.xml | 2 +- testing/trino-product-tests-launcher/pom.xml | 2 +- testing/trino-product-tests/pom.xml | 2 +- testing/trino-server-dev/pom.xml | 2 +- testing/trino-test-jdbc-compatibility-old-driver/pom.xml | 4 ++-- testing/trino-test-jdbc-compatibility-old-server/pom.xml | 2 +- testing/trino-testing-containers/pom.xml | 2 +- testing/trino-testing-kafka/pom.xml | 2 +- testing/trino-testing-resources/pom.xml | 2 +- testing/trino-testing-services/pom.xml | 2 +- testing/trino-testing/pom.xml | 2 +- testing/trino-tests/pom.xml | 2 +- 107 files changed, 110 insertions(+), 110 deletions(-) diff --git a/client/trino-cli/pom.xml b/client/trino-cli/pom.xml index c92afe74f4b5c..dcf921a93e827 100644 --- a/client/trino-cli/pom.xml +++ b/client/trino-cli/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/client/trino-client/pom.xml b/client/trino-client/pom.xml index 041db4bc56a30..9d0c58312ef98 100644 --- a/client/trino-client/pom.xml +++ b/client/trino-client/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/client/trino-jdbc/pom.xml b/client/trino-jdbc/pom.xml index 31b350a76e91c..4990bf795b8e1 100644 --- a/client/trino-jdbc/pom.xml +++ b/client/trino-jdbc/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/core/trino-grammar/pom.xml b/core/trino-grammar/pom.xml index 110e80ab75cc9..6f6f9ea58cd76 100644 --- a/core/trino-grammar/pom.xml +++ b/core/trino-grammar/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git 
a/core/trino-main/pom.xml b/core/trino-main/pom.xml index b85418adb3557..e138ce7bf0fb7 100644 --- a/core/trino-main/pom.xml +++ b/core/trino-main/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/core/trino-parser/pom.xml b/core/trino-parser/pom.xml index bb07d42ea7e8b..21772f85347fb 100644 --- a/core/trino-parser/pom.xml +++ b/core/trino-parser/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/core/trino-server-main/pom.xml b/core/trino-server-main/pom.xml index 51f3befe9762c..f13efa03bd528 100644 --- a/core/trino-server-main/pom.xml +++ b/core/trino-server-main/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/core/trino-server-rpm/pom.xml b/core/trino-server-rpm/pom.xml index c3125902f8fa9..2bc5a24015082 100644 --- a/core/trino-server-rpm/pom.xml +++ b/core/trino-server-rpm/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/core/trino-server/pom.xml b/core/trino-server/pom.xml index 8b47855089927..21286cb009221 100644 --- a/core/trino-server/pom.xml +++ b/core/trino-server/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/core/trino-spi/pom.xml b/core/trino-spi/pom.xml index c940f99be4094..d70ec48600592 100644 --- a/core/trino-spi/pom.xml +++ b/core/trino-spi/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/core/trino-web-ui/pom.xml b/core/trino-web-ui/pom.xml index f2b883be0e8f1..ca183c838b804 100644 --- a/core/trino-web-ui/pom.xml +++ b/core/trino-web-ui/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/docs/pom.xml b/docs/pom.xml index 09e32e0732da9..b59b6929d6e18 100644 --- a/docs/pom.xml +++ b/docs/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT trino-docs diff --git a/lib/trino-array/pom.xml b/lib/trino-array/pom.xml index b3e0a7fb161b9..95b546f37eb7f 100644 --- a/lib/trino-array/pom.xml +++ b/lib/trino-array/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-cache/pom.xml b/lib/trino-cache/pom.xml index 245f7c5e13981..f9e2d0e8223c2 100644 --- a/lib/trino-cache/pom.xml +++ b/lib/trino-cache/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-alluxio/pom.xml b/lib/trino-filesystem-alluxio/pom.xml index bb0d2e355d8aa..5985edbf9e950 100644 --- a/lib/trino-filesystem-alluxio/pom.xml +++ b/lib/trino-filesystem-alluxio/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-azure/pom.xml b/lib/trino-filesystem-azure/pom.xml index 53214b74c0cfb..1a3c68827a884 100644 --- a/lib/trino-filesystem-azure/pom.xml +++ b/lib/trino-filesystem-azure/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-cache-alluxio/pom.xml b/lib/trino-filesystem-cache-alluxio/pom.xml index 3251e47e30e30..2db4b52c48642 100644 --- a/lib/trino-filesystem-cache-alluxio/pom.xml +++ b/lib/trino-filesystem-cache-alluxio/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-gcs/pom.xml b/lib/trino-filesystem-gcs/pom.xml index d77773f9780bc..3073c57e8fcf3 100644 --- a/lib/trino-filesystem-gcs/pom.xml +++ b/lib/trino-filesystem-gcs/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git 
a/lib/trino-filesystem-manager/pom.xml b/lib/trino-filesystem-manager/pom.xml index 2e6bf4470f5a7..70203543cf332 100644 --- a/lib/trino-filesystem-manager/pom.xml +++ b/lib/trino-filesystem-manager/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem-s3/pom.xml b/lib/trino-filesystem-s3/pom.xml index 38e8530af3fd2..d0df6d3298ffb 100644 --- a/lib/trino-filesystem-s3/pom.xml +++ b/lib/trino-filesystem-s3/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-filesystem/pom.xml b/lib/trino-filesystem/pom.xml index f724a8d1c2760..f2861f8d76728 100644 --- a/lib/trino-filesystem/pom.xml +++ b/lib/trino-filesystem/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-geospatial-toolkit/pom.xml b/lib/trino-geospatial-toolkit/pom.xml index 8ecc852131148..7ca4345e61fab 100644 --- a/lib/trino-geospatial-toolkit/pom.xml +++ b/lib/trino-geospatial-toolkit/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-hdfs/pom.xml b/lib/trino-hdfs/pom.xml index 7791ff4d18501..d95a645ddc204 100644 --- a/lib/trino-hdfs/pom.xml +++ b/lib/trino-hdfs/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-hive-formats/pom.xml b/lib/trino-hive-formats/pom.xml index 8dd857e0d8ffe..6ebc299db7f53 100644 --- a/lib/trino-hive-formats/pom.xml +++ b/lib/trino-hive-formats/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-matching/pom.xml b/lib/trino-matching/pom.xml index 4d86232460fc2..ce4499c14fb78 100644 --- a/lib/trino-matching/pom.xml +++ b/lib/trino-matching/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-memory-context/pom.xml b/lib/trino-memory-context/pom.xml index 4094d4a43edda..08e2aefd2820a 100644 --- a/lib/trino-memory-context/pom.xml +++ b/lib/trino-memory-context/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-metastore/pom.xml b/lib/trino-metastore/pom.xml index 449623cdee6f6..67d1396ce1e15 100644 --- a/lib/trino-metastore/pom.xml +++ b/lib/trino-metastore/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-orc/pom.xml b/lib/trino-orc/pom.xml index 577c33f1d9753..ba6c0c915632b 100644 --- a/lib/trino-orc/pom.xml +++ b/lib/trino-orc/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-parquet/pom.xml b/lib/trino-parquet/pom.xml index 402cb7d01d3b3..d9360e3c7d2c5 100644 --- a/lib/trino-parquet/pom.xml +++ b/lib/trino-parquet/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-plugin-toolkit/pom.xml b/lib/trino-plugin-toolkit/pom.xml index c00a772295162..6791f0c3d55f9 100644 --- a/lib/trino-plugin-toolkit/pom.xml +++ b/lib/trino-plugin-toolkit/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/lib/trino-record-decoder/pom.xml b/lib/trino-record-decoder/pom.xml index 2d542a4e453df..10a8e27c1fce5 100644 --- a/lib/trino-record-decoder/pom.xml +++ b/lib/trino-record-decoder/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-base-jdbc/pom.xml b/plugin/trino-base-jdbc/pom.xml index e1040f8202fd3..67a1e84efb597 100644 --- a/plugin/trino-base-jdbc/pom.xml +++ 
b/plugin/trino-base-jdbc/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-bigquery/pom.xml b/plugin/trino-bigquery/pom.xml index aa4821c1d54b6..028ff5705b134 100644 --- a/plugin/trino-bigquery/pom.xml +++ b/plugin/trino-bigquery/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-blackhole/pom.xml b/plugin/trino-blackhole/pom.xml index 035efb3a887ee..09cb4595d0e2f 100644 --- a/plugin/trino-blackhole/pom.xml +++ b/plugin/trino-blackhole/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-cassandra/pom.xml b/plugin/trino-cassandra/pom.xml index 016c9bbc57258..ee86a07fa6d23 100644 --- a/plugin/trino-cassandra/pom.xml +++ b/plugin/trino-cassandra/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-clickhouse/pom.xml b/plugin/trino-clickhouse/pom.xml index d5f3f37cd87d9..e32f11d8fdc4b 100644 --- a/plugin/trino-clickhouse/pom.xml +++ b/plugin/trino-clickhouse/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-delta-lake/pom.xml b/plugin/trino-delta-lake/pom.xml index 8adc7b49dc968..4c531b6748087 100644 --- a/plugin/trino-delta-lake/pom.xml +++ b/plugin/trino-delta-lake/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-druid/pom.xml b/plugin/trino-druid/pom.xml index 5d80779281019..5d82b3de6599d 100644 --- a/plugin/trino-druid/pom.xml +++ b/plugin/trino-druid/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-elasticsearch/pom.xml b/plugin/trino-elasticsearch/pom.xml index 4a68ca1826dbc..3de15f310bf54 100644 --- a/plugin/trino-elasticsearch/pom.xml +++ b/plugin/trino-elasticsearch/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-example-http/pom.xml b/plugin/trino-example-http/pom.xml index c5eda57718c92..d14887e44bcff 100644 --- a/plugin/trino-example-http/pom.xml +++ b/plugin/trino-example-http/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-example-jdbc/pom.xml b/plugin/trino-example-jdbc/pom.xml index 950780923fb82..d2b2a72949c70 100644 --- a/plugin/trino-example-jdbc/pom.xml +++ b/plugin/trino-example-jdbc/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-exasol/pom.xml b/plugin/trino-exasol/pom.xml index 83abb5ceb3eed..641a869dd6b12 100644 --- a/plugin/trino-exasol/pom.xml +++ b/plugin/trino-exasol/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-exchange-filesystem/pom.xml b/plugin/trino-exchange-filesystem/pom.xml index 0fc433803b13f..d2772fbea3179 100644 --- a/plugin/trino-exchange-filesystem/pom.xml +++ b/plugin/trino-exchange-filesystem/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-exchange-hdfs/pom.xml b/plugin/trino-exchange-hdfs/pom.xml index 35babfcedc630..b89d064c5fe15 100644 --- a/plugin/trino-exchange-hdfs/pom.xml +++ b/plugin/trino-exchange-hdfs/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-faker/pom.xml b/plugin/trino-faker/pom.xml index 1d2af17c5b136..54bfc2a11f1b3 100644 --- a/plugin/trino-faker/pom.xml +++ b/plugin/trino-faker/pom.xml @@ -5,7 +5,7 @@ io.trino 
trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-geospatial/pom.xml b/plugin/trino-geospatial/pom.xml index d386b3ea3b193..be75cf00f9be1 100644 --- a/plugin/trino-geospatial/pom.xml +++ b/plugin/trino-geospatial/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-google-sheets/pom.xml b/plugin/trino-google-sheets/pom.xml index 6b649aae7b5de..44c162b2fee52 100644 --- a/plugin/trino-google-sheets/pom.xml +++ b/plugin/trino-google-sheets/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-hive/pom.xml b/plugin/trino-hive/pom.xml index ad7c19551c724..bb09a21b9606e 100644 --- a/plugin/trino-hive/pom.xml +++ b/plugin/trino-hive/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-http-event-listener/pom.xml b/plugin/trino-http-event-listener/pom.xml index 0d13f868e1b60..9dfa7c093dee0 100644 --- a/plugin/trino-http-event-listener/pom.xml +++ b/plugin/trino-http-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-http-server-event-listener/pom.xml b/plugin/trino-http-server-event-listener/pom.xml index 3cfad9b2a0f40..c1bbf5efc3808 100644 --- a/plugin/trino-http-server-event-listener/pom.xml +++ b/plugin/trino-http-server-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-hudi/pom.xml b/plugin/trino-hudi/pom.xml index c93be563a5fff..b2097c4829195 100644 --- a/plugin/trino-hudi/pom.xml +++ b/plugin/trino-hudi/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-iceberg/pom.xml b/plugin/trino-iceberg/pom.xml index d19b33f41ad1c..e7fe1929aa405 100644 --- a/plugin/trino-iceberg/pom.xml +++ b/plugin/trino-iceberg/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-ignite/pom.xml b/plugin/trino-ignite/pom.xml index 12667df758522..61a1a7c1b5836 100644 --- a/plugin/trino-ignite/pom.xml +++ b/plugin/trino-ignite/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-jmx/pom.xml b/plugin/trino-jmx/pom.xml index b8fdc777448af..ca8fbf329c93a 100644 --- a/plugin/trino-jmx/pom.xml +++ b/plugin/trino-jmx/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-kafka-event-listener/pom.xml b/plugin/trino-kafka-event-listener/pom.xml index a98ccba0fb351..5edec5ce33d69 100644 --- a/plugin/trino-kafka-event-listener/pom.xml +++ b/plugin/trino-kafka-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-kafka/pom.xml b/plugin/trino-kafka/pom.xml index 05574a51bce0f..e96389a0bb6ee 100644 --- a/plugin/trino-kafka/pom.xml +++ b/plugin/trino-kafka/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-kinesis/pom.xml b/plugin/trino-kinesis/pom.xml index 222976bdfa19f..cdecaf1a8a73d 100644 --- a/plugin/trino-kinesis/pom.xml +++ b/plugin/trino-kinesis/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-kudu/pom.xml b/plugin/trino-kudu/pom.xml index 1eb4e2b0bdd6f..9ae57d50b69ca 100644 --- a/plugin/trino-kudu/pom.xml +++ b/plugin/trino-kudu/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git 
a/plugin/trino-mariadb/pom.xml b/plugin/trino-mariadb/pom.xml index b3f1e2a088b0e..fb7d2faf66aac 100644 --- a/plugin/trino-mariadb/pom.xml +++ b/plugin/trino-mariadb/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-memory/pom.xml b/plugin/trino-memory/pom.xml index 36b78ad5a29cc..6e6801b28a088 100644 --- a/plugin/trino-memory/pom.xml +++ b/plugin/trino-memory/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-ml/pom.xml b/plugin/trino-ml/pom.xml index f7a3fb948b340..952a3554fab22 100644 --- a/plugin/trino-ml/pom.xml +++ b/plugin/trino-ml/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-mongodb/pom.xml b/plugin/trino-mongodb/pom.xml index a26b04797f8bc..85a0c0e8d404c 100644 --- a/plugin/trino-mongodb/pom.xml +++ b/plugin/trino-mongodb/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-mysql-event-listener/pom.xml b/plugin/trino-mysql-event-listener/pom.xml index bd06e05858f6e..4e509f0e3c470 100644 --- a/plugin/trino-mysql-event-listener/pom.xml +++ b/plugin/trino-mysql-event-listener/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-mysql/pom.xml b/plugin/trino-mysql/pom.xml index 54349993e01e9..51ff6ed8e6e83 100644 --- a/plugin/trino-mysql/pom.xml +++ b/plugin/trino-mysql/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-opa/pom.xml b/plugin/trino-opa/pom.xml index 360665aeeff5e..0c9f7dc226c72 100644 --- a/plugin/trino-opa/pom.xml +++ b/plugin/trino-opa/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-openlineage/pom.xml b/plugin/trino-openlineage/pom.xml index c75a8ceb43d61..e84bd6ece7bfd 100644 --- a/plugin/trino-openlineage/pom.xml +++ b/plugin/trino-openlineage/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-opensearch/pom.xml b/plugin/trino-opensearch/pom.xml index 501ca2867a3fa..ac17934fbda17 100644 --- a/plugin/trino-opensearch/pom.xml +++ b/plugin/trino-opensearch/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-oracle/pom.xml b/plugin/trino-oracle/pom.xml index ecb2a8a407438..4d4d11cccb4ac 100644 --- a/plugin/trino-oracle/pom.xml +++ b/plugin/trino-oracle/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-password-authenticators/pom.xml b/plugin/trino-password-authenticators/pom.xml index 8ca6400be8669..c3178345e7d18 100644 --- a/plugin/trino-password-authenticators/pom.xml +++ b/plugin/trino-password-authenticators/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-phoenix5/pom.xml b/plugin/trino-phoenix5/pom.xml index bf16251ed6337..bff2e6770b60f 100644 --- a/plugin/trino-phoenix5/pom.xml +++ b/plugin/trino-phoenix5/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-pinot/pom.xml b/plugin/trino-pinot/pom.xml index b47b61521ca43..78d24d25f5ebf 100755 --- a/plugin/trino-pinot/pom.xml +++ b/plugin/trino-pinot/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-postgresql/pom.xml b/plugin/trino-postgresql/pom.xml index 4fdb5d68268bf..c9e28325364a9 100644 --- 
a/plugin/trino-postgresql/pom.xml +++ b/plugin/trino-postgresql/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-prometheus/pom.xml b/plugin/trino-prometheus/pom.xml index b68df1d2d3731..0be033221d65a 100644 --- a/plugin/trino-prometheus/pom.xml +++ b/plugin/trino-prometheus/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-redis/pom.xml b/plugin/trino-redis/pom.xml index 58efa78bdc681..27916f7d3313b 100644 --- a/plugin/trino-redis/pom.xml +++ b/plugin/trino-redis/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-redshift/pom.xml b/plugin/trino-redshift/pom.xml index ed0b6a08e68fe..674d8ff07a662 100644 --- a/plugin/trino-redshift/pom.xml +++ b/plugin/trino-redshift/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-resource-group-managers/pom.xml b/plugin/trino-resource-group-managers/pom.xml index 4db338a2a771a..848e5890d633d 100644 --- a/plugin/trino-resource-group-managers/pom.xml +++ b/plugin/trino-resource-group-managers/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-session-property-managers/pom.xml b/plugin/trino-session-property-managers/pom.xml index 6813ef0137efc..83f8e6bd64daa 100644 --- a/plugin/trino-session-property-managers/pom.xml +++ b/plugin/trino-session-property-managers/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-singlestore/pom.xml b/plugin/trino-singlestore/pom.xml index 215d7f0f2f599..893e5dc3c221a 100644 --- a/plugin/trino-singlestore/pom.xml +++ b/plugin/trino-singlestore/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-snowflake/pom.xml b/plugin/trino-snowflake/pom.xml index e03164ec5f8d8..15229cc2e57e7 100644 --- a/plugin/trino-snowflake/pom.xml +++ b/plugin/trino-snowflake/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-spooling-filesystem/pom.xml b/plugin/trino-spooling-filesystem/pom.xml index ad17f87043c75..c33efc35ed764 100644 --- a/plugin/trino-spooling-filesystem/pom.xml +++ b/plugin/trino-spooling-filesystem/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-sqlserver/pom.xml b/plugin/trino-sqlserver/pom.xml index ea1193c419c94..3b4878f19366a 100644 --- a/plugin/trino-sqlserver/pom.xml +++ b/plugin/trino-sqlserver/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-teradata-functions/pom.xml b/plugin/trino-teradata-functions/pom.xml index 2e2862b0bdb67..9e4434c113a75 100644 --- a/plugin/trino-teradata-functions/pom.xml +++ b/plugin/trino-teradata-functions/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-thrift-api/pom.xml b/plugin/trino-thrift-api/pom.xml index 2da5cc20d37ee..6b0e03fd49147 100644 --- a/plugin/trino-thrift-api/pom.xml +++ b/plugin/trino-thrift-api/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-thrift-testing-server/pom.xml b/plugin/trino-thrift-testing-server/pom.xml index c22fc21409a1e..3556a49286130 100644 --- a/plugin/trino-thrift-testing-server/pom.xml +++ b/plugin/trino-thrift-testing-server/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml 
diff --git a/plugin/trino-thrift/pom.xml b/plugin/trino-thrift/pom.xml index 55e02bc199ad3..743043db1356b 100644 --- a/plugin/trino-thrift/pom.xml +++ b/plugin/trino-thrift/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-tpcds/pom.xml b/plugin/trino-tpcds/pom.xml index 074a11caf15cf..b1e0b30b79512 100644 --- a/plugin/trino-tpcds/pom.xml +++ b/plugin/trino-tpcds/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-tpch/pom.xml b/plugin/trino-tpch/pom.xml index 64173feb555e6..dc5ab3029149a 100644 --- a/plugin/trino-tpch/pom.xml +++ b/plugin/trino-tpch/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/plugin/trino-vertica/pom.xml b/plugin/trino-vertica/pom.xml index 06074dcbb0d13..013c8fb95d43f 100644 --- a/plugin/trino-vertica/pom.xml +++ b/plugin/trino-vertica/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/pom.xml b/pom.xml index 4e81065443a14..3ec87bdb53b79 100644 --- a/pom.xml +++ b/pom.xml @@ -10,7 +10,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT pom ${project.artifactId} @@ -139,14 +139,14 @@ scm:git:git://github.com/trinodb/trino.git scm:git:git@github.com:trinodb/trino.git - 464 + HEAD https://github.com/trinodb/trino 23 - 2024-10-30T23:26:09Z + 2024-10-30T23:43:31Z ERROR diff --git a/service/trino-proxy/pom.xml b/service/trino-proxy/pom.xml index 0f19f30b78085..c91c284ffdc81 100644 --- a/service/trino-proxy/pom.xml +++ b/service/trino-proxy/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/service/trino-verifier/pom.xml b/service/trino-verifier/pom.xml index 2c2880b164d30..5a9f801817b88 100644 --- a/service/trino-verifier/pom.xml +++ b/service/trino-verifier/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-benchmark-queries/pom.xml b/testing/trino-benchmark-queries/pom.xml index 1a90127bc911a..0899f49c382a3 100644 --- a/testing/trino-benchmark-queries/pom.xml +++ b/testing/trino-benchmark-queries/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-benchto-benchmarks/pom.xml b/testing/trino-benchto-benchmarks/pom.xml index 3d2e59df9b49d..9c456675d22f4 100644 --- a/testing/trino-benchto-benchmarks/pom.xml +++ b/testing/trino-benchto-benchmarks/pom.xml @@ -4,7 +4,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-faulttolerant-tests/pom.xml b/testing/trino-faulttolerant-tests/pom.xml index 117c16d96b690..b17540be8734b 100644 --- a/testing/trino-faulttolerant-tests/pom.xml +++ b/testing/trino-faulttolerant-tests/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-plugin-reader/pom.xml b/testing/trino-plugin-reader/pom.xml index e4cfbdfe436ad..78d37d255c543 100644 --- a/testing/trino-plugin-reader/pom.xml +++ b/testing/trino-plugin-reader/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-product-tests-groups/pom.xml b/testing/trino-product-tests-groups/pom.xml index 907d6b17dc3b9..f086b16675e15 100644 --- a/testing/trino-product-tests-groups/pom.xml +++ b/testing/trino-product-tests-groups/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-product-tests-launcher/pom.xml b/testing/trino-product-tests-launcher/pom.xml index 
a6439346440e9..42277c4836fda 100644 --- a/testing/trino-product-tests-launcher/pom.xml +++ b/testing/trino-product-tests-launcher/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-product-tests/pom.xml b/testing/trino-product-tests/pom.xml index ec1ba7145707e..a708c74ce7e1a 100644 --- a/testing/trino-product-tests/pom.xml +++ b/testing/trino-product-tests/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-server-dev/pom.xml b/testing/trino-server-dev/pom.xml index c6fb0528852bb..30a5d6ce28d55 100644 --- a/testing/trino-server-dev/pom.xml +++ b/testing/trino-server-dev/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-test-jdbc-compatibility-old-driver/pom.xml b/testing/trino-test-jdbc-compatibility-old-driver/pom.xml index 78c2a20392e3f..7c799af26ba31 100644 --- a/testing/trino-test-jdbc-compatibility-old-driver/pom.xml +++ b/testing/trino-test-jdbc-compatibility-old-driver/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml @@ -14,7 +14,7 @@ - 464 + 465-SNAPSHOT diff --git a/testing/trino-test-jdbc-compatibility-old-server/pom.xml b/testing/trino-test-jdbc-compatibility-old-server/pom.xml index 4398c2a49f1b8..c940a50d79f59 100644 --- a/testing/trino-test-jdbc-compatibility-old-server/pom.xml +++ b/testing/trino-test-jdbc-compatibility-old-server/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-testing-containers/pom.xml b/testing/trino-testing-containers/pom.xml index cae2b6b84e108..815aaf81d27e4 100644 --- a/testing/trino-testing-containers/pom.xml +++ b/testing/trino-testing-containers/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-testing-kafka/pom.xml b/testing/trino-testing-kafka/pom.xml index 1c6fed292153b..763adbb975b49 100644 --- a/testing/trino-testing-kafka/pom.xml +++ b/testing/trino-testing-kafka/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-testing-resources/pom.xml b/testing/trino-testing-resources/pom.xml index 2e2aaab9022a3..3eafe5541ab31 100644 --- a/testing/trino-testing-resources/pom.xml +++ b/testing/trino-testing-resources/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-testing-services/pom.xml b/testing/trino-testing-services/pom.xml index 025d6e4890645..5902a321cb15e 100644 --- a/testing/trino-testing-services/pom.xml +++ b/testing/trino-testing-services/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-testing/pom.xml b/testing/trino-testing/pom.xml index d6f23922d1149..f363e5a09c004 100644 --- a/testing/trino-testing/pom.xml +++ b/testing/trino-testing/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml diff --git a/testing/trino-tests/pom.xml b/testing/trino-tests/pom.xml index 46e55e29d87ab..ce84a935b24d8 100644 --- a/testing/trino-tests/pom.xml +++ b/testing/trino-tests/pom.xml @@ -5,7 +5,7 @@ io.trino trino-root - 464 + 465-SNAPSHOT ../../pom.xml From eb46a88a04916f96135defb809836632c122d22c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Wa=C5=9B?= Date: Wed, 9 Oct 2024 13:13:35 +0200 Subject: [PATCH 16/31] Fix redirecting from some Delta Lake tables Don't attempt to read Delta Lake table columns when performing table redirect to a different catalog. 
This allows redirecting from incomplete or invalid Delta Lake tables. --- ...redGlueMetastoreWithTableRedirections.java | 76 +++++++++++++++++++ .../hive/metastore/glue/GlueConverter.java | 7 +- .../metastore/glue/TestGlueConverter.java | 7 +- 3 files changed, 83 insertions(+), 7 deletions(-) diff --git a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeSharedGlueMetastoreWithTableRedirections.java b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeSharedGlueMetastoreWithTableRedirections.java index 317956370506a..0e9e43c6b1eeb 100644 --- a/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeSharedGlueMetastoreWithTableRedirections.java +++ b/plugin/trino-delta-lake/src/test/java/io/trino/plugin/deltalake/TestDeltaLakeSharedGlueMetastoreWithTableRedirections.java @@ -20,13 +20,25 @@ import io.trino.testing.DistributedQueryRunner; import io.trino.testing.QueryRunner; import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.Column; +import software.amazon.awssdk.services.glue.model.CreateTableRequest; +import software.amazon.awssdk.services.glue.model.DeleteTableRequest; +import software.amazon.awssdk.services.glue.model.GetTableRequest; +import software.amazon.awssdk.services.glue.model.GetTableResponse; +import software.amazon.awssdk.services.glue.model.SerDeInfo; +import software.amazon.awssdk.services.glue.model.StorageDescriptor; +import software.amazon.awssdk.services.glue.model.TableInput; import java.nio.file.Path; +import java.util.Map; import static io.trino.plugin.hive.metastore.glue.TestingGlueHiveMetastore.createTestingGlueHiveMetastore; import static io.trino.testing.TestingSession.testSessionBuilder; import static java.lang.String.format; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.TestInstance.Lifecycle.PER_CLASS; /** @@ -108,4 +120,68 @@ protected String getExpectedDeltaLakeCreateSchema(String catalogName) ")"; return format(expectedDeltaLakeCreateSchema, catalogName, schema, dataDirectory.toUri()); } + + @Test + public void testUnsupportedHiveTypeRedirect() + { + String tableName = "unsupported_types"; + // Use another complete table location so `SHOW CREATE TABLE` doesn't fail on reading metadata + String location; + try (GlueClient glueClient = GlueClient.create()) { + GetTableResponse existingTable = glueClient.getTable(GetTableRequest.builder() + .databaseName(schema) + .name("delta_table") + .build()); + location = existingTable.table().storageDescriptor().location(); + } + // Create a table directly in Glue, simulating an external table being created in Spark, + // with a custom AWS data type that is not mapped to a HiveType + Column timestampColumn = Column.builder() + .name("last_hour_load") + .type("timestamp_ntz") + .build(); + StorageDescriptor sd = StorageDescriptor.builder() + .columns(timestampColumn) + .location(location) + .inputFormat("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe") + .outputFormat("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat") + .serdeInfo(SerDeInfo.builder() + .serializationLibrary("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe") + .parameters(Map.of( + "serialization.format", "1", + "path", location)) + .build()) + .build(); + TableInput tableInput = TableInput.builder() + .name(tableName) +
.storageDescriptor(sd) + .parameters(Map.of( + "spark.sql.sources.provider", "delta")) + .tableType("EXTERNAL_TABLE") + .partitionKeys(timestampColumn) + .build(); + + CreateTableRequest createTableRequest = CreateTableRequest.builder() + .databaseName(schema) + .tableInput(tableInput) + .build(); + try (GlueClient glueClient = GlueClient.create()) { + glueClient.createTable(createTableRequest); + + String tableDefinition = (String) computeScalar("SHOW CREATE TABLE hive_with_redirections." + schema + "." + tableName); + String expected = """ + CREATE TABLE delta_with_redirections.%s.%s ( + a_varchar varchar + ) + WITH ( + location = '%s' + )"""; + assertThat(tableDefinition).isEqualTo(expected.formatted(schema, tableName, location)); + + glueClient.deleteTable(DeleteTableRequest.builder() + .databaseName(schema) + .name(tableInput.name()) + .build()); + } + } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueConverter.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueConverter.java index 227da0312c718..e36dacc6c04a2 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueConverter.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueConverter.java @@ -164,7 +164,7 @@ public static Table fromGlueTable(software.amazon.awssdk.services.glue.model.Tab storage = FAKE_PARQUET_STORAGE; } else if (isIcebergTable(tableParameters)) { - // todo: any reason to not do this for delta and trino mv? + // todo: any reason to not do this for trino mv? if (sd.columns() == null) { dataColumns = ImmutableList.of(FAKE_COLUMN); } @@ -174,6 +174,11 @@ else if (isIcebergTable(tableParameters)) { partitionColumns = ImmutableList.of(); storage = FAKE_PARQUET_STORAGE; } + else if (isDeltaLakeTable(tableParameters)) { + dataColumns = ImmutableList.of(FAKE_COLUMN); + partitionColumns = ImmutableList.of(); + storage = fromGlueStorage(sd, databaseName + "." 
+ glueTable.name()); + } else { boolean isCsv = sd.serdeInfo() != null && HiveStorageFormat.CSV.getSerde().equals(sd.serdeInfo().serializationLibrary()); dataColumns = fromGlueColumns(sd.columns(), ColumnType.DATA, isCsv); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java index ee350bd9adb6e..261e10fcc1591 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java @@ -41,7 +41,6 @@ import java.util.Optional; import java.util.Random; -import static com.google.common.collect.ImmutableSet.toImmutableSet; import static io.trino.metastore.HiveType.HIVE_STRING; import static io.trino.metastore.Table.TABLE_COMMENT; import static io.trino.metastore.TableInfo.ICEBERG_MATERIALIZED_VIEW_COMMENT; @@ -403,11 +402,7 @@ void testDeltaTableNonNullStorageDescriptor() .build(); assertThat(table.storageDescriptor()).isNotNull(); io.trino.metastore.Table trinoTable = GlueConverter.fromGlueTable(table, table.databaseName()); - assertThat(trinoTable.getDataColumns().stream() - .map(Column::getName) - .collect(toImmutableSet())).isEqualTo(glueTable.storageDescriptor().columns().stream() - .map(software.amazon.awssdk.services.glue.model.Column::name) - .collect(toImmutableSet())); + assertThat(trinoTable.getDataColumns()).hasSize(1); } @Test From 21010fac441f8f44e4a6387ecc07016a29b228ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Wa=C5=9B?= Date: Fri, 25 Oct 2024 09:41:39 +0200 Subject: [PATCH 17/31] Simplify size asserts in TestGlueConverter --- .../hive/metastore/glue/TestGlueConverter.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java index 261e10fcc1591..4eda8e5522545 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueConverter.java @@ -219,7 +219,7 @@ void testToGlueFunctionInput() .build(); LanguageFunction actual = GlueConverter.fromGlueFunction(function); - assertThat(input.resourceUris().size()).isEqualTo(3); + assertThat(input.resourceUris()).hasSize(3); assertThat(actual).isEqualTo(expected); // verify that the owner comes from the metastore @@ -281,7 +281,7 @@ void testConvertTableWithOpenCSVSerDe() assertThat(trinoTable.getTableType()).isEqualTo(glueTable.tableType()); assertThat(trinoTable.getOwner().orElse(null)).isEqualTo(glueTable.owner()); assertThat(trinoTable.getParameters()).isEqualTo(glueTable.parameters()); - assertThat(trinoTable.getDataColumns().size()).isEqualTo(1); + assertThat(trinoTable.getDataColumns()).hasSize(1); assertThat(trinoTable.getDataColumns().getFirst().getType()).isEqualTo(HIVE_STRING); assertColumnList(glueTable.partitionKeys(), trinoTable.getPartitionColumns()); @@ -369,7 +369,7 @@ void testIcebergTableNullStorageDescriptor() .storageDescriptor((StorageDescriptor) null) .build(); io.trino.metastore.Table trinoTable = GlueConverter.fromGlueTable(table, table.databaseName()); - assertThat(trinoTable.getDataColumns().size()).isEqualTo(1); + assertThat(trinoTable.getDataColumns()).hasSize(1); } @Test @@ -380,7 +380,7 @@ void 
testIcebergTableNonNullStorageDescriptor() .build(); assertThat(table.storageDescriptor()).isNotNull(); io.trino.metastore.Table trinoTable = GlueConverter.fromGlueTable(table, table.databaseName()); - assertThat(trinoTable.getDataColumns().size()).isEqualTo(1); + assertThat(trinoTable.getDataColumns()).hasSize(1); } @Test @@ -391,7 +391,7 @@ void testDeltaTableNullStorageDescriptor() .storageDescriptor((StorageDescriptor) null) .build(); io.trino.metastore.Table trinoTable = GlueConverter.fromGlueTable(table, table.databaseName()); - assertThat(trinoTable.getDataColumns().size()).isEqualTo(1); + assertThat(trinoTable.getDataColumns()).hasSize(1); } @Test @@ -410,7 +410,7 @@ public void testIcebergMaterializedViewNullStorageDescriptor() { assertThat(glueMaterializedView.storageDescriptor()).isNull(); Table trinoTable = GlueConverter.fromGlueTable(glueMaterializedView, glueMaterializedView.databaseName()); - assertThat(trinoTable.getDataColumns().size()).isEqualTo(1); + assertThat(trinoTable.getDataColumns()).hasSize(1); } @Test @@ -428,7 +428,7 @@ private static void assertColumnList(List Date: Mon, 28 Oct 2024 11:15:27 +0100 Subject: [PATCH 18/31] Fix redirecting from some Delta Lake tables for Glue V1 Don't attempt to read Delta Lake table columns when performing table redirect to a different catalog. This allows redirecting from incomplete or invalid Delta Lake tables. --- .../glue/v1/converter/GlueToTrinoConverter.java | 16 ++++++++++++++-- .../glue/v1/TestGlueToTrinoConverter.java | 7 +------ 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/converter/GlueToTrinoConverter.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/converter/GlueToTrinoConverter.java index 549ca17beb369..53bed07ae4935 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/converter/GlueToTrinoConverter.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/converter/GlueToTrinoConverter.java @@ -153,14 +153,26 @@ public static Table convertTable(com.amazonaws.services.glue.model.Table glueTab Optional storageDescriptor = getStorageDescriptor(glueTable); if (isIcebergTable(tableParameters) || - (storageDescriptor.isEmpty() && isDeltaLakeTable(tableParameters)) || (storageDescriptor.isEmpty() && isTrinoMaterializedView(tableType, tableParameters))) { // Iceberg tables do not need to read the StorageDescriptor field, but we still need to return dummy properties for compatibility - // Delta Lake tables only need to provide a dummy properties if a StorageDescriptor was not explicitly configured. 
// Materialized views do not need to read the StorageDescriptor, but we still need to return dummy properties for compatibility tableBuilder.setDataColumns(ImmutableList.of(new Column("dummy", HIVE_INT, Optional.empty(), ImmutableMap.of()))); tableBuilder.getStorageBuilder().setStorageFormat(HiveStorageFormat.PARQUET.toStorageFormat()); } + else if (isDeltaLakeTable(tableParameters)) { + tableBuilder.setDataColumns(ImmutableList.of(new Column("dummy", HIVE_INT, Optional.empty(), ImmutableMap.of()))); + tableBuilder.setPartitionColumns(ImmutableList.of()); + if (storageDescriptor.isEmpty()) { + tableBuilder.getStorageBuilder().setStorageFormat(HiveStorageFormat.PARQUET.toStorageFormat()); + } + else { + StorageDescriptor sd = storageDescriptor.get(); + if (sd.getSerdeInfo() == null) { + throw new TrinoException(HIVE_UNSUPPORTED_FORMAT, "Table SerdeInfo is null for table '%s' %s".formatted(table, glueTable)); + } + new StorageConverter().setStorageBuilder(sd, tableBuilder.getStorageBuilder()); + } + } else { if (storageDescriptor.isEmpty()) { throw new TrinoException(HIVE_UNSUPPORTED_FORMAT, "Table StorageDescriptor is null for table '%s' %s".formatted(table, glueTable)); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueToTrinoConverter.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueToTrinoConverter.java index 345e11007fb18..4f6127041d25d 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueToTrinoConverter.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueToTrinoConverter.java @@ -35,7 +35,6 @@ import java.util.Optional; import static com.amazonaws.util.CollectionUtils.isNullOrEmpty; -import static com.google.common.collect.ImmutableSet.toImmutableSet; import static io.trino.metastore.HiveType.HIVE_STRING; import static io.trino.plugin.hive.TableType.EXTERNAL_TABLE; import static io.trino.plugin.hive.metastore.glue.v1.TestingMetastoreObjects.getGlueTestColumn; @@ -251,11 +250,7 @@ public void testDeltaTableNonNullStorageDescriptor() testTable.setParameters(ImmutableMap.of(SPARK_TABLE_PROVIDER_KEY, DELTA_LAKE_PROVIDER)); assertThat(getStorageDescriptor(testTable)).isPresent(); io.trino.metastore.Table trinoTable = GlueToTrinoConverter.convertTable(testTable, testDatabase.getName()); - assertThat(trinoTable.getDataColumns().stream() - .map(Column::getName) - .collect(toImmutableSet())).isEqualTo(getStorageDescriptor(testTable).orElseThrow().getColumns().stream() - .map(com.amazonaws.services.glue.model.Column::getName) - .collect(toImmutableSet())); + assertThat(trinoTable.getDataColumns()).hasSize(1); } @Test From 6763e986a292cc71e4e85eada37d4f0350d2c2a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Wa=C5=9B?= Date: Mon, 28 Oct 2024 11:16:46 +0100 Subject: [PATCH 19/31] Simplify size asserts in TestGlueToTrinoConverter --- .../metastore/glue/v1/TestGlueToTrinoConverter.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueToTrinoConverter.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueToTrinoConverter.java index 4f6127041d25d..e7231d6ee9287 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueToTrinoConverter.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueToTrinoConverter.java @@ -118,7 +118,7 @@ public 
void testConvertTableWithOpenCSVSerDe() assertThat(trinoTable.getTableType()).isEqualTo(getTableTypeNullable(glueTable)); assertThat(trinoTable.getOwner().orElse(null)).isEqualTo(glueTable.getOwner()); assertThat(trinoTable.getParameters()).isEqualTo(getTableParameters(glueTable)); - assertThat(trinoTable.getDataColumns().size()).isEqualTo(1); + assertThat(trinoTable.getDataColumns()).hasSize(1); assertThat(trinoTable.getDataColumns().get(0).getType()).isEqualTo(HIVE_STRING); assertColumnList(trinoTable.getPartitionColumns(), glueTable.getPartitionKeys()); @@ -223,7 +223,7 @@ public void testIcebergTableNullStorageDescriptor() testTable.setParameters(ImmutableMap.of(ICEBERG_TABLE_TYPE_NAME, ICEBERG_TABLE_TYPE_VALUE)); testTable.setStorageDescriptor(null); io.trino.metastore.Table trinoTable = GlueToTrinoConverter.convertTable(testTable, testDatabase.getName()); - assertThat(trinoTable.getDataColumns().size()).isEqualTo(1); + assertThat(trinoTable.getDataColumns()).hasSize(1); } @Test @@ -232,7 +232,7 @@ public void testIcebergTableNonNullStorageDescriptor() testTable.setParameters(ImmutableMap.of(ICEBERG_TABLE_TYPE_NAME, ICEBERG_TABLE_TYPE_VALUE)); assertThat(getStorageDescriptor(testTable)).isPresent(); io.trino.metastore.Table trinoTable = GlueToTrinoConverter.convertTable(testTable, testDatabase.getName()); - assertThat(trinoTable.getDataColumns().size()).isEqualTo(1); + assertThat(trinoTable.getDataColumns()).hasSize(1); } @Test @@ -241,7 +241,7 @@ public void testDeltaTableNullStorageDescriptor() testTable.setParameters(ImmutableMap.of(SPARK_TABLE_PROVIDER_KEY, DELTA_LAKE_PROVIDER)); testTable.setStorageDescriptor(null); io.trino.metastore.Table trinoTable = GlueToTrinoConverter.convertTable(testTable, testDatabase.getName()); - assertThat(trinoTable.getDataColumns().size()).isEqualTo(1); + assertThat(trinoTable.getDataColumns()).hasSize(1); } @Test @@ -259,7 +259,7 @@ public void testIcebergMaterializedViewNullStorageDescriptor() Table testMaterializedView = getGlueTestTrinoMaterializedView(testDatabase.getName()); assertThat(getStorageDescriptor(testMaterializedView)).isEmpty(); io.trino.metastore.Table trinoTable = GlueToTrinoConverter.convertTable(testMaterializedView, testDatabase.getName()); - assertThat(trinoTable.getDataColumns().size()).isEqualTo(1); + assertThat(trinoTable.getDataColumns()).hasSize(1); } @Test @@ -274,7 +274,7 @@ private static void assertColumnList(List actual, List Date: Thu, 17 Oct 2024 10:26:49 +0200 Subject: [PATCH 20/31] Add hive.metastore.glue.skip-archive config option When set to true, updating tables in Glue does not create an archived version of the table. Glue tables are updated during INSERT operations, so this option can be used to avoid reaching the limit of table versions when executing a large number of such operations.
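A minimal usage sketch, not part of the patch itself: with this option in place, a Glue-backed Hive catalog can opt out of table-version archival through its catalog properties file. The property names below come from the config class and the test added in this patch; the file path is illustrative.

    # etc/catalog/hive.properties (illustrative path)
    connector.name=hive
    hive.metastore=glue
    # Skip creating an archived table version on each Glue table update,
    # e.g. to avoid the Glue table-version limit under frequent INSERT operations
    hive.metastore.glue.skip-archive=true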
--- .../main/sphinx/object-storage/metastores.md | 6 ++ plugin/trino-hive/pom.xml | 2 + .../glue/GlueHiveExecutionInterceptor.java | 40 +++++++++ .../glue/GlueHiveMetastoreConfig.java | 14 +++ .../metastore/glue/GlueMetastoreModule.java | 1 + .../glue/TestGlueHiveMetastoreConfig.java | 7 +- .../TestGlueHiveMetastoreSkipArchive.java | 85 +++++++++++++++++++ 7 files changed, 153 insertions(+), 2 deletions(-) create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveExecutionInterceptor.java create mode 100644 plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueHiveMetastoreSkipArchive.java diff --git a/docs/src/main/sphinx/object-storage/metastores.md b/docs/src/main/sphinx/object-storage/metastores.md index 476052f1ad3fd..f86f2cb54b21b 100644 --- a/docs/src/main/sphinx/object-storage/metastores.md +++ b/docs/src/main/sphinx/object-storage/metastores.md @@ -414,6 +414,12 @@ properties: * - `hive.metastore.glue.partitions-segments` - Number of segments for partitioned Glue tables. - `5` +* - `hive.metastore.glue.skip-archive` + - AWS Glue has the ability to archive older table versions and a user can + roll back the table to any historical version if needed. By default, the + Hive Connector backed by Glue will not skip the archival of older table + versions. + - `false` ::: (iceberg-glue-catalog)= diff --git a/plugin/trino-hive/pom.xml b/plugin/trino-hive/pom.xml index bb09a21b9606e..ec589147d9bf1 100644 --- a/plugin/trino-hive/pom.xml +++ b/plugin/trino-hive/pom.xml @@ -650,6 +650,7 @@ **/TestCachedHiveGlueMetastore.java **/TestGlueHiveMetastore.java **/TestGlueHiveMetastoreQueries.java + **/TestGlueHiveMetastoreSkipArchive.java **/TestHiveGlueMetadataListing.java **/TestHiveGlueMetastoreAccessOperations.java **/TestHiveS3AndGlueMetastoreTest.java @@ -712,6 +713,7 @@ **/TestCachedHiveGlueMetastore.java **/TestGlueHiveMetastore.java **/TestGlueHiveMetastoreQueries.java + **/TestGlueHiveMetastoreSkipArchive.java **/TestHiveGlueMetadataListing.java **/TestHiveGlueMetastoreAccessOperations.java **/TestHiveS3AndGlueMetastoreTest.java diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveExecutionInterceptor.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveExecutionInterceptor.java new file mode 100644 index 0000000000000..5dd7417dfd88c --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveExecutionInterceptor.java @@ -0,0 +1,40 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.hive.metastore.glue; + +import software.amazon.awssdk.core.SdkRequest; +import software.amazon.awssdk.core.interceptor.Context; +import software.amazon.awssdk.core.interceptor.ExecutionAttributes; +import software.amazon.awssdk.core.interceptor.ExecutionInterceptor; +import software.amazon.awssdk.services.glue.model.UpdateTableRequest; + +public class GlueHiveExecutionInterceptor + implements ExecutionInterceptor +{ + private final boolean skipArchive; + + GlueHiveExecutionInterceptor(boolean isSkipArchive) + { + this.skipArchive = isSkipArchive; + } + + @Override + public SdkRequest modifyRequest(Context.ModifyRequest context, ExecutionAttributes executionAttributes) + { + if (context.request() instanceof UpdateTableRequest updateTableRequest) { + return updateTableRequest.toBuilder().skipArchive(skipArchive).build(); + } + return context.request(); + } +} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreConfig.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreConfig.java index ecf552d189bd5..682521467f6ed 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreConfig.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreConfig.java @@ -49,6 +49,7 @@ public class GlueHiveMetastoreConfig private int partitionSegments = 5; private int threads = 40; private boolean assumeCanonicalPartitionKeys; + private boolean skipArchive; public Optional getGlueRegion() { @@ -277,4 +278,17 @@ public GlueHiveMetastoreConfig setAssumeCanonicalPartitionKeys(boolean assumeCan this.assumeCanonicalPartitionKeys = assumeCanonicalPartitionKeys; return this; } + + public boolean isSkipArchive() + { + return skipArchive; + } + + @Config("hive.metastore.glue.skip-archive") + @ConfigDescription("Skip archiving an old table version when updating a table in the Glue metastore") + public GlueHiveMetastoreConfig setSkipArchive(boolean skipArchive) + { + this.skipArchive = skipArchive; + return this; + } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java index 319b2c74724ee..8c04a29ff7bbb 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java @@ -130,6 +130,7 @@ public static GlueClient createGlueClient(GlueHiveMetastoreConfig config, OpenTe .setCaptureExperimentalSpanAttributes(true) .setRecordIndividualHttpError(true) .build().newExecutionInterceptor()) + .addExecutionInterceptor(new GlueHiveExecutionInterceptor(config.isSkipArchive())) .retryStrategy(retryBuilder -> retryBuilder .retryOnException(throwable -> throwable instanceof ConcurrentModificationException) .backoffStrategy(BackoffStrategy.exponentialDelay( diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueHiveMetastoreConfig.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueHiveMetastoreConfig.java index 3ec8914810579..903bf49b9657f 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueHiveMetastoreConfig.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueHiveMetastoreConfig.java @@ -44,7 +44,8 @@ void testDefaults() 
.setCatalogId(null) .setPartitionSegments(5) .setThreads(40) - .setAssumeCanonicalPartitionKeys(false)); + .setAssumeCanonicalPartitionKeys(false) + .setSkipArchive(false)); } @Test @@ -68,6 +69,7 @@ void testExplicitPropertyMapping() .put("hive.metastore.glue.partitions-segments", "10") .put("hive.metastore.glue.threads", "77") .put("hive.metastore.glue.assume-canonical-partition-keys", "true") + .put("hive.metastore.glue.skip-archive", "true") .buildOrThrow(); GlueHiveMetastoreConfig expected = new GlueHiveMetastoreConfig() @@ -87,7 +89,8 @@ void testExplicitPropertyMapping() .setCatalogId("0123456789") .setPartitionSegments(10) .setThreads(77) - .setAssumeCanonicalPartitionKeys(true); + .setAssumeCanonicalPartitionKeys(true) + .setSkipArchive(true); assertFullMapping(properties, expected); } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueHiveMetastoreSkipArchive.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueHiveMetastoreSkipArchive.java new file mode 100644 index 0000000000000..19a0c276d336c --- /dev/null +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestGlueHiveMetastoreSkipArchive.java @@ -0,0 +1,85 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.hive.metastore.glue; + +import io.trino.plugin.hive.HiveQueryRunner; +import io.trino.testing.AbstractTestQueryFramework; +import io.trino.testing.DistributedQueryRunner; +import io.trino.testing.QueryRunner; +import io.trino.testing.sql.TestTable; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Test; +import software.amazon.awssdk.services.glue.GlueClient; +import software.amazon.awssdk.services.glue.model.TableVersion; + +import java.util.List; + +import static com.google.common.collect.Iterables.getOnlyElement; +import static io.trino.testing.TestingNames.randomNameSuffix; +import static io.trino.testing.TestingSession.testSessionBuilder; +import static org.assertj.core.api.Assertions.assertThat; + +final class TestGlueHiveMetastoreSkipArchive + extends AbstractTestQueryFramework +{ + private final String testSchema = "test_schema_" + randomNameSuffix(); + private final GlueClient glueClient = GlueClient.create(); + + @Override + protected QueryRunner createQueryRunner() + throws Exception + { + DistributedQueryRunner queryRunner = HiveQueryRunner.builder(testSessionBuilder() + .setCatalog("hive") + .setSchema(testSchema) + .build()) + .addHiveProperty("hive.metastore", "glue") + .addHiveProperty("hive.metastore.glue.default-warehouse-dir", "local:///glue") + .addHiveProperty("hive.security", "allow-all") + .addHiveProperty("hive.metastore.glue.skip-archive", "true") + .setCreateTpchSchemas(false) + .build(); + queryRunner.execute("CREATE SCHEMA " + testSchema); + return queryRunner; + } + + @AfterAll + void cleanUpSchema() + { + getQueryRunner().execute("DROP SCHEMA " + testSchema + " CASCADE"); + } + + @Test + void testSkipArchive() + { + try (TestTable table = new TestTable(getQueryRunner()::execute, "test_skip_archive", "(col int)")) { + List tableVersionsBeforeInsert = getTableVersions(testSchema, table.getName()); + assertThat(tableVersionsBeforeInsert).hasSize(1); + String versionIdBeforeInsert = getOnlyElement(tableVersionsBeforeInsert).versionId(); + + assertUpdate("INSERT INTO " + table.getName() + " VALUES 1", 1); + + // Verify count of table versions isn't increased, but version id is changed + List tableVersionsAfterInsert = getTableVersions(testSchema, table.getName()); + assertThat(tableVersionsAfterInsert).hasSize(1); + String versionIdAfterInsert = getOnlyElement(tableVersionsAfterInsert).versionId(); + assertThat(versionIdBeforeInsert).isNotEqualTo(versionIdAfterInsert); + } + } + + private List getTableVersions(String databaseName, String tableName) + { + return glueClient.getTableVersions(builder -> builder.databaseName(databaseName).tableName(tableName)).tableVersions(); + } +} From 51915cb83f0adb9e66fcdf5bb4b494d91a1765f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Wa=C5=9B?= Date: Thu, 24 Oct 2024 10:35:32 +0200 Subject: [PATCH 21/31] Sync order of GlueHiveMetastoreConfig methods and properties --- .../glue/v1/GlueHiveMetastoreConfig.java | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueHiveMetastoreConfig.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueHiveMetastoreConfig.java index 0f526d34f6512..619c1d98f4459 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueHiveMetastoreConfig.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueHiveMetastoreConfig.java @@ -276,19 +276,6 @@ public 
GlueHiveMetastoreConfig setGetPartitionThreads(int getPartitionThreads) return this; } - public boolean isAssumeCanonicalPartitionKeys() - { - return assumeCanonicalPartitionKeys; - } - - @Config("hive.metastore.glue.assume-canonical-partition-keys") - @ConfigDescription("Allow conversion of non-char types (eg BIGINT, timestamp) to canonical string formats") - public GlueHiveMetastoreConfig setAssumeCanonicalPartitionKeys(boolean assumeCanonicalPartitionKeys) - { - this.assumeCanonicalPartitionKeys = assumeCanonicalPartitionKeys; - return this; - } - @Min(1) public int getReadStatisticsThreads() { @@ -317,6 +304,19 @@ public GlueHiveMetastoreConfig setWriteStatisticsThreads(int writeStatisticsThre return this; } + public boolean isAssumeCanonicalPartitionKeys() + { + return assumeCanonicalPartitionKeys; + } + + @Config("hive.metastore.glue.assume-canonical-partition-keys") + @ConfigDescription("Allow conversion of non-char types (eg BIGINT, timestamp) to canonical string formats") + public GlueHiveMetastoreConfig setAssumeCanonicalPartitionKeys(boolean assumeCanonicalPartitionKeys) + { + this.assumeCanonicalPartitionKeys = assumeCanonicalPartitionKeys; + return this; + } + @PostConstruct public void validate() { From 00ccd3c47e59d801ac47617aec05da9377907302 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Wa=C5=9B?= Date: Thu, 17 Oct 2024 12:59:31 +0200 Subject: [PATCH 22/31] Support hive.metastore.glue.skip-archive in v1 Glue --- .../src/main/sphinx/object-storage/metastores.md | 7 +------ .../glue/v1/GlueHiveMetastoreConfig.java | 16 ++++++++++++++++ .../metastore/glue/v1/GlueMetastoreModule.java | 13 ++++++++++++- .../glue/v1}/SkipArchiveRequestHandler.java | 2 +- .../glue/v1/TestGlueHiveMetastoreConfig.java | 3 +++ .../catalog/glue/IcebergGlueCatalogConfig.java | 15 --------------- .../catalog/glue/IcebergGlueCatalogModule.java | 8 +------- .../glue/TestIcebergGlueCatalogConfig.java | 7 ++----- .../glue/TestingIcebergGlueCatalogModule.java | 9 +-------- 9 files changed, 37 insertions(+), 43 deletions(-) rename plugin/{trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue => trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1}/SkipArchiveRequestHandler.java (98%) diff --git a/docs/src/main/sphinx/object-storage/metastores.md b/docs/src/main/sphinx/object-storage/metastores.md index f86f2cb54b21b..e175fe9c86672 100644 --- a/docs/src/main/sphinx/object-storage/metastores.md +++ b/docs/src/main/sphinx/object-storage/metastores.md @@ -436,16 +436,11 @@ described with the following additional property: * - Property name - Description - Default -* - `iceberg.glue.skip-archive` - - Skip archiving an old table version when creating a new version in a commit. - See [AWS Glue Skip - Archive](https://iceberg.apache.org/docs/latest/aws/#skip-archive). - - `true` * - `iceberg.glue.cache-table-metadata` - While updating the table in AWS Glue, store the table metadata with the purpose of accelerating `information_schema.columns` and `system.metadata.table_comments` queries. 
- - `true` + - `true` ::: ## Iceberg-specific metastores diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueHiveMetastoreConfig.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueHiveMetastoreConfig.java index 619c1d98f4459..52a185fa18217 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueHiveMetastoreConfig.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueHiveMetastoreConfig.java @@ -17,6 +17,7 @@ import io.airlift.configuration.ConfigDescription; import io.airlift.configuration.ConfigSecuritySensitive; import io.airlift.configuration.DefunctConfig; +import io.airlift.configuration.LegacyConfig; import jakarta.annotation.PostConstruct; import jakarta.validation.constraints.Max; import jakarta.validation.constraints.Min; @@ -48,6 +49,7 @@ public class GlueHiveMetastoreConfig private int readStatisticsThreads = 5; private int writeStatisticsThreads = 20; private boolean assumeCanonicalPartitionKeys; + private boolean skipArchive; public Optional getGlueRegion() { @@ -317,6 +319,20 @@ public GlueHiveMetastoreConfig setAssumeCanonicalPartitionKeys(boolean assumeCan return this; } + public boolean isSkipArchive() + { + return skipArchive; + } + + @Config("hive.metastore.glue.skip-archive") + @LegacyConfig("iceberg.glue.skip-archive") + @ConfigDescription("Skip archiving an old table version when updating a table in the Glue metastore") + public GlueHiveMetastoreConfig setSkipArchive(boolean skipArchive) + { + this.skipArchive = skipArchive; + return this; + } + @PostConstruct public void validate() { diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueMetastoreModule.java index 4e3450fecb27f..5bdd79f6ace38 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/GlueMetastoreModule.java @@ -94,7 +94,18 @@ protected void setup(Binder binder) @ProvidesIntoSet @Singleton @ForGlueHiveMetastore - public RequestHandler2 createRequestHandler(OpenTelemetry openTelemetry) + public RequestHandler2 createSkipArchiveRequestHandler(GlueHiveMetastoreConfig config) + { + if (!config.isSkipArchive()) { + return new RequestHandler2() {}; + } + return new SkipArchiveRequestHandler(); + } + + @ProvidesIntoSet + @Singleton + @ForGlueHiveMetastore + public RequestHandler2 createTelemetryRequestHandler(OpenTelemetry openTelemetry) { return AwsSdkTelemetry.builder(openTelemetry) .setCaptureExperimentalSpanAttributes(true) diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/SkipArchiveRequestHandler.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/SkipArchiveRequestHandler.java similarity index 98% rename from plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/SkipArchiveRequestHandler.java rename to plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/SkipArchiveRequestHandler.java index 5bfa765acaa0d..d5ba1bc434f22 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/SkipArchiveRequestHandler.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/v1/SkipArchiveRequestHandler.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions 
and * limitations under the License. */ -package io.trino.plugin.iceberg.catalog.glue; +package io.trino.plugin.hive.metastore.glue.v1; import com.amazonaws.AmazonWebServiceRequest; import com.amazonaws.handlers.RequestHandler2; diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueHiveMetastoreConfig.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueHiveMetastoreConfig.java index 4e2ca96e03162..38c71002bbe49 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueHiveMetastoreConfig.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/v1/TestGlueHiveMetastoreConfig.java @@ -46,6 +46,7 @@ public void testDefaults() .setPartitionSegments(5) .setGetPartitionThreads(20) .setAssumeCanonicalPartitionKeys(false) + .setSkipArchive(false) .setReadStatisticsThreads(5) .setWriteStatisticsThreads(20)); } @@ -72,6 +73,7 @@ public void testExplicitPropertyMapping() .put("hive.metastore.glue.partitions-segments", "10") .put("hive.metastore.glue.get-partition-threads", "42") .put("hive.metastore.glue.assume-canonical-partition-keys", "true") + .put("hive.metastore.glue.skip-archive", "true") .put("hive.metastore.glue.read-statistics-threads", "42") .put("hive.metastore.glue.write-statistics-threads", "43") .buildOrThrow(); @@ -95,6 +97,7 @@ public void testExplicitPropertyMapping() .setPartitionSegments(10) .setGetPartitionThreads(42) .setAssumeCanonicalPartitionKeys(true) + .setSkipArchive(true) .setReadStatisticsThreads(42) .setWriteStatisticsThreads(43); diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/IcebergGlueCatalogConfig.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/IcebergGlueCatalogConfig.java index 156f48acebc0a..b692527ab903b 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/IcebergGlueCatalogConfig.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/IcebergGlueCatalogConfig.java @@ -14,12 +14,10 @@ package io.trino.plugin.iceberg.catalog.glue; import io.airlift.configuration.Config; -import io.airlift.configuration.ConfigDescription; public class IcebergGlueCatalogConfig { private boolean cacheTableMetadata = true; - private boolean skipArchive = true; public boolean isCacheTableMetadata() { @@ -32,17 +30,4 @@ public IcebergGlueCatalogConfig setCacheTableMetadata(boolean cacheTableMetadata this.cacheTableMetadata = cacheTableMetadata; return this; } - - public boolean isSkipArchive() - { - return skipArchive; - } - - @Config("iceberg.glue.skip-archive") - @ConfigDescription("Skip archiving an old table version when creating a new version in a commit") - public IcebergGlueCatalogConfig setSkipArchive(boolean skipArchive) - { - this.skipArchive = skipArchive; - return this; - } } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/IcebergGlueCatalogModule.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/IcebergGlueCatalogModule.java index f86128a7da765..3aeef110d3889 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/IcebergGlueCatalogModule.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/glue/IcebergGlueCatalogModule.java @@ -14,7 +14,6 @@ package io.trino.plugin.iceberg.catalog.glue; import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.handlers.RequestHandler2; import 
com.amazonaws.services.glue.model.Table; import com.google.inject.Binder; import com.google.inject.Key; @@ -37,7 +36,6 @@ import static com.google.inject.multibindings.Multibinder.newSetBinder; import static com.google.inject.multibindings.OptionalBinder.newOptionalBinder; -import static io.airlift.configuration.ConditionalModule.conditionalModule; import static io.airlift.configuration.ConfigBinder.configBinder; import static org.weakref.jmx.guice.ExportBinder.newExporter; @@ -48,6 +46,7 @@ public class IcebergGlueCatalogModule protected void setup(Binder binder) { configBinder(binder).bindConfig(GlueHiveMetastoreConfig.class); + configBinder(binder).bindConfigDefaults(GlueHiveMetastoreConfig.class, config -> config.setSkipArchive(true)); configBinder(binder).bindConfig(IcebergGlueCatalogConfig.class); binder.bind(GlueMetastoreStats.class).in(Scopes.SINGLETON); newExporter(binder).export(GlueMetastoreStats.class).withGeneratedName(); @@ -56,11 +55,6 @@ protected void setup(Binder binder) binder.bind(TrinoCatalogFactory.class).to(TrinoGlueCatalogFactory.class).in(Scopes.SINGLETON); newExporter(binder).export(TrinoCatalogFactory.class).withGeneratedName(); - install(conditionalModule( - IcebergGlueCatalogConfig.class, - IcebergGlueCatalogConfig::isSkipArchive, - internalBinder -> newSetBinder(internalBinder, RequestHandler2.class, ForGlueHiveMetastore.class).addBinding().toInstance(new SkipArchiveRequestHandler()))); - // Required to inject HiveMetastoreFactory for migrate procedure binder.bind(Key.get(boolean.class, HideDeltaLakeTables.class)).toInstance(false); newOptionalBinder(binder, Key.get(new TypeLiteral<Predicate<Table>>() {}, ForGlueHiveMetastore.class)) diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestIcebergGlueCatalogConfig.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestIcebergGlueCatalogConfig.java index c6b3293401540..1833bf9c33374 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestIcebergGlueCatalogConfig.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestIcebergGlueCatalogConfig.java @@ -28,8 +28,7 @@ public class TestIcebergGlueCatalogConfig public void testDefaults() { assertRecordedDefaults(recordDefaults(IcebergGlueCatalogConfig.class) - .setCacheTableMetadata(true) - .setSkipArchive(true)); + .setCacheTableMetadata(true)); } @Test @@ -37,12 +36,10 @@ public void testExplicitPropertyMapping() { Map<String, String> properties = ImmutableMap.<String, String>builder() .put("iceberg.glue.cache-table-metadata", "false") - .put("iceberg.glue.skip-archive", "false") .buildOrThrow(); IcebergGlueCatalogConfig expected = new IcebergGlueCatalogConfig() - .setCacheTableMetadata(false) - .setSkipArchive(false); + .setCacheTableMetadata(false); assertFullMapping(properties, expected); } diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestingIcebergGlueCatalogModule.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestingIcebergGlueCatalogModule.java index 765081865a677..2db57a9288f1f 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestingIcebergGlueCatalogModule.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/catalog/glue/TestingIcebergGlueCatalogModule.java @@ -14,7 +14,6 @@ package io.trino.plugin.iceberg.catalog.glue; import com.amazonaws.auth.AWSCredentialsProvider; -import com.amazonaws.handlers.RequestHandler2; import
com.amazonaws.services.glue.model.Table; import com.google.inject.Binder; import com.google.inject.Key; @@ -32,9 +31,7 @@ import java.util.function.Predicate; -import static com.google.inject.multibindings.Multibinder.newSetBinder; import static com.google.inject.multibindings.OptionalBinder.newOptionalBinder; -import static io.airlift.configuration.ConditionalModule.conditionalModule; import static io.airlift.configuration.ConfigBinder.configBinder; import static java.util.Objects.requireNonNull; import static org.weakref.jmx.guice.ExportBinder.newExporter; @@ -53,6 +50,7 @@ public TestingIcebergGlueCatalogModule(AWSGlueAsyncAdapterProvider awsGlueAsyncA protected void setup(Binder binder) { configBinder(binder).bindConfig(GlueHiveMetastoreConfig.class); + configBinder(binder).bindConfigDefaults(GlueHiveMetastoreConfig.class, config -> config.setSkipArchive(true)); configBinder(binder).bindConfig(IcebergGlueCatalogConfig.class); binder.bind(GlueMetastoreStats.class).in(Scopes.SINGLETON); newExporter(binder).export(GlueMetastoreStats.class).withGeneratedName(); @@ -62,11 +60,6 @@ protected void setup(Binder binder) newExporter(binder).export(TrinoCatalogFactory.class).withGeneratedName(); binder.bind(AWSGlueAsyncAdapterProvider.class).toInstance(awsGlueAsyncAdapterProvider); - install(conditionalModule( - IcebergGlueCatalogConfig.class, - IcebergGlueCatalogConfig::isSkipArchive, - internalBinder -> newSetBinder(internalBinder, RequestHandler2.class, ForGlueHiveMetastore.class).addBinding().toInstance(new SkipArchiveRequestHandler()))); - // Required to inject HiveMetastoreFactory for migrate procedure binder.bind(Key.get(boolean.class, HideDeltaLakeTables.class)).toInstance(false); newOptionalBinder(binder, Key.get(new TypeLiteral<Predicate<Table>>() {}, ForGlueHiveMetastore.class)) From 1424a77e4640ca8b3359c61db09b3fa7e07a17ef Mon Sep 17 00:00:00 2001 From: Yuya Ebihara Date: Wed, 30 Oct 2024 17:59:27 +0900 Subject: [PATCH 23/31] Remove unused method from HashGenerationOptimizer --- .../HashGenerationOptimizer.java | 29 ------------------- 1 file changed, 29 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/HashGenerationOptimizer.java b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/HashGenerationOptimizer.java index cf4717e660520..5c8a4c84c0749 100644 --- a/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/HashGenerationOptimizer.java +++ b/core/trino-main/src/main/java/io/trino/sql/planner/optimizations/HashGenerationOptimizer.java @@ -852,30 +852,6 @@ private static Optional<HashComputation> computeHash(Iterable<Symbol> fields) return Optional.of(new HashComputation(fields)); } - public static Optional<Expression> getHashExpression(Metadata metadata, SymbolAllocator symbolAllocator, List<Symbol> symbols) - { - if (symbols.isEmpty()) { - return Optional.empty(); - } - - Expression result = new Constant(BIGINT, (long) INITIAL_HASH_VALUE); - for (Symbol symbol : symbols) { - Expression hashField = BuiltinFunctionCallBuilder.resolve(metadata) - .setName(HASH_CODE) - .addArgument(symbol.type(), new Reference(BIGINT, symbol.name())) - .build(); - - hashField = new Coalesce(hashField, new Constant(BIGINT, (long) NULL_HASH_CODE)); - - result = BuiltinFunctionCallBuilder.resolve(metadata) - .setName("combine_hash") - .addArgument(BIGINT, result) - .addArgument(BIGINT, hashField) - .build(); - } - return Optional.of(result); - } - private static class HashComputation { private final List<Symbol> fields; @@ -887,11 +863,6 @@ private HashComputation(Iterable<Symbol> fields)
checkArgument(!this.fields.isEmpty(), "fields cannot be empty"); } - public List<Symbol> getFields() - { - return fields; - } - public Optional<HashComputation> translate(Function<Symbol, Optional<Symbol>> translator) { ImmutableList.Builder<Symbol> newSymbols = ImmutableList.builder(); From 65ddeffa7beceef1e07c281ac31ccf16d48d10db Mon Sep 17 00:00:00 2001 From: "Mateusz \"Serafin\" Gajewski" Date: Wed, 30 Oct 2024 10:02:46 +0100 Subject: [PATCH 24/31] Serialize directly from the memory representation This avoids unnecessary boxing and unboxing of primitive values, which are boxed whenever `Object getObjectValue` is called on a `Type`. Serialization now fetches primitive values directly and writes them to a JsonGenerator without boxing. --- .../server/protocol/JsonEncodingUtils.java | 431 ++++++++++++++++++ .../encoding/JsonQueryDataEncoder.java | 130 +----- .../spooling/TestJsonQueryDataEncoding.java | 3 +- 3 files changed, 445 insertions(+), 119 deletions(-) create mode 100644 core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java b/core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java new file mode 100644 index 0000000000000..fef8964c96219 --- /dev/null +++ b/core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java @@ -0,0 +1,431 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package io.trino.server.protocol; + +import com.fasterxml.jackson.core.JsonGenerator; +import io.airlift.slice.Slice; +import io.trino.spi.Page; +import io.trino.spi.block.Block; +import io.trino.spi.block.SqlMap; +import io.trino.spi.block.SqlRow; +import io.trino.spi.connector.ConnectorSession; +import io.trino.spi.type.ArrayType; +import io.trino.spi.type.BigintType; +import io.trino.spi.type.BooleanType; +import io.trino.spi.type.CharType; +import io.trino.spi.type.DoubleType; +import io.trino.spi.type.IntegerType; +import io.trino.spi.type.MapType; +import io.trino.spi.type.RealType; +import io.trino.spi.type.RowType; +import io.trino.spi.type.SmallintType; +import io.trino.spi.type.SqlDate; +import io.trino.spi.type.SqlDecimal; +import io.trino.spi.type.SqlTime; +import io.trino.spi.type.SqlTimeWithTimeZone; +import io.trino.spi.type.SqlTimestamp; +import io.trino.spi.type.SqlTimestampWithTimeZone; +import io.trino.spi.type.SqlVarbinary; +import io.trino.spi.type.TinyintType; +import io.trino.spi.type.Type; +import io.trino.spi.type.VarbinaryType; +import io.trino.spi.type.VarcharType; +import io.trino.type.SqlIntervalDayTime; +import io.trino.type.SqlIntervalYearMonth; + +import java.io.IOException; +import java.math.BigDecimal; +import java.util.List; + +import static com.google.common.base.Verify.verify; +import static io.trino.spi.type.BigintType.BIGINT; +import static io.trino.spi.type.BooleanType.BOOLEAN; +import static io.trino.spi.type.Chars.padSpaces; +import static io.trino.spi.type.DoubleType.DOUBLE; +import static io.trino.spi.type.IntegerType.INTEGER; +import static io.trino.spi.type.RealType.REAL; +import static io.trino.spi.type.SmallintType.SMALLINT; +import static io.trino.spi.type.TinyintType.TINYINT; +import static io.trino.spi.type.VarbinaryType.VARBINARY; +import static io.trino.spi.type.VarcharType.VARCHAR; +import static java.util.Objects.requireNonNull; + +public class JsonEncodingUtils +{ + private JsonEncodingUtils() {} + + private static final BigintEncoder BIGINT_ENCODER = new BigintEncoder(); + private static final BooleanEncoder BOOLEAN_ENCODER = new BooleanEncoder(); + private static final IntegerEncoder INTEGER_ENCODER = new IntegerEncoder(); + private static final SmallintEncoder SMALLINT_ENCODER = new SmallintEncoder(); + private static final DoubleEncoder DOUBLE_ENCODER = new DoubleEncoder(); + private static final RealEncoder REAL_ENCODER = new RealEncoder(); + private static final TinyintEncoder TINYINT_ENCODER = new TinyintEncoder(); + private static final VarcharEncoder VARCHAR_ENCODER = new VarcharEncoder(); + private static final VarbinaryEncoder VARBINARY_ENCODER = new VarbinaryEncoder(); + + public static TypeEncoder[] createTypeEncoders(ConnectorSession session, List<OutputColumn> columns) + { + verify(!columns.isEmpty(), "Columns must not be empty"); + + return columns.stream() + .map(column -> createTypeEncoder(column.type())) + .toArray(TypeEncoder[]::new); + } + + public static TypeEncoder createTypeEncoder(Type type) + { + return switch (type) { + case BigintType _ -> BIGINT_ENCODER; + case BooleanType _ -> BOOLEAN_ENCODER; + case IntegerType _ -> INTEGER_ENCODER; + case SmallintType _ -> SMALLINT_ENCODER; + case DoubleType _ -> DOUBLE_ENCODER; + case RealType _ -> REAL_ENCODER; + case TinyintType _ -> TINYINT_ENCODER; + case VarcharType _ -> VARCHAR_ENCODER; + case VarbinaryType _ -> VARBINARY_ENCODER; + case CharType charType -> new CharEncoder(charType.getLength()); + // TODO: add specialized Short/Long decimal encoders + case ArrayType
arrayType -> new ArrayEncoder(arrayType, createTypeEncoder(arrayType.getElementType())); + case MapType mapType -> new MapEncoder(mapType, createTypeEncoder(mapType.getValueType())); + case RowType rowType -> new RowEncoder(rowType, rowType.getTypeParameters() + .stream() + .map(JsonEncodingUtils::createTypeEncoder) + .toArray(TypeEncoder[]::new)); + case Type _ -> new TypeObjectValueEncoder(type); + }; + } + + public static void writePagesToJsonGenerator(ConnectorSession connectorSession, JsonGenerator generator, TypeEncoder[] typeEncoders, int[] sourcePageChannels, List<Page> pages) + throws IOException + { + verify(typeEncoders.length == sourcePageChannels.length, "Source page channels and type encoders must have the same length"); + generator.writeStartArray(); + for (Page page : pages) { + for (int position = 0; position < page.getPositionCount(); position++) { + generator.writeStartArray(); + for (int column = 0; column < typeEncoders.length; column++) { + typeEncoders[column].encode(generator, connectorSession, page.getBlock(sourcePageChannels[column]), position); + } + generator.writeEndArray(); + } + } + generator.writeEndArray(); + generator.flush(); // final flush to have the data written to the output stream + } + + public interface TypeEncoder + { + void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException; + } + + private static class BigintEncoder + implements TypeEncoder + { + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + generator.writeNumber(BIGINT.getLong(block, position)); + } + } + + private static class IntegerEncoder + implements TypeEncoder + { + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + generator.writeNumber(INTEGER.getInt(block, position)); + } + } + + private static class BooleanEncoder + implements TypeEncoder + { + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + generator.writeBoolean(BOOLEAN.getBoolean(block, position)); + } + } + + private static class SmallintEncoder + implements TypeEncoder + { + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + generator.writeNumber(SMALLINT.getShort(block, position)); + } + } + + private static class TinyintEncoder + implements TypeEncoder + { + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + generator.writeNumber(TINYINT.getByte(block, position)); + } + } + + private static class DoubleEncoder + implements TypeEncoder + { + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + generator.writeNumber(DOUBLE.getDouble(block, position)); + } + } + + private static class RealEncoder + implements TypeEncoder + { + @Override + public void
encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + generator.writeNumber(REAL.getFloat(block, position)); + } + } + + private static class VarcharEncoder + implements TypeEncoder + { + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + Slice slice = VARCHAR.getSlice(block, position); + // Optimization: avoid conversion from Slice to String and String to bytes when writing UTF-8 strings + generator.writeUTF8String(slice.byteArray(), slice.byteArrayOffset(), slice.length()); + } + } + + private static class CharEncoder + implements TypeEncoder + { + private final int length; + + private CharEncoder(int length) + { + this.length = length; + } + + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + Slice slice = padSpaces(VARCHAR.getSlice(block, position), length); + // Optimization: avoid conversion from Slice to String and String to bytes when writing UTF-8 strings + generator.writeUTF8String(slice.byteArray(), slice.byteArrayOffset(), slice.length()); + } + } + + private static class VarbinaryEncoder + implements TypeEncoder + { + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + + // Optimization: avoid copying Slice to byte array + Slice slice = VARBINARY.getSlice(block, position); + generator.writeBinary(slice.byteArray(), slice.byteArrayOffset(), slice.length()); + } + } + + private static class ArrayEncoder + implements TypeEncoder + { + private final ArrayType arrayType; + private final TypeEncoder typeEncoder; + + public ArrayEncoder(ArrayType arrayType, TypeEncoder typeEncoder) + { + this.arrayType = requireNonNull(arrayType, "arrayType is null"); + this.typeEncoder = requireNonNull(typeEncoder, "typeEncoder is null"); + } + + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + + Block arrayBlock = arrayType.getObject(block, position); + generator.writeStartArray(); + for (int i = 0; i < arrayBlock.getPositionCount(); i++) { + typeEncoder.encode(generator, session, arrayBlock, i); + } + generator.writeEndArray(); + } + } + + private static class MapEncoder + implements TypeEncoder + { + private final MapType mapType; + private final TypeEncoder valueEncoder; + + public MapEncoder(MapType mapType, TypeEncoder valueEncoder) + { + this.mapType = requireNonNull(mapType, "mapType is null"); + this.valueEncoder = requireNonNull(valueEncoder, "valueEncoder is null"); + } + + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + + SqlMap map = mapType.getObject(block, position); + int offset = map.getRawOffset(); + Block keyBlock = map.getRawKeyBlock(); + Block valueBlock = map.getRawValueBlock(); + + verify(keyBlock.getPositionCount() == valueBlock.getPositionCount(), "Key and value blocks have 
different number of positions"); + generator.writeStartObject(); + for (int i = 0; i < map.getSize(); i++) { + // Field name is always written as String for backward compatibility, + // only value is properly encoded. + generator.writeFieldName(mapType.getKeyType().getObjectValue(session, keyBlock, offset + i).toString()); + valueEncoder.encode(generator, session, valueBlock, offset + i); + } + generator.writeEndObject(); + } + } + + private static class RowEncoder + implements TypeEncoder + { + private final RowType rowType; + private final TypeEncoder[] fieldEncoders; + + public RowEncoder(RowType rowType, TypeEncoder[] fieldEncoders) + { + this.rowType = requireNonNull(rowType, "rowType is null"); + this.fieldEncoders = requireNonNull(fieldEncoders, "fieldEncoders is null"); + } + + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + SqlRow row = rowType.getObject(block, position); + generator.writeStartArray(); + for (int i = 0; i < row.getFieldCount(); i++) { + fieldEncoders[i].encode(generator, session, row.getRawFieldBlock(i), row.getRawIndex()); + } + generator.writeEndArray(); + } + } + + private static class TypeObjectValueEncoder + implements TypeEncoder + { + private final Type type; + + public TypeObjectValueEncoder(Type type) + { + this.type = requireNonNull(type, "type is null"); + } + + @Override + public void encode(JsonGenerator generator, ConnectorSession session, Block block, int position) + throws IOException + { + if (block.isNull(position)) { + generator.writeNull(); + return; + } + + Object value = type.getObjectValue(session, block, position); + switch (value) { + case BigDecimal bigDecimalValue -> generator.writeNumber(bigDecimalValue); + case SqlDate dateValue -> generator.writeString(dateValue.toString()); + case SqlDecimal decimalValue -> generator.writeString(decimalValue.toString()); + case SqlIntervalDayTime intervalValue -> generator.writeString(intervalValue.toString()); + case SqlIntervalYearMonth intervalValue -> generator.writeString(intervalValue.toString()); + case SqlTime timeValue -> generator.writeString(timeValue.toString()); + case SqlTimeWithTimeZone timeWithTimeZone -> generator.writeString(timeWithTimeZone.toString()); + case SqlTimestamp timestamp -> generator.writeString(timestamp.toString()); + case SqlTimestampWithTimeZone timestampWithTimeZone -> generator.writeString(timestampWithTimeZone.toString()); + case SqlVarbinary sqlVarbinary -> generator.writeBinary(sqlVarbinary.getBytes()); + default -> generator.writePOJO(value); + } + } + } +} diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java index 9c5116e3635dd..f097fa83c4795 100644 --- a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java +++ b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java @@ -16,39 +16,24 @@ import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.io.CountingOutputStream; import com.google.inject.Inject; -import io.airlift.slice.Slice; import io.trino.Session; import 
io.trino.client.spooling.DataAttributes; +import io.trino.server.protocol.JsonEncodingUtils.TypeEncoder; import io.trino.server.protocol.OutputColumn; import io.trino.server.protocol.spooling.QueryDataEncoder; import io.trino.spi.Page; -import io.trino.spi.block.Block; import io.trino.spi.connector.ConnectorSession; -import io.trino.spi.type.CharType; -import io.trino.spi.type.SqlDate; -import io.trino.spi.type.SqlDecimal; -import io.trino.spi.type.SqlTime; -import io.trino.spi.type.SqlTimeWithTimeZone; -import io.trino.spi.type.SqlTimestamp; -import io.trino.spi.type.SqlTimestampWithTimeZone; -import io.trino.spi.type.SqlVarbinary; -import io.trino.spi.type.VarcharType; -import io.trino.type.SqlIntervalDayTime; -import io.trino.type.SqlIntervalYearMonth; import java.io.IOException; import java.io.OutputStream; -import java.math.BigDecimal; -import java.math.BigInteger; import java.util.List; -import java.util.Map; import static io.trino.client.spooling.DataAttribute.SEGMENT_SIZE; import static io.trino.plugin.base.util.JsonUtils.jsonFactory; -import static io.trino.spi.type.Chars.padSpaces; +import static io.trino.server.protocol.JsonEncodingUtils.createTypeEncoders; +import static io.trino.server.protocol.JsonEncodingUtils.writePagesToJsonGenerator; import static java.lang.Math.toIntExact; import static java.util.Objects.requireNonNull; @@ -57,14 +42,16 @@ public class JsonQueryDataEncoder { private static final String ENCODING = "json"; private final Session session; - private final List<OutputColumn> columns; - private final ObjectMapper mapper; + private final TypeEncoder[] typeEncoders; + private final int[] sourcePageChannels; - public JsonQueryDataEncoder(ObjectMapper mapper, Session session, List<OutputColumn> columns) + public JsonQueryDataEncoder(Session session, List<OutputColumn> columns) { - this.mapper = requireNonNull(mapper, "mapper is null"); this.session = requireNonNull(session, "session is null"); - this.columns = requireNonNull(columns, "columns is null"); + this.typeEncoders = createTypeEncoders(session.toConnectorSession(), requireNonNull(columns, "columns is null")); + this.sourcePageChannels = requireNonNull(columns, "columns is null").stream() + .mapToInt(OutputColumn::sourcePageChannel) + .toArray(); } @Override @@ -74,28 +61,7 @@ public DataAttributes encodeTo(OutputStream output, List<Page> pages) JsonFactory jsonFactory = jsonFactory(); ConnectorSession connectorSession = session.toConnectorSession(); try (CountingOutputStream wrapper = new CountingOutputStream(output); JsonGenerator generator = jsonFactory.createGenerator(wrapper)) { - generator.writeStartArray(); - for (Page page : pages) { - for (int position = 0; position < page.getPositionCount(); position++) { - generator.writeStartArray(); - for (OutputColumn column : columns) { - Block block = page.getBlock(column.sourcePageChannel()); - if (block.isNull(position)) { - generator.writeNull(); - continue; - } - switch (column.type()) { - case VarcharType varcharType -> writeSliceToRawUtf8(generator, varcharType.getSlice(block, position)); - case CharType charType -> writeSliceToRawUtf8(generator, padSpaces(charType.getSlice(block, position), charType.getLength())); - default -> writeValue(mapper, generator, column.type().getObjectValue(connectorSession, block, position)); - } - } - generator.writeEndArray(); - } - } - generator.writeEndArray(); - generator.flush(); // final flush to have the data written to the output stream - + writePagesToJsonGenerator(connectorSession, generator, typeEncoders, sourcePageChannels, pages); return
DataAttributes.builder() .set(SEGMENT_SIZE, toIntExact(wrapper.getCount())) .build(); @@ -105,62 +71,6 @@ public DataAttributes encodeTo(OutputStream output, List<Page> pages) } } - private static void writeValue(ObjectMapper mapper, JsonGenerator generator, Object value) - throws IOException - { - switch (value) { - case null -> generator.writeNull(); - case Boolean booleanValue -> generator.writeBoolean(booleanValue); - case Double doubleValue when doubleValue.isInfinite() -> generator.writeString(doubleValue.toString()); - case Double doubleValue when doubleValue.isNaN() -> generator.writeString("NaN"); - case Float floatValue when floatValue.isInfinite() -> generator.writeString(floatValue.toString()); - case Float floatValue when floatValue.isNaN() -> generator.writeString("NaN"); - case Float floatValue -> generator.writeNumber(floatValue); - case Double doubleValue -> generator.writeNumber(doubleValue); - case Integer integerValue -> generator.writeNumber(integerValue); - case Long longValue -> generator.writeNumber(longValue); - case BigInteger bigIntegerValue -> generator.writeNumber(bigIntegerValue); - case Byte byteValue -> generator.writeNumber(byteValue); - case BigDecimal bigDecimalValue -> generator.writeNumber(bigDecimalValue); - case SqlDate dateValue -> generator.writeString(dateValue.toString()); - case SqlDecimal decimalValue -> generator.writeString(decimalValue.toString()); - case SqlIntervalDayTime intervalValue -> generator.writeString(intervalValue.toString()); - case SqlIntervalYearMonth intervalValue -> generator.writeString(intervalValue.toString()); - case SqlTime timeValue -> generator.writeString(timeValue.toString()); - case SqlTimeWithTimeZone timeWithTimeZone -> generator.writeString(timeWithTimeZone.toString()); - case SqlTimestamp timestamp -> generator.writeString(timestamp.toString()); - case SqlTimestampWithTimeZone timestampWithTimeZone -> generator.writeString(timestampWithTimeZone.toString()); - case SqlVarbinary varbinaryValue -> generator.writeBinary(varbinaryValue.getBytes()); - case String stringValue -> generator.writeString(stringValue); - case byte[] binaryValue -> generator.writeBinary(binaryValue); - case List<?> listValue -> { - generator.writeStartArray(); - for (Object element : listValue) { - writeValue(mapper, generator, element); - } - generator.writeEndArray(); - } - // Interleaved array of key-value pairs, compact and retains key structure - case Map<?, ?> mapValue -> { - generator.writeStartObject(); - for (Map.Entry<?, ?> entry : mapValue.entrySet()) { - // The original JSON encoding, always converts a key to a String to use it in the JSON object - generator.writeFieldName(entry.getKey().toString()); - writeValue(mapper, generator, entry.getValue()); - } - generator.writeEndObject(); - } - default -> mapper.writeValue(generator, value); - } - } - - private static void writeSliceToRawUtf8(JsonGenerator generator, Slice slice) - throws IOException - { - // Optimization: avoid conversion from Slice to String and String to bytes when writing UTF-8 strings - generator.writeUTF8String(slice.byteArray(), slice.byteArrayOffset(), slice.length()); - } - @Override public String encoding() { @@ -171,19 +81,17 @@ public static class Factory implements QueryDataEncoder.Factory { protected final JsonFactory factory; - private final ObjectMapper mapper; @Inject - public Factory(ObjectMapper mapper) + public Factory() { this.factory = jsonFactory(); - this.mapper = requireNonNull(mapper, "mapper is null"); } @Override public QueryDataEncoder create(Session session,
List<OutputColumn> columns) { - return new JsonQueryDataEncoder(mapper, session, columns); + return new JsonQueryDataEncoder(session, columns); } @Override @@ -196,12 +104,6 @@ public String encoding() public static class ZstdFactory extends Factory { - @Inject - public ZstdFactory(ObjectMapper mapper) - { - super(mapper); - } - @Override public QueryDataEncoder create(Session session, List<OutputColumn> columns) { @@ -218,12 +120,6 @@ public String encoding() public static class Lz4Factory extends Factory { - @Inject - public Lz4Factory(ObjectMapper mapper) - { - super(mapper); - } - @Override public QueryDataEncoder create(Session session, List<OutputColumn> columns) { diff --git a/core/trino-main/src/test/java/io/trino/server/protocol/spooling/TestJsonQueryDataEncoding.java b/core/trino-main/src/test/java/io/trino/server/protocol/spooling/TestJsonQueryDataEncoding.java index fd24b20f74626..e4d9085b63b37 100644 --- a/core/trino-main/src/test/java/io/trino/server/protocol/spooling/TestJsonQueryDataEncoding.java +++ b/core/trino-main/src/test/java/io/trino/server/protocol/spooling/TestJsonQueryDataEncoding.java @@ -13,7 +13,6 @@ */ package io.trino.server.protocol.spooling; -import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.collect.ImmutableList; import io.trino.client.Column; import io.trino.client.QueryDataDecoder; @@ -79,7 +78,7 @@ protected QueryDataDecoder createDecoder(List<Column> columns) protected QueryDataEncoder createEncoder(List<Column> columns) { - return new JsonQueryDataEncoder.Factory(new ObjectMapper()).create(TEST_SESSION, columns); + return new JsonQueryDataEncoder.Factory().create(TEST_SESSION, columns); } @Test From 9bc3d8c42c46ef9640ccd65fc2f675e0232432a0 Mon Sep 17 00:00:00 2001 From: "Mateusz \"Serafin\" Gajewski" Date: Wed, 30 Oct 2024 10:18:33 +0100 Subject: [PATCH 25/31] Rename raw JSON query data producer --- .../io/trino/server/protocol/ExecutingStatementResource.java | 4 ++-- ...QueryDataProducer.java => JsonPagesQueryDataProducer.java} | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) rename core/trino-main/src/main/java/io/trino/server/protocol/spooling/{RawQueryDataProducer.java => JsonPagesQueryDataProducer.java} (97%) diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/ExecutingStatementResource.java b/core/trino-main/src/main/java/io/trino/server/protocol/ExecutingStatementResource.java index 6db9899819404..5033ed246d66c 100644 --- a/core/trino-main/src/main/java/io/trino/server/protocol/ExecutingStatementResource.java +++ b/core/trino-main/src/main/java/io/trino/server/protocol/ExecutingStatementResource.java @@ -29,9 +29,9 @@ import io.trino.server.ExternalUriInfo; import io.trino.server.ForStatementResource; import io.trino.server.ServerConfig; +import io.trino.server.protocol.spooling.JsonPagesQueryDataProducer; import io.trino.server.protocol.spooling.QueryDataEncoder; import io.trino.server.protocol.spooling.QueryDataEncoders; -import io.trino.server.protocol.spooling.RawQueryDataProducer; import io.trino.server.protocol.spooling.SpooledQueryDataProducer; import io.trino.server.security.ResourceSecurity; import io.trino.spi.QueryId; @@ -207,7 +207,7 @@ protected Query getQuery(QueryId queryId, String slug, long token) queryManager, encoderFactory .map(SpooledQueryDataProducer::createSpooledQueryDataProducer) - .orElseGet(RawQueryDataProducer::new), + .orElseGet(JsonPagesQueryDataProducer::new), queryInfoUrlFactory.getQueryInfoUrl(queryId), directExchangeClientSupplier, exchangeManagerRegistry, diff --git
a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/RawQueryDataProducer.java b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/JsonPagesQueryDataProducer.java similarity index 97% rename from core/trino-main/src/main/java/io/trino/server/protocol/spooling/RawQueryDataProducer.java rename to core/trino-main/src/main/java/io/trino/server/protocol/spooling/JsonPagesQueryDataProducer.java index a6527e4649939..16848ac12c6e6 100644 --- a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/RawQueryDataProducer.java +++ b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/JsonPagesQueryDataProducer.java @@ -24,7 +24,7 @@ import static io.trino.server.protocol.JsonArrayResultsIterator.toIterableList; -public class RawQueryDataProducer +public class JsonPagesQueryDataProducer implements QueryDataProducer { @Override From f25f7620318abbe0b8be68d33b1718b00b7b0d90 Mon Sep 17 00:00:00 2001 From: "Mateusz \"Serafin\" Gajewski" Date: Wed, 30 Oct 2024 10:21:42 +0100 Subject: [PATCH 26/31] Serialize pages directly to json in the direct protocol This unifies serialization between the spooled and direct protocols --- .../java/io/trino/cli/TestQueryRunner.java | 4 +- .../io/trino/client/ResultRowsDecoder.java | 4 +- ...{RawQueryData.java => TypedQueryData.java} | 6 +- .../io/trino/client/TestClientRedirect.java | 2 +- .../test/java/io/trino/client/TestRetry.java | 2 +- .../io/trino/jdbc/TestProgressMonitor.java | 4 +- .../dispatcher/QueuedStatementResource.java | 4 +- .../protocol/ExecutingStatementResource.java | 3 +- .../protocol/JsonArrayResultsIterator.java | 233 --------- .../server/protocol/JsonBytesQueryData.java | 45 ++ .../protocol/JsonBytesQueryDataProducer.java | 70 +++ .../server/protocol/JsonEncodingUtils.java | 69 ++- .../spooling/JsonPagesQueryDataProducer.java | 39 -- .../spooling/QueryDataJacksonModule.java | 7 +- .../encoding/JsonQueryDataEncoder.java | 8 +- .../protocol/TestQueryDataSerialization.java | 10 +- .../TestQueryResultsSerialization.java | 8 +- .../trino/server/protocol/TestResultRows.java | 492 ------------------ 18 files changed, 198 insertions(+), 812 deletions(-) rename client/trino-client/src/main/java/io/trino/client/{RawQueryData.java => TypedQueryData.java} (91%) delete mode 100644 core/trino-main/src/main/java/io/trino/server/protocol/JsonArrayResultsIterator.java create mode 100644 core/trino-main/src/main/java/io/trino/server/protocol/JsonBytesQueryData.java create mode 100644 core/trino-main/src/main/java/io/trino/server/protocol/JsonBytesQueryDataProducer.java delete mode 100644 core/trino-main/src/main/java/io/trino/server/protocol/spooling/JsonPagesQueryDataProducer.java delete mode 100644 core/trino-main/src/test/java/io/trino/server/protocol/TestResultRows.java diff --git a/client/trino-cli/src/test/java/io/trino/cli/TestQueryRunner.java b/client/trino-cli/src/test/java/io/trino/cli/TestQueryRunner.java index 2975565aa469d..574256cfc3d66 100644 --- a/client/trino-cli/src/test/java/io/trino/cli/TestQueryRunner.java +++ b/client/trino-cli/src/test/java/io/trino/cli/TestQueryRunner.java @@ -20,8 +20,8 @@ import io.trino.client.Column; import io.trino.client.JsonCodec; import io.trino.client.QueryResults; -import io.trino.client.RawQueryData; import io.trino.client.StatementStats; +import io.trino.client.TypedQueryData; import io.trino.client.uri.PropertyName; import io.trino.client.uri.TrinoUri; import okhttp3.mockwebserver.MockResponse; @@ -136,7 +136,7 @@ static String createResults(MockWebServer server) null, 
null, ImmutableList.of(new Column("_col0", BIGINT, new ClientTypeSignature(BIGINT))), - RawQueryData.of(ImmutableList.of(ImmutableList.of(123))), + TypedQueryData.of(ImmutableList.of(ImmutableList.of(123))), StatementStats.builder() .setState("FINISHED") .setProgressPercentage(OptionalDouble.empty()) diff --git a/client/trino-client/src/main/java/io/trino/client/ResultRowsDecoder.java b/client/trino-client/src/main/java/io/trino/client/ResultRowsDecoder.java index e81621a0e1e91..25047cedc31b3 100644 --- a/client/trino-client/src/main/java/io/trino/client/ResultRowsDecoder.java +++ b/client/trino-client/src/main/java/io/trino/client/ResultRowsDecoder.java @@ -83,8 +83,8 @@ public ResultRows toRows(List<Column> columns, QueryData data) } verify(columns != null && !columns.isEmpty(), "Columns must be set when decoding data"); - if (data instanceof RawQueryData) { - RawQueryData rawData = (RawQueryData) data; + if (data instanceof TypedQueryData) { + TypedQueryData rawData = (TypedQueryData) data; if (rawData.isNull()) { return NULL_ROWS; // for backward compatibility instead of null } diff --git a/client/trino-client/src/main/java/io/trino/client/RawQueryData.java b/client/trino-client/src/main/java/io/trino/client/TypedQueryData.java similarity index 91% rename from client/trino-client/src/main/java/io/trino/client/RawQueryData.java rename to client/trino-client/src/main/java/io/trino/client/TypedQueryData.java index d090102a515c6..7697cc4cc20dc 100644 --- a/client/trino-client/src/main/java/io/trino/client/RawQueryData.java +++ b/client/trino-client/src/main/java/io/trino/client/TypedQueryData.java @@ -24,12 +24,12 @@ * Class represents QueryData of already typed values * */ -public class RawQueryData +public class TypedQueryData implements QueryData { private final Iterable<List<Object>> iterable; - private RawQueryData(Iterable<List<Object>> values) + private TypedQueryData(Iterable<List<Object>> values) { this.iterable = values == null ?
null : unmodifiableIterable(values); } @@ -42,7 +42,7 @@ public Iterable<List<Object>> getIterable() public static QueryData of(@Nullable Iterable<List<Object>> values) { - return new RawQueryData(values); + return new TypedQueryData(values); } @Override diff --git a/client/trino-client/src/test/java/io/trino/client/TestClientRedirect.java b/client/trino-client/src/test/java/io/trino/client/TestClientRedirect.java index 73684af8e4fc7..2e1413b87f066 100644 --- a/client/trino-client/src/test/java/io/trino/client/TestClientRedirect.java +++ b/client/trino-client/src/test/java/io/trino/client/TestClientRedirect.java @@ -161,7 +161,7 @@ private String newQueryResults(MockWebServer server) Stream.of(new Column("id", INTEGER, new ClientTypeSignature("integer")), new Column("name", VARCHAR, new ClientTypeSignature("varchar"))) .collect(toList()), - RawQueryData.of(IntStream.range(0, numRecords) + TypedQueryData.of(IntStream.range(0, numRecords) .mapToObj(index -> Stream.of((Object) index, "a").collect(toList())) .collect(toList())), StatementStats.builder() diff --git a/client/trino-client/src/test/java/io/trino/client/TestRetry.java b/client/trino-client/src/test/java/io/trino/client/TestRetry.java index f414b8e2c0822..ab72f2db0278a 100644 --- a/client/trino-client/src/test/java/io/trino/client/TestRetry.java +++ b/client/trino-client/src/test/java/io/trino/client/TestRetry.java @@ -140,7 +140,7 @@ private String newQueryResults(String state) Stream.of(new Column("id", INTEGER, new ClientTypeSignature("integer")), new Column("name", VARCHAR, new ClientTypeSignature("varchar"))) .collect(toList()), - RawQueryData.of(IntStream.range(0, numRecords) + TypedQueryData.of(IntStream.range(0, numRecords) .mapToObj(index -> Stream.of((Object) index, "a").collect(toList())) .collect(toList())), new StatementStats(state, state.equals("QUEUED"), true, OptionalDouble.of(0), OptionalDouble.of(0), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null), diff --git a/client/trino-jdbc/src/test/java/io/trino/jdbc/TestProgressMonitor.java b/client/trino-jdbc/src/test/java/io/trino/jdbc/TestProgressMonitor.java index 8bc70a14ec7f4..000b47195e8cc 100644 --- a/client/trino-jdbc/src/test/java/io/trino/jdbc/TestProgressMonitor.java +++ b/client/trino-jdbc/src/test/java/io/trino/jdbc/TestProgressMonitor.java @@ -21,8 +21,8 @@ import io.trino.client.ClientTypeSignature; import io.trino.client.Column; import io.trino.client.QueryResults; -import io.trino.client.RawQueryData; import io.trino.client.StatementStats; +import io.trino.client.TypedQueryData; import io.trino.server.protocol.spooling.QueryDataJacksonModule; import io.trino.spi.type.StandardTypes; import okhttp3.mockwebserver.MockResponse; @@ -98,7 +98,7 @@ private String newQueryResults(Integer partialCancelId, Integer nextUriId, List< partialCancelId == null ? null : server.url(format("/v1/statement/partialCancel/%s.%s", queryId, partialCancelId)).uri(), nextUriId == null ?
null : server.url(format("/v1/statement/%s/%s", queryId, nextUriId)).uri(), responseColumns, - RawQueryData.of(data), + TypedQueryData.of(data), new StatementStats(state, state.equals("QUEUED"), true, OptionalDouble.of(0), OptionalDouble.of(0), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, null), null, ImmutableList.of(), diff --git a/core/trino-main/src/main/java/io/trino/dispatcher/QueuedStatementResource.java b/core/trino-main/src/main/java/io/trino/dispatcher/QueuedStatementResource.java index d26fe15d1323b..1b10eb1f97968 100644 --- a/core/trino-main/src/main/java/io/trino/dispatcher/QueuedStatementResource.java +++ b/core/trino-main/src/main/java/io/trino/dispatcher/QueuedStatementResource.java @@ -27,8 +27,8 @@ import io.opentelemetry.api.trace.Tracer; import io.trino.client.QueryError; import io.trino.client.QueryResults; -import io.trino.client.RawQueryData; import io.trino.client.StatementStats; +import io.trino.client.TypedQueryData; import io.trino.execution.ExecutionFailureInfo; import io.trino.execution.QueryManagerConfig; import io.trino.execution.QueryState; @@ -281,7 +281,7 @@ private static QueryResults createQueryResults( null, nextUri, null, - RawQueryData.of(null), + TypedQueryData.of(null), StatementStats.builder() .setState(state.toString()) .setQueued(state == QUEUED) diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/ExecutingStatementResource.java b/core/trino-main/src/main/java/io/trino/server/protocol/ExecutingStatementResource.java index 5033ed246d66c..eef5b7185c8ec 100644 --- a/core/trino-main/src/main/java/io/trino/server/protocol/ExecutingStatementResource.java +++ b/core/trino-main/src/main/java/io/trino/server/protocol/ExecutingStatementResource.java @@ -29,7 +29,6 @@ import io.trino.server.ExternalUriInfo; import io.trino.server.ForStatementResource; import io.trino.server.ServerConfig; -import io.trino.server.protocol.spooling.JsonPagesQueryDataProducer; import io.trino.server.protocol.spooling.QueryDataEncoder; import io.trino.server.protocol.spooling.QueryDataEncoders; import io.trino.server.protocol.spooling.SpooledQueryDataProducer; @@ -207,7 +206,7 @@ protected Query getQuery(QueryId queryId, String slug, long token) queryManager, encoderFactory .map(SpooledQueryDataProducer::createSpooledQueryDataProducer) - .orElseGet(JsonPagesQueryDataProducer::new), + .orElseGet(JsonBytesQueryDataProducer::new), queryInfoUrlFactory.getQueryInfoUrl(queryId), directExchangeClientSupplier, exchangeManagerRegistry, diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/JsonArrayResultsIterator.java b/core/trino-main/src/main/java/io/trino/server/protocol/JsonArrayResultsIterator.java deleted file mode 100644 index dcda457bf1b2a..0000000000000 --- a/core/trino-main/src/main/java/io/trino/server/protocol/JsonArrayResultsIterator.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.trino.server.protocol; - -import com.google.common.collect.AbstractIterator; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Maps; -import io.trino.Session; -import io.trino.client.ClientCapabilities; -import io.trino.spi.Page; -import io.trino.spi.TrinoException; -import io.trino.spi.block.Block; -import io.trino.spi.connector.ConnectorSession; -import io.trino.spi.type.ArrayType; -import io.trino.spi.type.MapType; -import io.trino.spi.type.RowType; -import io.trino.spi.type.SqlTime; -import io.trino.spi.type.SqlTimeWithTimeZone; -import io.trino.spi.type.SqlTimestamp; -import io.trino.spi.type.SqlTimestampWithTimeZone; -import io.trino.spi.type.TimeType; -import io.trino.spi.type.TimeWithTimeZoneType; -import io.trino.spi.type.TimestampType; -import io.trino.spi.type.TimestampWithTimeZoneType; -import io.trino.spi.type.Type; -import jakarta.annotation.Nullable; - -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.Deque; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.function.Consumer; - -import static io.trino.spi.StandardErrorCode.SERIALIZATION_ERROR; -import static java.lang.String.format; -import static java.util.Collections.emptyList; -import static java.util.Collections.unmodifiableList; -import static java.util.Collections.unmodifiableMap; -import static java.util.Objects.requireNonNull; - -public class JsonArrayResultsIterator - extends AbstractIterator<List<Object>> - implements Iterable<List<Object>> -{ - private final Deque<Page> queue; - private final Session session; - private final ImmutableList<Page> pages; - private final List<OutputColumn> columns; - private final boolean supportsParametricDateTime; - private final Consumer<TrinoException> exceptionConsumer; - - private Page currentPage; - private int rowPosition = -1; - private int inPageIndex = -1; - - public JsonArrayResultsIterator(Session session, List<Page> pages, List<OutputColumn> columns, Consumer<TrinoException> exceptionConsumer) - { - this.pages = ImmutableList.copyOf(pages); - this.queue = new ArrayDeque<>(pages); - this.session = requireNonNull(session, "session is null"); - this.columns = ImmutableList.copyOf(requireNonNull(columns, "columns is null")); - this.supportsParametricDateTime = session.getClientCapabilities().contains(ClientCapabilities.PARAMETRIC_DATETIME.toString()); - this.exceptionConsumer = requireNonNull(exceptionConsumer, "exceptionConsumer is null"); - this.currentPage = queue.pollFirst(); - } - - @Override - protected List<Object> computeNext() - { - while (true) { - if (currentPage == null) { - return endOfData(); - } - - inPageIndex++; - - if (inPageIndex >= currentPage.getPositionCount()) { - currentPage = queue.pollFirst(); - - if (currentPage == null) { - return endOfData(); - } - - inPageIndex = 0; - } - - rowPosition++; - - List<Object> row = getRowValues(); - if (row != null) { - // row is not skipped, return it - return row; - } - } - } - - @Nullable - private List<Object> getRowValues() - { - // types are present if data is present - List<Object> row = new ArrayList<>(columns.size()); - ConnectorSession connectorSession = session.toConnectorSession(); - for (OutputColumn outputColumn : columns) { - Type type = outputColumn.type(); - - try { - Block block = currentPage.getBlock(outputColumn.sourcePageChannel()); - Object value = type.getObjectValue(connectorSession, block, inPageIndex); - if (!supportsParametricDateTime) { - value = getLegacyValue(value, type); - } - row.add(value); - } - catch (Throwable throwable) { - propagateException(rowPosition, outputColumn.sourcePageChannel(),
outputColumn.columnName(), outputColumn.type(), throwable); - // skip row as it contains non-serializable value - return null; - } - } - return unmodifiableList(row); - } - - private Object getLegacyValue(Object value, Type type) - { - if (value == null) { - return null; - } - - if (!supportsParametricDateTime) { - // for legacy clients we need to round timestamp and timestamp with timezone to default precision (3) - - if (type instanceof TimestampType) { - return ((SqlTimestamp) value).roundTo(3); - } - - if (type instanceof TimestampWithTimeZoneType) { - return ((SqlTimestampWithTimeZone) value).roundTo(3); - } - - if (type instanceof TimeType) { - return ((SqlTime) value).roundTo(3); - } - - if (type instanceof TimeWithTimeZoneType) { - return ((SqlTimeWithTimeZone) value).roundTo(3); - } - } - - if (type instanceof ArrayType) { - Type elementType = ((ArrayType) type).getElementType(); - - if (!(elementType instanceof TimestampType || elementType instanceof TimestampWithTimeZoneType)) { - return value; - } - - List<Object> listValue = (List<Object>) value; - List<Object> legacyValues = new ArrayList<>(listValue.size()); - for (Object element : listValue) { - legacyValues.add(getLegacyValue(element, elementType)); - } - - return unmodifiableList(legacyValues); - } - - if (type instanceof MapType) { - Type keyType = ((MapType) type).getKeyType(); - Type valueType = ((MapType) type).getValueType(); - - Map<Object, Object> mapValue = (Map<Object, Object>) value; - Map<Object, Object> result = Maps.newHashMapWithExpectedSize(mapValue.size()); - mapValue.forEach((key, val) -> result.put(getLegacyValue(key, keyType), getLegacyValue(val, valueType))); - return unmodifiableMap(result); - } - - if (type instanceof RowType) { - List<RowType.Field> fields = ((RowType) type).getFields(); - List<Object> values = (List<Object>) value; - - List<Object> result = new ArrayList<>(values.size()); - for (int i = 0; i < values.size(); i++) { - result.add(getLegacyValue(values.get(i), fields.get(i).getType())); - } - return unmodifiableList(result); - } - - return value; - } - - private void propagateException(int row, int channel, String name, Type type, Throwable cause) - { - // columns and rows are 0-indexed - String message = format("Could not serialize column '%s' of type '%s' at position %d:%d", - name, - type, - row + 1, - channel + 1); - - exceptionConsumer.accept(new TrinoException(SERIALIZATION_ERROR, message, cause)); - } - - @Override - public Iterator<List<Object>> iterator() - { - return new JsonArrayResultsIterator(session, pages, columns, exceptionConsumer); - } - - public static Iterable<List<Object>> toIterableList(Session session, QueryResultRows rows, Consumer<TrinoException> serializationExceptionHandler) - { - if (rows.getOutputColumns().isEmpty()) { - return emptyList(); - } - - List<OutputColumn> columnAndTypes = rows.getOutputColumns().orElseThrow(); - return new JsonArrayResultsIterator( - session, - rows.getPages(), - columnAndTypes, - serializationExceptionHandler); - } -} diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/JsonBytesQueryData.java b/core/trino-main/src/main/java/io/trino/server/protocol/JsonBytesQueryData.java new file mode 100644 index 0000000000000..004033cbf1744 --- /dev/null +++ b/core/trino-main/src/main/java/io/trino/server/protocol/JsonBytesQueryData.java @@ -0,0 +1,45 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.server.protocol; + +import com.fasterxml.jackson.core.JsonGenerator; +import io.trino.client.QueryData; + +import java.io.IOException; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Objects.requireNonNull; + +public class JsonBytesQueryData + implements QueryData +{ + private final byte[] json; + + public JsonBytesQueryData(byte[] json) + { + this.json = requireNonNull(json, "json is null"); + } + + public void writeTo(JsonGenerator generator) + throws IOException + { + generator.writeRawValue(new String(json, UTF_8)); + } + + @Override + public boolean isNull() + { + return false; + } +} diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/JsonBytesQueryDataProducer.java b/core/trino-main/src/main/java/io/trino/server/protocol/JsonBytesQueryDataProducer.java new file mode 100644 index 0000000000000..062c471c4e762 --- /dev/null +++ b/core/trino-main/src/main/java/io/trino/server/protocol/JsonBytesQueryDataProducer.java @@ -0,0 +1,70 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.server.protocol; + +import com.fasterxml.jackson.core.JsonGenerator; +import io.trino.Session; +import io.trino.client.QueryData; +import io.trino.server.ExternalUriInfo; +import io.trino.server.protocol.JsonEncodingUtils.TypeEncoder; +import io.trino.server.protocol.spooling.QueryDataProducer; +import io.trino.spi.TrinoException; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.List; +import java.util.function.Consumer; + +import static io.trino.plugin.base.util.JsonUtils.jsonFactory; +import static io.trino.server.protocol.JsonEncodingUtils.createTypeEncoders; +import static io.trino.server.protocol.JsonEncodingUtils.writePagesToJsonGenerator; +import static java.util.Objects.requireNonNull; + +public class JsonBytesQueryDataProducer + implements QueryDataProducer +{ + private TypeEncoder[] typeEncoders; + private int[] sourcePageChannels; + + @Override + public QueryData produce(ExternalUriInfo uriInfo, Session session, QueryResultRows rows, Consumer<TrinoException> throwableConsumer) + { + if (rows.isEmpty()) { + return null; + } + + List<OutputColumn> columns = rows.getOutputColumns() + .orElseThrow(() -> new IllegalStateException("Data present without columns")); + + if (typeEncoders == null) { + typeEncoders = createTypeEncoders(session, columns); + sourcePageChannels = requireNonNull(columns, "columns is null").stream() + .mapToInt(OutputColumn::sourcePageChannel) + .toArray(); + } + + // Write to a buffer so we can capture and propagate the exception + try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); JsonGenerator generator = jsonFactory().createGenerator(outputStream)) { + writePagesToJsonGenerator(session.toConnectorSession(), generator, typeEncoders, sourcePageChannels, rows.getPages()); + return new JsonBytesQueryData(outputStream.toByteArray()); + } + catch (TrinoException e) { + return null; + } + catch (IOException e) { + throw new UncheckedIOException(e); + } + } +} diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java b/core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java index fef8964c96219..fbf3833394cd3 100644 --- a/core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java +++ b/core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java @@ -15,7 +15,10 @@ import com.fasterxml.jackson.core.JsonGenerator; import io.airlift.slice.Slice; +import io.trino.Session; +import io.trino.client.ClientCapabilities; import io.trino.spi.Page; +import io.trino.spi.TrinoException; import io.trino.spi.block.Block; import io.trino.spi.block.SqlMap; import io.trino.spi.block.SqlRow; @@ -49,6 +52,7 @@ import java.util.List; import static com.google.common.base.Verify.verify; +import static io.trino.spi.StandardErrorCode.SERIALIZATION_ERROR; import static io.trino.spi.type.BigintType.BIGINT; import static io.trino.spi.type.BooleanType.BOOLEAN; import static io.trino.spi.type.Chars.padSpaces; @@ -75,16 +79,20 @@ private JsonEncodingUtils() {} private static final VarcharEncoder VARCHAR_ENCODER = new VarcharEncoder(); private static final VarbinaryEncoder VARBINARY_ENCODER = new VarbinaryEncoder(); - public static TypeEncoder[] createTypeEncoders(ConnectorSession session, List<OutputColumn> columns) + public static TypeEncoder[] createTypeEncoders(Session session, List<OutputColumn> columns) { verify(!columns.isEmpty(), "Columns must not be empty"); + boolean useLegacyValue = !requireNonNull(session, "session is null")
.getClientCapabilities() + .contains(ClientCapabilities.PARAMETRIC_DATETIME.toString()); + return columns.stream() - .map(column -> createTypeEncoder(column.type())) + .map(column -> createTypeEncoder(column.type(), useLegacyValue)) .toArray(TypeEncoder[]::new); } - public static TypeEncoder createTypeEncoder(Type type) + public static TypeEncoder createTypeEncoder(Type type, boolean useLegacyValue) { return switch (type) { case BigintType _ -> BIGINT_ENCODER; @@ -98,32 +106,37 @@ public static TypeEncoder createTypeEncoder(Type type) case VarbinaryType _ -> VARBINARY_ENCODER; case CharType charType -> new CharEncoder(charType.getLength()); // TODO: add specialized Short/Long decimal encoders - case ArrayType arrayType -> new ArrayEncoder(arrayType, createTypeEncoder(arrayType.getElementType())); - case MapType mapType -> new MapEncoder(mapType, createTypeEncoder(mapType.getValueType())); + case ArrayType arrayType -> new ArrayEncoder(arrayType, createTypeEncoder(arrayType.getElementType(), useLegacyValue)); + case MapType mapType -> new MapEncoder(mapType, createTypeEncoder(mapType.getValueType(), useLegacyValue)); case RowType rowType -> new RowEncoder(rowType, rowType.getTypeParameters() .stream() - .map(JsonEncodingUtils::createTypeEncoder) + .map(elementType -> createTypeEncoder(elementType, useLegacyValue)) .toArray(TypeEncoder[]::new)); - case Type _ -> new TypeObjectValueEncoder(type); + case Type _ -> new TypeObjectValueEncoder(type, useLegacyValue); }; } public static void writePagesToJsonGenerator(ConnectorSession connectorSession, JsonGenerator generator, TypeEncoder[] typeEncoders, int[] sourcePageChannels, List pages) - throws IOException { verify(typeEncoders.length == sourcePageChannels.length, "Source page channels and type encoders must have the same length"); - generator.writeStartArray(); - for (Page page : pages) { - for (int position = 0; position < page.getPositionCount(); position++) { - generator.writeStartArray(); - for (int column = 0; column < typeEncoders.length; column++) { - typeEncoders[column].encode(generator, connectorSession, page.getBlock(sourcePageChannels[column]), position); + try { + generator.writeStartArray(); + + for (Page page : pages) { + for (int position = 0; position < page.getPositionCount(); position++) { + generator.writeStartArray(); + for (int column = 0; column < typeEncoders.length; column++) { + typeEncoders[column].encode(generator, connectorSession, page.getBlock(sourcePageChannels[column]), position); + } + generator.writeEndArray(); } - generator.writeEndArray(); } + generator.writeEndArray(); + generator.flush(); // final flush to have the data written to the output stream + } + catch (Exception e) { + throw new TrinoException(SERIALIZATION_ERROR, "Could not serialize data to JSON", e); } - generator.writeEndArray(); - generator.flush(); // final flush to have the data written to the output stream } public interface TypeEncoder @@ -397,10 +410,12 @@ private static class TypeObjectValueEncoder implements TypeEncoder { private final Type type; + private final boolean useLegacyValue; - public TypeObjectValueEncoder(Type type) + public TypeObjectValueEncoder(Type type, boolean useLegacyValue) { this.type = requireNonNull(type, "type is null"); + this.useLegacyValue = useLegacyValue; } @Override @@ -412,7 +427,8 @@ public void encode(JsonGenerator generator, ConnectorSession session, Block bloc return; } - Object value = type.getObjectValue(session, block, position); + Object value = roundParametricTypes(type.getObjectValue(session, 
block, position)); + switch (value) { case BigDecimal bigDecimalValue -> generator.writeNumber(bigDecimalValue); case SqlDate dateValue -> generator.writeString(dateValue.toString()); @@ -427,5 +443,20 @@ public void encode(JsonGenerator generator, ConnectorSession session, Block bloc default -> generator.writePOJO(value); } } + + private Object roundParametricTypes(Object value) + { + if (!useLegacyValue) { + return value; + } + + return switch (value) { + case SqlTimestamp sqlTimestamp -> sqlTimestamp.roundTo(3); + case SqlTimestampWithTimeZone sqlTimestampWithTimeZone -> sqlTimestampWithTimeZone.roundTo(3); + case SqlTime sqlTime -> sqlTime.roundTo(3); + case SqlTimeWithTimeZone sqlTimeWithTimeZone -> sqlTimeWithTimeZone.roundTo(3); + default -> value; + }; + } } } diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/JsonPagesQueryDataProducer.java b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/JsonPagesQueryDataProducer.java deleted file mode 100644 index 16848ac12c6e6..0000000000000 --- a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/JsonPagesQueryDataProducer.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.server.protocol.spooling; - -import io.trino.Session; -import io.trino.client.QueryData; -import io.trino.client.RawQueryData; -import io.trino.server.ExternalUriInfo; -import io.trino.server.protocol.QueryResultRows; -import io.trino.spi.TrinoException; - -import java.util.function.Consumer; - -import static io.trino.server.protocol.JsonArrayResultsIterator.toIterableList; - -public class JsonPagesQueryDataProducer - implements QueryDataProducer -{ - @Override - public QueryData produce(ExternalUriInfo uriInfo, Session session, QueryResultRows rows, Consumer throwableConsumer) - { - if (rows.isEmpty()) { - return null; - } - - return RawQueryData.of(toIterableList(session, rows, throwableConsumer)); - } -} diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/QueryDataJacksonModule.java b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/QueryDataJacksonModule.java index f168e999d8f7d..23091c3d224ea 100644 --- a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/QueryDataJacksonModule.java +++ b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/QueryDataJacksonModule.java @@ -24,11 +24,12 @@ import com.fasterxml.jackson.databind.ser.BeanSerializerFactory; import com.fasterxml.jackson.databind.ser.std.StdSerializer; import io.trino.client.QueryData; -import io.trino.client.RawQueryData; +import io.trino.client.TypedQueryData; import io.trino.client.spooling.EncodedQueryData; import io.trino.client.spooling.InlineSegment; import io.trino.client.spooling.Segment; import io.trino.client.spooling.SpooledSegment; +import io.trino.server.protocol.JsonBytesQueryData; import java.io.IOException; @@ -37,6 +38,7 @@ *
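The roundParametricTypes fallback above exists for older clients: when a client does not advertise the PARAMETRIC_DATETIME capability, parametric temporal values are rounded to millisecond precision (precision 3) before serialization. A small sketch of the effect, assuming the SPI's SqlTimestamp.newInstance factory (values chosen for illustration):

import io.trino.spi.type.SqlTimestamp;

public class LegacyRoundingDemo
{
    public static void main(String[] args)
    {
        // 2001-01-02 03:04:05.123456 as a TIMESTAMP(6) value, given in epoch microseconds
        SqlTimestamp micros = SqlTimestamp.newInstance(6, 978_404_645_123_456L, 0);
        System.out.println(micros);            // 2001-01-02 03:04:05.123456
        System.out.println(micros.roundTo(3)); // 2001-01-02 03:04:05.123 -- what a legacy client receives
    }
}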
* * If the passed QueryData is raw - serialize its' data as a materialized array of array of objects. + * If the passed QueryData is bytes - just write them directly * Otherwise, this is a protocol extension and serialize it directly as an object. */ public class QueryDataJacksonModule @@ -65,7 +67,8 @@ public void serialize(QueryData value, JsonGenerator generator, SerializerProvid { switch (value) { case null -> provider.defaultSerializeNull(generator); - case RawQueryData rawQueryData -> provider.defaultSerializeValue(rawQueryData.getIterable(), generator); // serialize as list of lists of objects + case JsonBytesQueryData jsonBytes -> jsonBytes.writeTo(generator); + case TypedQueryData typedQueryData -> provider.defaultSerializeValue(typedQueryData.getIterable(), generator); // serialize as list of lists of objects case EncodedQueryData encoded -> createSerializer(provider, provider.constructType(EncodedQueryData.class)).serialize(encoded, generator, provider); default -> throw new IllegalArgumentException("Unsupported QueryData implementation: " + value.getClass().getSimpleName()); } diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java index f097fa83c4795..8372296b8198c 100644 --- a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java +++ b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/encoding/JsonQueryDataEncoder.java @@ -15,7 +15,6 @@ import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; -import com.fasterxml.jackson.core.JsonProcessingException; import com.google.common.io.CountingOutputStream; import com.google.inject.Inject; import io.trino.Session; @@ -24,12 +23,14 @@ import io.trino.server.protocol.OutputColumn; import io.trino.server.protocol.spooling.QueryDataEncoder; import io.trino.spi.Page; +import io.trino.spi.TrinoException; import io.trino.spi.connector.ConnectorSession; import java.io.IOException; import java.io.OutputStream; import java.util.List; +import static com.google.common.base.Throwables.throwIfInstanceOf; import static io.trino.client.spooling.DataAttribute.SEGMENT_SIZE; import static io.trino.plugin.base.util.JsonUtils.jsonFactory; import static io.trino.server.protocol.JsonEncodingUtils.createTypeEncoders; @@ -48,7 +49,7 @@ public class JsonQueryDataEncoder public JsonQueryDataEncoder(Session session, List columns) { this.session = requireNonNull(session, "session is null"); - this.typeEncoders = createTypeEncoders(session.toConnectorSession(), requireNonNull(columns, "columns is null")); + this.typeEncoders = createTypeEncoders(session, requireNonNull(columns, "columns is null")); this.sourcePageChannels = requireNonNull(columns, "columns is null").stream() .mapToInt(OutputColumn::sourcePageChannel) .toArray(); @@ -66,7 +67,8 @@ public DataAttributes encodeTo(OutputStream output, List pages) .set(SEGMENT_SIZE, toIntExact(wrapper.getCount())) .build(); } - catch (JsonProcessingException e) { + catch (Exception e) { + throwIfInstanceOf(e, TrinoException.class); throw new IOException("Could not serialize to JSON", e); } } diff --git a/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryDataSerialization.java b/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryDataSerialization.java index cd861adf9c499..a41efd89de0f1 100644 --- 
a/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryDataSerialization.java +++ b/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryDataSerialization.java @@ -23,9 +23,9 @@ import io.trino.client.JsonCodec; import io.trino.client.QueryData; import io.trino.client.QueryResults; -import io.trino.client.RawQueryData; import io.trino.client.ResultRowsDecoder; import io.trino.client.StatementStats; +import io.trino.client.TypedQueryData; import io.trino.client.spooling.DataAttributes; import io.trino.client.spooling.EncodedQueryData; import io.trino.server.protocol.spooling.QueryDataJacksonModule; @@ -63,15 +63,15 @@ public class TestQueryDataSerialization public void testNullDataSerialization() { assertThat(serialize(null)).doesNotContain("data"); - assertThat(serialize(RawQueryData.of(null))).doesNotContain("data"); + assertThat(serialize(TypedQueryData.of(null))).doesNotContain("data"); } @Test public void testEmptyArraySerialization() { - testRoundTrip(RawQueryData.of(ImmutableList.of()), "[]"); + testRoundTrip(TypedQueryData.of(ImmutableList.of()), "[]"); - assertThatThrownBy(() -> testRoundTrip(RawQueryData.of(ImmutableList.of(ImmutableList.of())), "[[]]")) + assertThatThrownBy(() -> testRoundTrip(TypedQueryData.of(ImmutableList.of(ImmutableList.of())), "[[]]")) .isInstanceOf(RuntimeException.class) .hasMessageContaining("Unexpected token END_ARRAY"); } @@ -80,7 +80,7 @@ public void testEmptyArraySerialization() public void testQueryDataSerialization() { Iterable> values = ImmutableList.of(ImmutableList.of(1L), ImmutableList.of(5L)); - testRoundTrip(RawQueryData.of(values), "[[1],[5]]"); + testRoundTrip(TypedQueryData.of(values), "[[1],[5]]"); } @Test diff --git a/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryResultsSerialization.java b/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryResultsSerialization.java index 92be607b9e5bf..c92c3a7da1103 100644 --- a/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryResultsSerialization.java +++ b/core/trino-main/src/test/java/io/trino/server/protocol/TestQueryResultsSerialization.java @@ -22,9 +22,9 @@ import io.trino.client.JsonCodec; import io.trino.client.QueryData; import io.trino.client.QueryResults; -import io.trino.client.RawQueryData; import io.trino.client.ResultRowsDecoder; import io.trino.client.StatementStats; +import io.trino.client.TypedQueryData; import io.trino.server.protocol.spooling.QueryDataJacksonModule; import org.junit.jupiter.api.Test; @@ -96,9 +96,9 @@ public void testNullDataSerialization() public void testEmptyArraySerialization() throws Exception { - testRoundTrip(RawQueryData.of(ImmutableList.of()), "[]"); + testRoundTrip(TypedQueryData.of(ImmutableList.of()), "[]"); - assertThatThrownBy(() -> testRoundTrip(RawQueryData.of(ImmutableList.of(ImmutableList.of())), "[[]]")) + assertThatThrownBy(() -> testRoundTrip(TypedQueryData.of(ImmutableList.of(ImmutableList.of())), "[[]]")) .isInstanceOf(RuntimeException.class) .hasMessageContaining("Unexpected token END_ARRAY"); } @@ -107,7 +107,7 @@ public void testEmptyArraySerialization() public void testSerialization() throws Exception { - QueryData values = RawQueryData.of(ImmutableList.of(ImmutableList.of(1L), ImmutableList.of(5L))); + QueryData values = TypedQueryData.of(ImmutableList.of(ImmutableList.of(1L), ImmutableList.of(5L))); testRoundTrip(values, "[[1],[5]]"); } diff --git a/core/trino-main/src/test/java/io/trino/server/protocol/TestResultRows.java 
b/core/trino-main/src/test/java/io/trino/server/protocol/TestResultRows.java deleted file mode 100644 index afad5fa1a88cc..0000000000000 --- a/core/trino-main/src/test/java/io/trino/server/protocol/TestResultRows.java +++ /dev/null @@ -1,492 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.server.protocol; - -import com.google.common.base.VerifyException; -import com.google.common.collect.ImmutableList; -import io.trino.Session; -import io.trino.client.ClientTypeSignature; -import io.trino.client.Column; -import io.trino.spi.Page; -import io.trino.spi.TrinoException; -import io.trino.spi.type.ArrayType; -import io.trino.spi.type.BigintType; -import io.trino.spi.type.BooleanType; -import io.trino.spi.type.IntegerType; -import io.trino.spi.type.RowType; -import io.trino.spi.type.SmallintType; -import io.trino.spi.type.TimestampType; -import io.trino.spi.type.TimestampWithTimeZoneType; -import io.trino.spi.type.Type; -import io.trino.testing.TestingSession; -import io.trino.tests.BogusType; -import org.junit.jupiter.api.Test; - -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; -import java.util.function.Consumer; -import java.util.function.Function; - -import static com.google.common.collect.Lists.newArrayList; -import static io.trino.RowPagesBuilder.rowPagesBuilder; -import static io.trino.client.ClientStandardTypes.ARRAY; -import static io.trino.client.ClientStandardTypes.BIGINT; -import static io.trino.client.ClientStandardTypes.BOOLEAN; -import static io.trino.client.ClientStandardTypes.INTEGER; -import static io.trino.client.ClientStandardTypes.MAP; -import static io.trino.client.ClientStandardTypes.ROW; -import static io.trino.client.ClientStandardTypes.TIMESTAMP; -import static io.trino.client.ClientStandardTypes.TIMESTAMP_WITH_TIME_ZONE; -import static io.trino.server.protocol.JsonArrayResultsIterator.toIterableList; -import static io.trino.server.protocol.ProtocolUtil.createColumn; -import static io.trino.server.protocol.QueryResultRows.queryResultRowsBuilder; -import static io.trino.spi.type.TypeSignature.mapType; -import static io.trino.type.InternalTypeManager.TESTING_TYPE_MANAGER; -import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; -import static org.assertj.core.api.Assertions.assertThat; -import static org.assertj.core.api.Assertions.assertThatThrownBy; - -public class TestResultRows -{ - private static final Function BOOLEAN_COLUMN = name -> new Column(name, BOOLEAN, new ClientTypeSignature(BOOLEAN)); - private static final Function BIGINT_COLUMN = name -> new Column(name, BIGINT, new ClientTypeSignature(BIGINT)); - private static final Function INT_COLUMN = name -> new Column(name, INTEGER, new ClientTypeSignature(INTEGER)); - - @Test - public void shouldNotReturnValues() - { - QueryResultRows rows = QueryResultRows.empty(getSession()); - - assertThat((Iterable>) toIterableList(getSession(), rows, _ -> {})).as("rows").isEmpty(); - 
assertThat(getAllValues(rows, ignoredException -> {})).hasSize(0); - assertThat(rows.getColumns()).isEmpty(); - assertThat(toIterableList(getSession(), rows, _ -> {}).iterator().hasNext()).isFalse(); - } - - @Test - public void shouldReturnSingleValue() - { - Column column = BOOLEAN_COLUMN.apply("_col0"); - - QueryResultRows rows = queryResultRowsBuilder(getSession()) - .withSingleBooleanValue(column, true) - .build(); - - assertThat((Iterable>) toIterableList(getSession(), rows, _ -> {})).as("rows").isNotEmpty(); - assertThat(getAllValues(rows, ignoredException -> {})).hasSize(1).containsOnly(ImmutableList.of(true)); - assertThat(rows.getColumns().orElseThrow()).containsOnly(column); - } - - @Test - public void shouldReturnUpdateCount() - { - Column column = BIGINT_COLUMN.apply("_col0"); - long value = 10123; - - QueryResultRows rows = queryResultRowsBuilder(getSession()) - .withColumnsAndTypes(ImmutableList.of(column), ImmutableList.of(BigintType.BIGINT)) - .addPages(rowPagesBuilder(BigintType.BIGINT).row(value).build()) - .build(); - - assertThat((Iterable>) toIterableList(getSession(), rows, _ -> {})).as("rows").isNotEmpty(); - assertThat(rows.getUpdateCount()).isPresent(); - assertThat(rows.getUpdateCount().get()).isEqualTo(value); - - assertThat(getAllValues(rows, ignoredException -> {})).containsExactly(ImmutableList.of(value)); - assertThat(rows.getColumns().orElseThrow()).containsOnly(column); - } - - @Test - public void shouldNotHaveUpdateCount() - { - Column column = BOOLEAN_COLUMN.apply("_col0"); - - QueryResultRows rows = queryResultRowsBuilder(getSession()) - .withSingleBooleanValue(column, false) - .build(); - - assertThat((Iterable>) toIterableList(getSession(), rows, _ -> {})).as("rows").isNotEmpty(); - assertThat(rows.getUpdateCount()).isEmpty(); - assertThat(toIterableList(getSession(), rows, _ -> {}).iterator().hasNext()).isTrue(); - } - - @Test - public void shouldReadAllValuesFromMultiplePages() - { - List columns = ImmutableList.of(INT_COLUMN.apply("_col0"), BIGINT_COLUMN.apply("_col1")); - List types = ImmutableList.of(IntegerType.INTEGER, BigintType.BIGINT); - - List pages = rowPagesBuilder(types) - .row(0, 10L) - .row(1, 11L) - .row(2, 12L) - .row(3, 13L) - .row(4, 14L) - .pageBreak() - .row(100, 110L) - .row(101, 111L) - .row(102, 112L) - .row(103, 113L) - .row(104, 114L) - .build(); - - TestExceptionConsumer exceptionConsumer = new TestExceptionConsumer(); - QueryResultRows rows = queryResultRowsBuilder(getSession()) - .withColumnsAndTypes(columns, types) - .addPages(pages) - .build(); - - assertThat((Iterable>) toIterableList(getSession(), rows, _ -> {})).as("rows").isNotEmpty(); - assertThat(rows.getTotalRowsCount()).isEqualTo(10); - assertThat(rows.getColumns()).isEqualTo(Optional.of(columns)); - assertThat(rows.getUpdateCount()).isEmpty(); - - assertThat(getAllValues(rows, exceptionConsumer)).containsExactly( - ImmutableList.of(0, 10L), - ImmutableList.of(1, 11L), - ImmutableList.of(2, 12L), - ImmutableList.of(3, 13L), - ImmutableList.of(4, 14L), - ImmutableList.of(100, 110L), - ImmutableList.of(101, 111L), - ImmutableList.of(102, 112L), - ImmutableList.of(103, 113L), - ImmutableList.of(104, 114L)); - - assertThat(exceptionConsumer.getExceptions()).isEmpty(); - } - - @Test - public void shouldOmitBadRows() - { - List columns = ImmutableList.of( - createColumn("_col0", BogusType.BOGUS, true), - createColumn("_col1", BogusType.BOGUS, true)); - List types = ImmutableList.of(BogusType.BOGUS, BogusType.BOGUS); - - List pages = rowPagesBuilder(types) - .row(0, 1) 
- .row(0, 0) - .row(0, 1) - .row(1, 0) - .row(0, 1) - .build(); - - TestExceptionConsumer exceptionConsumer = new TestExceptionConsumer(); - QueryResultRows rows = queryResultRowsBuilder(getSession()) - .withColumnsAndTypes(columns, types) - .addPages(pages) - .build(); - - assertThat(rows.isEmpty()) - .describedAs("rows are empty") - .isFalse(); - assertThat(rows.getTotalRowsCount()).isEqualTo(5); - assertThat(rows.getColumns()).isEqualTo(Optional.of(columns)); - assertThat(rows.getUpdateCount().isEmpty()).isTrue(); - - assertThat(getAllValues(rows, exceptionConsumer)) - .containsExactly(ImmutableList.of(0, 0)); - - List exceptions = exceptionConsumer.getExceptions(); - - assertThat(exceptions) - .isNotEmpty(); - - assertThat(exceptions) - .hasSize(4); - - assertThat(exceptions.get(0)) - .isInstanceOf(TrinoException.class) - .hasMessage("Could not serialize column '_col1' of type 'Bogus' at position 1:2") - .hasRootCauseMessage("This is bogus exception"); - - assertThat(exceptions.get(1)) - .isInstanceOf(TrinoException.class) - .hasMessage("Could not serialize column '_col1' of type 'Bogus' at position 3:2") - .hasRootCauseMessage("This is bogus exception"); - - assertThat(exceptions.get(2)) - .isInstanceOf(TrinoException.class) - .hasMessage("Could not serialize column '_col0' of type 'Bogus' at position 4:1") - .hasRootCauseMessage("This is bogus exception"); - - assertThat(exceptions.get(3)) - .isInstanceOf(TrinoException.class) - .hasMessage("Could not serialize column '_col1' of type 'Bogus' at position 5:2") - .hasRootCauseMessage("This is bogus exception"); - } - - @Test - public void shouldHandleNullValues() - { - List columns = ImmutableList.of(new Column("_col0", INTEGER, new ClientTypeSignature(INTEGER)), new Column("_col1", BOOLEAN, new ClientTypeSignature(BOOLEAN))); - List types = ImmutableList.of(IntegerType.INTEGER, BooleanType.BOOLEAN); - - List pages = rowPagesBuilder(types) - .row(0, null) - .pageBreak() - .row(1, null) - .pageBreak() - .row(2, true) - .build(); - - TestExceptionConsumer exceptionConsumer = new TestExceptionConsumer(); - QueryResultRows rows = queryResultRowsBuilder(getSession()) - .withColumnsAndTypes(columns, types) - .addPages(pages) - .build(); - - assertThat(rows.isEmpty()) - .describedAs("rows are empty") - .isFalse(); - assertThat(rows.getTotalRowsCount()).isEqualTo(3); - - assertThat(getAllValues(rows, exceptionConsumer)) - .hasSize(3) - .containsExactly(newArrayList(0, null), newArrayList(1, null), newArrayList(2, true)); - } - - @Test - public void shouldHandleNullTimestamps() - { - List columns = ImmutableList.of( - new Column("_col0", TIMESTAMP, new ClientTypeSignature(TIMESTAMP)), - new Column("_col1", TIMESTAMP_WITH_TIME_ZONE, new ClientTypeSignature(TIMESTAMP_WITH_TIME_ZONE))); - List types = ImmutableList.of(TimestampType.TIMESTAMP_MILLIS, TimestampWithTimeZoneType.TIMESTAMP_TZ_MILLIS); - - List pages = rowPagesBuilder(types) - .row(null, null) - .build(); - - TestExceptionConsumer exceptionConsumer = new TestExceptionConsumer(); - QueryResultRows rows = queryResultRowsBuilder(getSession()) - .withColumnsAndTypes(columns, types) - .addPages(pages) - .build(); - - assertThat(exceptionConsumer.getExceptions()).isEmpty(); - assertThat(rows.isEmpty()) - .describedAs("rows are empty") - .isFalse(); - assertThat(rows.getTotalRowsCount()).isEqualTo(1); - - assertThat(getAllValues(rows, exceptionConsumer)) - .hasSize(1) - .containsExactly(newArrayList(null, null)); - } - - @Test - public void shouldHandleNullValuesInArray() - { - List columns = 
ImmutableList.of(new Column("_col0", ARRAY, new ClientTypeSignature(ARRAY))); - List types = ImmutableList.of(new ArrayType(TimestampWithTimeZoneType.TIMESTAMP_TZ_MILLIS)); - - List pages = rowPagesBuilder(types) - .row(singletonList(null)) - .build(); - - TestExceptionConsumer exceptionConsumer = new TestExceptionConsumer(); - QueryResultRows rows = queryResultRowsBuilder(getSession()) - .withColumnsAndTypes(columns, types) - .addPages(pages) - .build(); - - assertThat(exceptionConsumer.getExceptions()).isEmpty(); - assertThat(rows.isEmpty()) - .describedAs("rows are empty") - .isFalse(); - assertThat(rows.getTotalRowsCount()).isEqualTo(1); - - assertThat(getAllValues(rows, exceptionConsumer)) - .hasSize(1) - .containsOnly(singletonList(singletonList(null))); - - assertThat(exceptionConsumer.getExceptions()).isEmpty(); - } - - @Test - public void shouldHandleNullValuesInMap() - { - List columns = ImmutableList.of(new Column("_col0", MAP, new ClientTypeSignature(MAP))); - List types = ImmutableList.of(createMapType(BigintType.BIGINT, BigintType.BIGINT)); - - List pages = rowPagesBuilder(types) - .row(singletonMap(10, null)) - .build(); - - TestExceptionConsumer exceptionConsumer = new TestExceptionConsumer(); - QueryResultRows rows = queryResultRowsBuilder(getSession()) - .withColumnsAndTypes(columns, types) - .addPages(pages) - .build(); - - assertThat(exceptionConsumer.getExceptions()).isEmpty(); - assertThat(rows.isEmpty()) - .describedAs("rows are empty") - .isFalse(); - assertThat(rows.getTotalRowsCount()).isEqualTo(1); - - assertThat(getAllValues(rows, exceptionConsumer)) - .hasSize(1) - .containsOnly(singletonList(singletonMap(10L, null))); - - assertThat(exceptionConsumer.getExceptions()).isEmpty(); - } - - @Test - public void shouldHandleNullValuesInRow() - { - List columns = ImmutableList.of(new Column("_col0", ROW, new ClientTypeSignature(ROW))); - List types = ImmutableList.of(RowType.from(ImmutableList.of(RowType.field("first", SmallintType.SMALLINT), RowType.field("second", SmallintType.SMALLINT)))); - - List values = new ArrayList<>(); - values.add(null); - values.add((short) 1); - - List pages = rowPagesBuilder(types) - .row(values) - .build(); - - TestExceptionConsumer exceptionConsumer = new TestExceptionConsumer(); - QueryResultRows rows = queryResultRowsBuilder(getSession()) - .withColumnsAndTypes(columns, types) - .addPages(pages) - .build(); - - assertThat(exceptionConsumer.getExceptions()).isEmpty(); - assertThat(rows.isEmpty()) - .describedAs("rows are empty") - .isFalse(); - assertThat(rows.getTotalRowsCount()).isEqualTo(1); - - List> allValues = getAllValues(rows, exceptionConsumer); - - assertThat(allValues) - .hasSize(1) - .containsOnly(singletonList(newArrayList(null, (short) 1))); - - assertThat(exceptionConsumer.getExceptions()).isEmpty(); - } - - @Test - public void shouldNotThrowWhenDataAndColumnsAreMissing() - { - QueryResultRows.empty(getSession()); - } - - @Test - public void shouldThrowWhenColumnsAndTypesSizeMismatch() - { - List columns = ImmutableList.of(INT_COLUMN.apply("_col0")); - List types = ImmutableList.of(IntegerType.INTEGER, BooleanType.BOOLEAN); - - List pages = rowPagesBuilder(types) - .row(0, null) - .build(); - - assertThatThrownBy(() -> queryResultRowsBuilder(getSession()).addPages(pages).withColumnsAndTypes(columns, types).build()) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("columns and types size mismatch"); - } - - @Test - public void shouldThrowWhenColumnsAreNull() - { - List types = 
ImmutableList.of(IntegerType.INTEGER, BooleanType.BOOLEAN); - - List pages = rowPagesBuilder(types) - .row(0, null) - .build(); - - assertThatThrownBy(() -> queryResultRowsBuilder(getSession()).addPages(pages).withColumnsAndTypes(null, types).build()) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("columns and types must be present at the same time"); - } - - @Test - public void shouldAcceptNullColumnsAndTypes() - { - queryResultRowsBuilder(getSession()) - .withColumnsAndTypes(null, null) - .build(); - } - - @Test - public void shouldThrowWhenTypesAreNull() - { - List columns = ImmutableList.of(INT_COLUMN.apply("_col0")); - List types = ImmutableList.of(IntegerType.INTEGER, BooleanType.BOOLEAN); - - List pages = rowPagesBuilder(types) - .row(0, null) - .build(); - - assertThatThrownBy(() -> queryResultRowsBuilder(getSession()).addPages(pages).withColumnsAndTypes(columns, null).build()) - .isInstanceOf(IllegalArgumentException.class) - .hasMessage("columns and types must be present at the same time"); - } - - @Test - public void shouldThrowWhenDataIsPresentWithoutColumns() - { - List pages = rowPagesBuilder(ImmutableList.of(IntegerType.INTEGER, BooleanType.BOOLEAN)) - .row(0, null) - .build(); - - assertThatThrownBy(() -> queryResultRowsBuilder(getSession()).addPages(pages).build()) - .isInstanceOf(VerifyException.class) - .hasMessage("data present without columns and types"); - } - - private static List> getAllValues(QueryResultRows rows, Consumer throwableConsumer) - { - ImmutableList.Builder> builder = ImmutableList.builder(); - for (List values : toIterableList(getSession(), rows, throwableConsumer)) { - builder.add(values); - } - - return builder.build(); - } - - private static Session getSession() - { - return TestingSession.testSessionBuilder() - .build(); - } - - private static final class TestExceptionConsumer - implements Consumer - { - private final List exceptions = new ArrayList<>(); - - @Override - public void accept(TrinoException exception) - { - exceptions.add(exception); - } - - public List getExceptions() - { - return exceptions; - } - } - - private static Type createMapType(Type keyType, Type valueType) - { - return TESTING_TYPE_MANAGER.getType(mapType(keyType.getTypeSignature(), valueType.getTypeSignature())); - } -} From 67be3f26b7b3586e17289b99522b2e97f248bf48 Mon Sep 17 00:00:00 2001 From: "Mateusz \"Serafin\" Gajewski" Date: Wed, 30 Oct 2024 17:51:47 +0100 Subject: [PATCH 27/31] Invert the condition and flag --- .../server/protocol/JsonEncodingUtils.java | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java b/core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java index fbf3833394cd3..10e4b5634ae6e 100644 --- a/core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java +++ b/core/trino-main/src/main/java/io/trino/server/protocol/JsonEncodingUtils.java @@ -83,16 +83,16 @@ public static TypeEncoder[] createTypeEncoders(Session session, List createTypeEncoder(column.type(), useLegacyValue)) + .map(column -> createTypeEncoder(column.type(), supportsParametricDateTime)) .toArray(TypeEncoder[]::new); } - public static TypeEncoder createTypeEncoder(Type type, boolean useLegacyValue) + public static TypeEncoder createTypeEncoder(Type type, boolean supportsParametricDateTime) { return switch (type) { case BigintType _ -> BIGINT_ENCODER; @@ -106,13 +106,13 @@ public static TypeEncoder createTypeEncoder(Type 
type, boolean useLegacyValue) case VarbinaryType _ -> VARBINARY_ENCODER; case CharType charType -> new CharEncoder(charType.getLength()); // TODO: add specialized Short/Long decimal encoders - case ArrayType arrayType -> new ArrayEncoder(arrayType, createTypeEncoder(arrayType.getElementType(), useLegacyValue)); - case MapType mapType -> new MapEncoder(mapType, createTypeEncoder(mapType.getValueType(), useLegacyValue)); + case ArrayType arrayType -> new ArrayEncoder(arrayType, createTypeEncoder(arrayType.getElementType(), supportsParametricDateTime)); + case MapType mapType -> new MapEncoder(mapType, createTypeEncoder(mapType.getValueType(), supportsParametricDateTime)); case RowType rowType -> new RowEncoder(rowType, rowType.getTypeParameters() .stream() - .map(elementType -> createTypeEncoder(elementType, useLegacyValue)) + .map(elementType -> createTypeEncoder(elementType, supportsParametricDateTime)) .toArray(TypeEncoder[]::new)); - case Type _ -> new TypeObjectValueEncoder(type, useLegacyValue); + case Type _ -> new TypeObjectValueEncoder(type, supportsParametricDateTime); }; } @@ -410,12 +410,12 @@ private static class TypeObjectValueEncoder implements TypeEncoder { private final Type type; - private final boolean useLegacyValue; + private final boolean supportsParametricDateTime; - public TypeObjectValueEncoder(Type type, boolean useLegacyValue) + public TypeObjectValueEncoder(Type type, boolean supportsParametricDateTime) { this.type = requireNonNull(type, "type is null"); - this.useLegacyValue = useLegacyValue; + this.supportsParametricDateTime = supportsParametricDateTime; } @Override @@ -446,7 +446,7 @@ public void encode(JsonGenerator generator, ConnectorSession session, Block bloc private Object roundParametricTypes(Object value) { - if (!useLegacyValue) { + if (supportsParametricDateTime) { return value; } From b177f3ae6d41486170a10d993aabb12d5553d1a0 Mon Sep 17 00:00:00 2001 From: "Mateusz \"Serafin\" Gajewski" Date: Wed, 30 Oct 2024 18:05:53 +0100 Subject: [PATCH 28/31] Check whether spooling is enabled while getting encoding --- .../protocol/spooling/QueryDataEncoders.java | 17 +++++++++++++++-- .../main/java/io/trino/testing/PlanTester.java | 2 +- .../java/io/trino/execution/TaskTestUtils.java | 3 ++- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/QueryDataEncoders.java b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/QueryDataEncoders.java index 82860b6707779..794fe2ab0ad2f 100644 --- a/core/trino-main/src/main/java/io/trino/server/protocol/spooling/QueryDataEncoders.java +++ b/core/trino-main/src/main/java/io/trino/server/protocol/spooling/QueryDataEncoders.java @@ -27,25 +27,35 @@ public class QueryDataEncoders private static final Logger LOG = Logger.get(QueryDataEncoders.class); private final Map factories; + private final boolean enabled; @Inject - public QueryDataEncoders(Set factories) + public QueryDataEncoders(SpoolingEnabledConfig enabledConfig, Set factories) { + this.enabled = enabledConfig.isEnabled(); this.factories = requireNonNull(factories, "factories is null") .stream() .map(factory -> Map.entry(factory.encoding(), factory)) .collect(toImmutableMap(Map.Entry::getKey, Map.Entry::getValue)); - LOG.info("Spooled client protocol is enabled with encodings: " + getAvailableEncodings()); + if (enabled) { + LOG.info("Spooled client protocol is enabled with encodings: " + getAvailableEncodings()); + } } public boolean exists(String encoding) { + if (!enabled) { + 
throw new IllegalStateException("Spooled client protocol is not enabled"); + } return factories.containsKey(encoding); } public QueryDataEncoder.Factory get(String encoding) { + if (!enabled) { + throw new IllegalStateException("Spooled client protocol is not enabled"); + } if (!exists(encoding)) { throw new IllegalArgumentException("Unknown spooled protocol encoding: " + encoding); } @@ -55,6 +65,9 @@ public QueryDataEncoder.Factory get(String encoding) public Set getAvailableEncodings() { + if (!enabled) { + throw new IllegalStateException("Spooled client protocol is not enabled"); + } return factories.keySet(); } } diff --git a/core/trino-main/src/main/java/io/trino/testing/PlanTester.java b/core/trino-main/src/main/java/io/trino/testing/PlanTester.java index dda72c2dd8819..2f210edb303ab 100644 --- a/core/trino-main/src/main/java/io/trino/testing/PlanTester.java +++ b/core/trino-main/src/main/java/io/trino/testing/PlanTester.java @@ -745,7 +745,7 @@ private List createDrivers(Session session, @Language("SQL") String sql) new IndexJoinLookupStats(), this.taskManagerConfig, new GenericSpillerFactory(unsupportedSingleStreamSpillerFactory()), - new QueryDataEncoders(Set.of()), + new QueryDataEncoders(new SpoolingEnabledConfig(), Set.of()), Optional.empty(), Optional.empty(), unsupportedSingleStreamSpillerFactory(), diff --git a/core/trino-main/src/test/java/io/trino/execution/TaskTestUtils.java b/core/trino-main/src/test/java/io/trino/execution/TaskTestUtils.java index 3883934f4adf8..885cd8cd5a99d 100644 --- a/core/trino-main/src/test/java/io/trino/execution/TaskTestUtils.java +++ b/core/trino-main/src/test/java/io/trino/execution/TaskTestUtils.java @@ -40,6 +40,7 @@ import io.trino.operator.index.IndexJoinLookupStats; import io.trino.operator.index.IndexManager; import io.trino.server.protocol.spooling.QueryDataEncoders; +import io.trino.server.protocol.spooling.SpoolingEnabledConfig; import io.trino.spi.connector.CatalogHandle; import io.trino.spiller.GenericSpillerFactory; import io.trino.split.PageSinkManager; @@ -172,7 +173,7 @@ public static LocalExecutionPlanner createTestingPlanner() new GenericSpillerFactory((types, spillContext, memoryContext) -> { throw new UnsupportedOperationException(); }), - new QueryDataEncoders(Set.of()), + new QueryDataEncoders(new SpoolingEnabledConfig(), Set.of()), Optional.empty(), Optional.empty(), (types, spillContext, memoryContext) -> { From 17e0f4edcaa7ec8bf08fabeb32da0a1ec87da071 Mon Sep 17 00:00:00 2001 From: Yuya Ebihara Date: Wed, 30 Oct 2024 13:58:49 +0900 Subject: [PATCH 29/31] Use switch expression in Vertica --- .../BaseVerticaConnectorSmokeTest.java | 12 ++--- .../vertica/TestVerticaConnectorTest.java | 46 ++++++++----------- 2 files changed, 23 insertions(+), 35 deletions(-) diff --git a/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/BaseVerticaConnectorSmokeTest.java b/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/BaseVerticaConnectorSmokeTest.java index fc23c96d93e8c..25df780431bdf 100644 --- a/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/BaseVerticaConnectorSmokeTest.java +++ b/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/BaseVerticaConnectorSmokeTest.java @@ -22,12 +22,10 @@ public abstract class BaseVerticaConnectorSmokeTest @Override protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) { - switch (connectorBehavior) { - case SUPPORTS_RENAME_SCHEMA: - case SUPPORTS_RENAME_TABLE_ACROSS_SCHEMAS: - return false; - default: - return 
super.hasBehavior(connectorBehavior); - } + return switch (connectorBehavior) { + case SUPPORTS_RENAME_SCHEMA, + SUPPORTS_RENAME_TABLE_ACROSS_SCHEMAS -> false; + default -> super.hasBehavior(connectorBehavior); + }; } } diff --git a/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaConnectorTest.java b/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaConnectorTest.java index 1bbae148ff585..19b1ac487abb4 100644 --- a/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaConnectorTest.java +++ b/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaConnectorTest.java @@ -65,34 +65,24 @@ protected QueryRunner createQueryRunner() @Override protected boolean hasBehavior(TestingConnectorBehavior connectorBehavior) { - switch (connectorBehavior) { - case SUPPORTS_JOIN_PUSHDOWN: - return true; - case SUPPORTS_JOIN_PUSHDOWN_WITH_DISTINCT_FROM: - return false; - case SUPPORTS_TOPN_PUSHDOWN: - return false; - case SUPPORTS_RENAME_TABLE_ACROSS_SCHEMAS: - return false; - case SUPPORTS_DROP_COLUMN: - case SUPPORTS_SET_COLUMN_TYPE: - return false; - case SUPPORTS_RENAME_SCHEMA: - return false; - case SUPPORTS_COMMENT_ON_TABLE: - case SUPPORTS_COMMENT_ON_COLUMN: - case SUPPORTS_ADD_COLUMN_WITH_COMMENT: - case SUPPORTS_CREATE_TABLE_WITH_TABLE_COMMENT: - case SUPPORTS_CREATE_TABLE_WITH_COLUMN_COMMENT: - return false; - case SUPPORTS_ARRAY: - case SUPPORTS_ROW_TYPE: - return false; - case SUPPORTS_AGGREGATION_PUSHDOWN: - return false; - default: - return super.hasBehavior(connectorBehavior); - } + return switch (connectorBehavior) { + case SUPPORTS_JOIN_PUSHDOWN -> true; + case SUPPORTS_ARRAY, + SUPPORTS_ADD_COLUMN_WITH_COMMENT, + SUPPORTS_AGGREGATION_PUSHDOWN, + SUPPORTS_COMMENT_ON_COLUMN, + SUPPORTS_COMMENT_ON_TABLE, + SUPPORTS_CREATE_TABLE_WITH_COLUMN_COMMENT, + SUPPORTS_CREATE_TABLE_WITH_TABLE_COMMENT, + SUPPORTS_DROP_COLUMN, + SUPPORTS_JOIN_PUSHDOWN_WITH_DISTINCT_FROM, + SUPPORTS_RENAME_SCHEMA, + SUPPORTS_RENAME_TABLE_ACROSS_SCHEMAS, + SUPPORTS_ROW_TYPE, + SUPPORTS_SET_COLUMN_TYPE, + SUPPORTS_TOPN_PUSHDOWN -> false; + default -> super.hasBehavior(connectorBehavior); + }; } // Overridden due to test case with a push down on a DOUBLE type From 58caa0beec3908cd4dab1349490915602f64ba29 Mon Sep 17 00:00:00 2001 From: Yuya Ebihara Date: Wed, 30 Oct 2024 13:59:05 +0900 Subject: [PATCH 30/31] Remove redundant var from TestVerticaPlugin --- .../test/java/io/trino/plugin/vertica/TestVerticaPlugin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaPlugin.java b/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaPlugin.java index 1718dc036a028..d71c164a0de9c 100644 --- a/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaPlugin.java +++ b/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaPlugin.java @@ -28,6 +28,6 @@ public void testCreateConnector() { Plugin plugin = new VerticaPlugin(); ConnectorFactory factory = getOnlyElement(plugin.getConnectorFactories()); - var _ = factory.create("test", ImmutableMap.of("connection-url", "jdbc:vertica://test"), new TestingConnectorContext()); + factory.create("test", ImmutableMap.of("connection-url", "jdbc:vertica://test"), new TestingConnectorContext()).shutdown(); } } From 8818f050ecbb12be97ab14c5d7e6a94dfe4b5f60 Mon Sep 17 00:00:00 2001 From: Yuya Ebihara Date: Wed, 30 Oct 2024 14:18:26 +0900 Subject: [PATCH 31/31] Extend DistributedQueryRunner in Vertica 
builder --- plugin/trino-vertica/pom.xml | 6 + .../vertica/TestVerticaTableStatistics.java | 2 +- .../plugin/vertica/VerticaQueryRunner.java | 163 ++++++++---------- 3 files changed, 75 insertions(+), 96 deletions(-) diff --git a/plugin/trino-vertica/pom.xml b/plugin/trino-vertica/pom.xml index 013c8fb95d43f..c3b77be73d44f 100644 --- a/plugin/trino-vertica/pom.xml +++ b/plugin/trino-vertica/pom.xml @@ -112,6 +112,12 @@ provided + + com.google.errorprone + error_prone_annotations + runtime + + io.airlift log-manager diff --git a/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaTableStatistics.java b/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaTableStatistics.java index 04c94a04ebd38..3df522ca38613 100644 --- a/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaTableStatistics.java +++ b/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/TestVerticaTableStatistics.java @@ -56,7 +56,7 @@ protected QueryRunner createQueryRunner() // Use the latest image to avoid "Must be superuser to run export_statistics" verticaServer = closeAfterClass(new TestingVerticaServer(LATEST_IMAGE)); return VerticaQueryRunner.builder(verticaServer) - .addConnectorProperties(ImmutableMap.of("statistics.enabled", "true")) + .addConnectorProperty("statistics.enabled", "true") .setTables(ImmutableList.of(TpchTable.ORDERS, TpchTable.REGION, TpchTable.NATION)) .build(); } diff --git a/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/VerticaQueryRunner.java b/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/VerticaQueryRunner.java index 42509c819f745..8adb125b773b9 100644 --- a/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/VerticaQueryRunner.java +++ b/plugin/trino-vertica/src/test/java/io/trino/plugin/vertica/VerticaQueryRunner.java @@ -14,7 +14,7 @@ package io.trino.plugin.vertica; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; +import com.google.errorprone.annotations.CanIgnoreReturnValue; import io.airlift.log.Logger; import io.airlift.log.Logging; import io.trino.Session; @@ -22,9 +22,10 @@ import io.trino.plugin.tpch.TpchPlugin; import io.trino.spi.security.Identity; import io.trino.testing.DistributedQueryRunner; -import io.trino.testing.QueryRunner; import io.trino.tpch.TpchTable; +import java.util.HashMap; +import java.util.List; import java.util.Map; import static io.airlift.testing.Closeables.closeAllSuppress; @@ -41,124 +42,96 @@ private VerticaQueryRunner() {} public static final String NON_GRANTED_USER = "bob"; public static final String TPCH_SCHEMA = "tpch"; - private static DistributedQueryRunner createVerticaQueryRunner( - TestingVerticaServer server, - Map extraProperties, - Map connectorProperties, - Iterable> tables) - throws Exception - { - DistributedQueryRunner queryRunner = null; - try { - DistributedQueryRunner.Builder builder = DistributedQueryRunner.builder(createSession(GRANTED_USER, "vertica")); - extraProperties.forEach(builder::addExtraProperty); - queryRunner = builder.build(); - - queryRunner.installPlugin(new JmxPlugin()); - queryRunner.createCatalog("jmx", "jmx"); - - queryRunner.installPlugin(new TpchPlugin()); - queryRunner.createCatalog(TPCH_SCHEMA, TPCH_SCHEMA); - - // Create two users, one of which will have access to the TPCH database/schema - executeAsAdmin(server, "CREATE SCHEMA IF NOT EXISTS tpch"); - executeAsAdmin(server, "CREATE ROLE " + GRANTED_USER); - executeAsAdmin(server, "CREATE ROLE " + NON_GRANTED_USER); - 
executeAsAdmin(server, "GRANT ALL PRIVILEGES ON DATABASE tpch TO " + GRANTED_USER); - executeAsAdmin(server, "GRANT ALL PRIVILEGES ON SCHEMA tpch TO " + GRANTED_USER); - - // Allow the user to set the roles - executeAsAdmin(server, "GRANT " + GRANTED_USER + " TO " + server.getUsername()); - executeAsAdmin(server, "GRANT " + NON_GRANTED_USER + " TO " + server.getUsername()); - - queryRunner.installPlugin(new VerticaPlugin()); - queryRunner.createCatalog("vertica", "vertica", connectorProperties); - - copyTpchTables(queryRunner, TPCH_SCHEMA, TINY_SCHEMA_NAME, createSession(GRANTED_USER, "vertica"), tables); - - // Revoke all access to the database for the server's user if impersonation is enabled - // This will allow the impersonation to work as intended for testing as Vertica roles add to the user's existing permissions - // Running queries with the NON_GRANTED_USER user/role will succeed because the user in the JDBC connection has access to the tables - if (Boolean.parseBoolean(connectorProperties.getOrDefault("vertica.impersonation.enabled", "false"))) { - executeAsAdmin(server, "REVOKE ALL ON SCHEMA tpch FROM " + server.getUsername()); - executeAsAdmin(server, "REVOKE ALL ON DATABASE tpch FROM " + server.getUsername()); - } - - return queryRunner; - } - catch (Throwable e) { - closeAllSuppress(e, queryRunner); - throw e; - } - } - - public static Session createSession(String user, String catalogName) - { - return testSessionBuilder() - .setCatalog(catalogName) - .setSchema(TPCH_SCHEMA) - .setIdentity(Identity.ofUser(user)) - .build(); - } - public static Builder builder(TestingVerticaServer server) { - return new Builder(server); + return new Builder(server) + .addConnectorProperty("connection-url", requireNonNull(server.getJdbcUrl(), "jdbcUrl is null")) + .addConnectorProperty("connection-user", requireNonNull(server.getUsername(), "user is null")) + .addConnectorProperty("connection-password", requireNonNull(server.getPassword(), "password is null")); } - public static class Builder + public static final class Builder + extends DistributedQueryRunner.Builder { private final TestingVerticaServer server; - private Iterable> tables = ImmutableList.of(); - private Map connectorProperties; - private Map extraProperties; + private List> tables = ImmutableList.of(); + private final Map connectorProperties = new HashMap<>(); - public Builder(TestingVerticaServer server) + private Builder(TestingVerticaServer server) { + super(testSessionBuilder() + .setCatalog("vertica") + .setSchema(TPCH_SCHEMA) + .build()); this.server = requireNonNull(server, "server is null"); - connectorProperties = ImmutableMap.builder() - .put("connection-url", requireNonNull(server.getJdbcUrl(), "jdbcUrl is null")) - .put("connection-user", requireNonNull(server.getUsername(), "user is null")) - .put("connection-password", requireNonNull(server.getPassword(), "password is null")) - .buildOrThrow(); - extraProperties = ImmutableMap.of(); } - public Builder addConnectorProperties(Map properties) + @CanIgnoreReturnValue + public Builder addConnectorProperty(String key, String value) { - connectorProperties = updateProperties(connectorProperties, properties); - return this; - } - - public Builder addExtraProperties(Map properties) - { - extraProperties = updateProperties(extraProperties, properties); + connectorProperties.put(key, value); return this; } + @CanIgnoreReturnValue public Builder setTables(Iterable> tables) { this.tables = ImmutableList.copyOf(requireNonNull(tables, "tables is null")); return this; } - public 
QueryRunner build() + @Override + public DistributedQueryRunner build() throws Exception { - return createVerticaQueryRunner( - server, - extraProperties, - connectorProperties, - tables); + DistributedQueryRunner queryRunner = super.build(); + try { + queryRunner.installPlugin(new JmxPlugin()); + queryRunner.createCatalog("jmx", "jmx"); + + queryRunner.installPlugin(new TpchPlugin()); + queryRunner.createCatalog(TPCH_SCHEMA, TPCH_SCHEMA); + + // Create two users, one of which will have access to the TPCH database/schema + executeAsAdmin(server, "CREATE SCHEMA IF NOT EXISTS tpch"); + executeAsAdmin(server, "CREATE ROLE " + GRANTED_USER); + executeAsAdmin(server, "CREATE ROLE " + NON_GRANTED_USER); + executeAsAdmin(server, "GRANT ALL PRIVILEGES ON DATABASE tpch TO " + GRANTED_USER); + executeAsAdmin(server, "GRANT ALL PRIVILEGES ON SCHEMA tpch TO " + GRANTED_USER); + + // Allow the user to set the roles + executeAsAdmin(server, "GRANT " + GRANTED_USER + " TO " + server.getUsername()); + executeAsAdmin(server, "GRANT " + NON_GRANTED_USER + " TO " + server.getUsername()); + + queryRunner.installPlugin(new VerticaPlugin()); + queryRunner.createCatalog("vertica", "vertica", connectorProperties); + + copyTpchTables(queryRunner, TPCH_SCHEMA, TINY_SCHEMA_NAME, createSession(GRANTED_USER, "vertica"), tables); + + // Revoke all access to the database for the server's user if impersonation is enabled + // This will allow the impersonation to work as intended for testing as Vertica roles add to the user's existing permissions + // Running queries with the NON_GRANTED_USER user/role will succeed because the user in the JDBC connection has access to the tables + if (Boolean.parseBoolean(connectorProperties.getOrDefault("vertica.impersonation.enabled", "false"))) { + executeAsAdmin(server, "REVOKE ALL ON SCHEMA tpch FROM " + server.getUsername()); + executeAsAdmin(server, "REVOKE ALL ON DATABASE tpch FROM " + server.getUsername()); + } + + return queryRunner; + } + catch (Throwable e) { + closeAllSuppress(e, queryRunner); + throw e; + } } } - private static Map updateProperties(Map properties, Map update) + public static Session createSession(String user, String catalogName) { - return ImmutableMap.builder() - .putAll(requireNonNull(properties, "properties is null")) - .putAll(requireNonNull(update, "update is null")) - .buildOrThrow(); + return testSessionBuilder() + .setCatalog(catalogName) + .setSchema(TPCH_SCHEMA) + .setIdentity(Identity.ofUser(user)) + .build(); } private static void executeAsAdmin(TestingVerticaServer server, String sql) @@ -171,8 +144,8 @@ public static void main(String[] args) { Logging.initialize(); - DistributedQueryRunner queryRunner = (DistributedQueryRunner) builder(new TestingVerticaServer()) - .addExtraProperties(ImmutableMap.of("http-server.http.port", "8080")) + DistributedQueryRunner queryRunner = builder(new TestingVerticaServer()) + .addCoordinatorProperty("http-server.http.port", "8080") .setTables(TpchTable.getTables()) .build();
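Taken together, the final commit turns VerticaQueryRunner.Builder into a DistributedQueryRunner.Builder subclass, so tests compose the whole environment through one fluent chain. A usage sketch under the new API (the connector property and table list are illustrative; the example class is hypothetical and assumed to live in the same io.trino.plugin.vertica package, so no cross-package imports are needed):

package io.trino.plugin.vertica;

import com.google.common.collect.ImmutableList;
import io.trino.testing.QueryRunner;
import io.trino.tpch.TpchTable;

public final class VerticaQueryRunnerUsageExample
{
    private VerticaQueryRunnerUsageExample() {}

    public static QueryRunner createForTest()
            throws Exception
    {
        // builder() pre-populates connection-url/user/password from the container,
        // so a test only layers its own connector properties and TPCH tables on top
        return VerticaQueryRunner.builder(new TestingVerticaServer())
                .addConnectorProperty("statistics.enabled", "true")
                .setTables(ImmutableList.of(TpchTable.ORDERS, TpchTable.NATION))
                .build();
    }
}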